net/mlx5: modify TCP header using Direct Verbs
[dpdk.git] / drivers / net / mlx5 / mlx5_flow_dv.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2018 Mellanox Technologies, Ltd
3  */
4
5 #include <sys/queue.h>
6 #include <stdalign.h>
7 #include <stdint.h>
8 #include <string.h>
9
10 /* Verbs header. */
11 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
12 #ifdef PEDANTIC
13 #pragma GCC diagnostic ignored "-Wpedantic"
14 #endif
15 #include <infiniband/verbs.h>
16 #ifdef PEDANTIC
17 #pragma GCC diagnostic error "-Wpedantic"
18 #endif
19
20 #include <rte_common.h>
21 #include <rte_ether.h>
22 #include <rte_ethdev_driver.h>
23 #include <rte_flow.h>
24 #include <rte_flow_driver.h>
25 #include <rte_malloc.h>
26 #include <rte_ip.h>
27 #include <rte_gre.h>
28
29 #include "mlx5.h"
30 #include "mlx5_defs.h"
31 #include "mlx5_glue.h"
32 #include "mlx5_flow.h"
33 #include "mlx5_prm.h"
34 #include "mlx5_rxtx.h"
35
36 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
37
38 #ifndef HAVE_IBV_FLOW_DEVX_COUNTERS
39 #define MLX5DV_FLOW_ACTION_COUNTERS_DEVX 0
40 #endif
41
42 #ifndef HAVE_MLX5DV_DR_ESWITCH
43 #ifndef MLX5DV_FLOW_TABLE_TYPE_FDB
44 #define MLX5DV_FLOW_TABLE_TYPE_FDB 0
45 #endif
46 #endif
47
48 #ifndef HAVE_MLX5DV_DR
49 #define MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL 1
50 #endif
51
/*
 * Summary of the protocol headers found in a flow pattern.
 * Filled once by flow_dv_attr_init() and consumed by the modify-header
 * action converters to choose the proper field table (e.g. UDP vs TCP
 * ports, IPv4 TTL vs IPv6 hop limit).
 */
union flow_dv_attr {
	struct {
		uint32_t valid:1; /* Set once the union has been initialized. */
		uint32_t ipv4:1; /* Pattern contains an IPv4 item. */
		uint32_t ipv6:1; /* Pattern contains an IPv6 item. */
		uint32_t tcp:1; /* Pattern contains a TCP item. */
		uint32_t udp:1; /* Pattern contains a UDP item. */
		uint32_t reserved:27;
	};
	uint32_t attr; /* All flags accessed as a single word. */
};
63
64 /**
65  * Initialize flow attributes structure according to flow items' types.
66  *
67  * @param[in] item
68  *   Pointer to item specification.
69  * @param[out] attr
70  *   Pointer to flow attributes structure.
71  */
72 static void
73 flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr)
74 {
75         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
76                 switch (item->type) {
77                 case RTE_FLOW_ITEM_TYPE_IPV4:
78                         attr->ipv4 = 1;
79                         break;
80                 case RTE_FLOW_ITEM_TYPE_IPV6:
81                         attr->ipv6 = 1;
82                         break;
83                 case RTE_FLOW_ITEM_TYPE_UDP:
84                         attr->udp = 1;
85                         break;
86                 case RTE_FLOW_ITEM_TYPE_TCP:
87                         attr->tcp = 1;
88                         break;
89                 default:
90                         break;
91                 }
92         }
93         attr->valid = 1;
94 }
95
/* Description of one protocol-header field the HW can rewrite. */
struct field_modify_info {
	uint32_t size; /* Size of field in protocol header, in bytes. */
	uint32_t offset; /* Offset of field in protocol header, in bytes. */
	enum mlx5_modification_field id; /* PRM identifier of the field. */
};
101
/* Modifiable Ethernet header fields: destination and source MAC,
 * each split into a 32-bit high part and a 16-bit low part as the
 * PRM exposes them.
 */
struct field_modify_info modify_eth[] = {
	{4,  0, MLX5_MODI_OUT_DMAC_47_16},
	{2,  4, MLX5_MODI_OUT_DMAC_15_0},
	{4,  6, MLX5_MODI_OUT_SMAC_47_16},
	{2, 10, MLX5_MODI_OUT_SMAC_15_0},
	{0, 0, 0}, /* End marker: size 0 terminates iteration. */
};
109
/* Modifiable IPv4 header fields: TTL, source and destination address. */
struct field_modify_info modify_ipv4[] = {
	{1,  8, MLX5_MODI_OUT_IPV4_TTL},
	{4, 12, MLX5_MODI_OUT_SIPV4},
	{4, 16, MLX5_MODI_OUT_DIPV4},
	{0, 0, 0}, /* End marker: size 0 terminates iteration. */
};
116
/* Modifiable IPv6 header fields: hop limit plus the 128-bit source and
 * destination addresses, each split into four 32-bit PRM fields.
 */
struct field_modify_info modify_ipv6[] = {
	{1,  7, MLX5_MODI_OUT_IPV6_HOPLIMIT},
	{4,  8, MLX5_MODI_OUT_SIPV6_127_96},
	{4, 12, MLX5_MODI_OUT_SIPV6_95_64},
	{4, 16, MLX5_MODI_OUT_SIPV6_63_32},
	{4, 20, MLX5_MODI_OUT_SIPV6_31_0},
	{4, 24, MLX5_MODI_OUT_DIPV6_127_96},
	{4, 28, MLX5_MODI_OUT_DIPV6_95_64},
	{4, 32, MLX5_MODI_OUT_DIPV6_63_32},
	{4, 36, MLX5_MODI_OUT_DIPV6_31_0},
	{0, 0, 0}, /* End marker: size 0 terminates iteration. */
};
129
/* Modifiable UDP header fields: source and destination port. */
struct field_modify_info modify_udp[] = {
	{2, 0, MLX5_MODI_OUT_UDP_SPORT},
	{2, 2, MLX5_MODI_OUT_UDP_DPORT},
	{0, 0, 0}, /* End marker: size 0 terminates iteration. */
};
135
/* Modifiable TCP header fields: ports, sequence and acknowledgment
 * numbers.
 */
struct field_modify_info modify_tcp[] = {
	{2, 0, MLX5_MODI_OUT_TCP_SPORT},
	{2, 2, MLX5_MODI_OUT_TCP_DPORT},
	{4, 4, MLX5_MODI_OUT_TCP_SEQ_NUM},
	{4, 8, MLX5_MODI_OUT_TCP_ACK_NUM},
	{0, 0, 0}, /* End marker: size 0 terminates iteration. */
};
143
144 /**
145  * Acquire the synchronizing object to protect multithreaded access
146  * to shared dv context. Lock occurs only if context is actually
147  * shared, i.e. we have multiport IB device and representors are
148  * created.
149  *
150  * @param[in] dev
151  *   Pointer to the rte_eth_dev structure.
152  */
153 static void
154 flow_d_shared_lock(struct rte_eth_dev *dev)
155 {
156         struct mlx5_priv *priv = dev->data->dev_private;
157         struct mlx5_ibv_shared *sh = priv->sh;
158
159         if (sh->dv_refcnt > 1) {
160                 int ret;
161
162                 ret = pthread_mutex_lock(&sh->dv_mutex);
163                 assert(!ret);
164                 (void)ret;
165         }
166 }
167
168 static void
169 flow_d_shared_unlock(struct rte_eth_dev *dev)
170 {
171         struct mlx5_priv *priv = dev->data->dev_private;
172         struct mlx5_ibv_shared *sh = priv->sh;
173
174         if (sh->dv_refcnt > 1) {
175                 int ret;
176
177                 ret = pthread_mutex_unlock(&sh->dv_mutex);
178                 assert(!ret);
179                 (void)ret;
180         }
181 }
182
/**
 * Convert modify-header action to DV specification.
 *
 * Walks the field table and, for every field whose mask segment is
 * non-zero, appends one PRM modification command to the resource.
 *
 * @param[in] item
 *   Pointer to item specification; item->spec carries the values to
 *   write and item->mask selects which fields generate commands.
 * @param[in] field
 *   Pointer to field modification information table, terminated by a
 *   zero-size entry.
 * @param[in,out] resource
 *   Pointer to the modify-header resource; actions_num is advanced for
 *   every command appended.
 * @param[in] type
 *   Type of modification (MLX5_MODIFICATION_TYPE_SET or _ADD).
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_modify_action(struct rte_flow_item *item,
			      struct field_modify_info *field,
			      struct mlx5_flow_dv_modify_hdr_resource *resource,
			      uint32_t type,
			      struct rte_flow_error *error)
{
	uint32_t i = resource->actions_num;
	struct mlx5_modification_cmd *actions = resource->actions;
	const uint8_t *spec = item->spec;
	const uint8_t *mask = item->mask;
	uint32_t set;

	while (field->size) {
		set = 0;
		/* Generate modify command for each mask segment. */
		memcpy(&set, &mask[field->offset], field->size);
		if (set) {
			if (i >= MLX5_MODIFY_NUM)
				return rte_flow_error_set(error, EINVAL,
					 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					 "too many items to modify");
			actions[i].action_type = type;
			actions[i].field = field->id;
			/* In PRM, length 0 means the whole 32-bit dword. */
			actions[i].length = field->size ==
					4 ? 0 : field->size * 8;
			/* Data is right-aligned within the 4-byte word. */
			rte_memcpy(&actions[i].data[4 - field->size],
				   &spec[field->offset], field->size);
			/* The device expects the command in big-endian. */
			actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
			++i;
		}
		if (resource->actions_num != i)
			resource->actions_num = i;
		field++;
	}
	/* An all-zero mask produced no command: the action is malformed. */
	if (!resource->actions_num)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "invalid modification flow item");
	return 0;
}
241
242 /**
243  * Convert modify-header set IPv4 address action to DV specification.
244  *
245  * @param[in,out] resource
246  *   Pointer to the modify-header resource.
247  * @param[in] action
248  *   Pointer to action specification.
249  * @param[out] error
250  *   Pointer to the error structure.
251  *
252  * @return
253  *   0 on success, a negative errno value otherwise and rte_errno is set.
254  */
255 static int
256 flow_dv_convert_action_modify_ipv4
257                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
258                          const struct rte_flow_action *action,
259                          struct rte_flow_error *error)
260 {
261         const struct rte_flow_action_set_ipv4 *conf =
262                 (const struct rte_flow_action_set_ipv4 *)(action->conf);
263         struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
264         struct rte_flow_item_ipv4 ipv4;
265         struct rte_flow_item_ipv4 ipv4_mask;
266
267         memset(&ipv4, 0, sizeof(ipv4));
268         memset(&ipv4_mask, 0, sizeof(ipv4_mask));
269         if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC) {
270                 ipv4.hdr.src_addr = conf->ipv4_addr;
271                 ipv4_mask.hdr.src_addr = rte_flow_item_ipv4_mask.hdr.src_addr;
272         } else {
273                 ipv4.hdr.dst_addr = conf->ipv4_addr;
274                 ipv4_mask.hdr.dst_addr = rte_flow_item_ipv4_mask.hdr.dst_addr;
275         }
276         item.spec = &ipv4;
277         item.mask = &ipv4_mask;
278         return flow_dv_convert_modify_action(&item, modify_ipv4, resource,
279                                              MLX5_MODIFICATION_TYPE_SET, error);
280 }
281
282 /**
283  * Convert modify-header set IPv6 address action to DV specification.
284  *
285  * @param[in,out] resource
286  *   Pointer to the modify-header resource.
287  * @param[in] action
288  *   Pointer to action specification.
289  * @param[out] error
290  *   Pointer to the error structure.
291  *
292  * @return
293  *   0 on success, a negative errno value otherwise and rte_errno is set.
294  */
295 static int
296 flow_dv_convert_action_modify_ipv6
297                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
298                          const struct rte_flow_action *action,
299                          struct rte_flow_error *error)
300 {
301         const struct rte_flow_action_set_ipv6 *conf =
302                 (const struct rte_flow_action_set_ipv6 *)(action->conf);
303         struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
304         struct rte_flow_item_ipv6 ipv6;
305         struct rte_flow_item_ipv6 ipv6_mask;
306
307         memset(&ipv6, 0, sizeof(ipv6));
308         memset(&ipv6_mask, 0, sizeof(ipv6_mask));
309         if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC) {
310                 memcpy(&ipv6.hdr.src_addr, &conf->ipv6_addr,
311                        sizeof(ipv6.hdr.src_addr));
312                 memcpy(&ipv6_mask.hdr.src_addr,
313                        &rte_flow_item_ipv6_mask.hdr.src_addr,
314                        sizeof(ipv6.hdr.src_addr));
315         } else {
316                 memcpy(&ipv6.hdr.dst_addr, &conf->ipv6_addr,
317                        sizeof(ipv6.hdr.dst_addr));
318                 memcpy(&ipv6_mask.hdr.dst_addr,
319                        &rte_flow_item_ipv6_mask.hdr.dst_addr,
320                        sizeof(ipv6.hdr.dst_addr));
321         }
322         item.spec = &ipv6;
323         item.mask = &ipv6_mask;
324         return flow_dv_convert_modify_action(&item, modify_ipv6, resource,
325                                              MLX5_MODIFICATION_TYPE_SET, error);
326 }
327
328 /**
329  * Convert modify-header set MAC address action to DV specification.
330  *
331  * @param[in,out] resource
332  *   Pointer to the modify-header resource.
333  * @param[in] action
334  *   Pointer to action specification.
335  * @param[out] error
336  *   Pointer to the error structure.
337  *
338  * @return
339  *   0 on success, a negative errno value otherwise and rte_errno is set.
340  */
341 static int
342 flow_dv_convert_action_modify_mac
343                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
344                          const struct rte_flow_action *action,
345                          struct rte_flow_error *error)
346 {
347         const struct rte_flow_action_set_mac *conf =
348                 (const struct rte_flow_action_set_mac *)(action->conf);
349         struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_ETH };
350         struct rte_flow_item_eth eth;
351         struct rte_flow_item_eth eth_mask;
352
353         memset(&eth, 0, sizeof(eth));
354         memset(&eth_mask, 0, sizeof(eth_mask));
355         if (action->type == RTE_FLOW_ACTION_TYPE_SET_MAC_SRC) {
356                 memcpy(&eth.src.addr_bytes, &conf->mac_addr,
357                        sizeof(eth.src.addr_bytes));
358                 memcpy(&eth_mask.src.addr_bytes,
359                        &rte_flow_item_eth_mask.src.addr_bytes,
360                        sizeof(eth_mask.src.addr_bytes));
361         } else {
362                 memcpy(&eth.dst.addr_bytes, &conf->mac_addr,
363                        sizeof(eth.dst.addr_bytes));
364                 memcpy(&eth_mask.dst.addr_bytes,
365                        &rte_flow_item_eth_mask.dst.addr_bytes,
366                        sizeof(eth_mask.dst.addr_bytes));
367         }
368         item.spec = &eth;
369         item.mask = &eth_mask;
370         return flow_dv_convert_modify_action(&item, modify_eth, resource,
371                                              MLX5_MODIFICATION_TYPE_SET, error);
372 }
373
374 /**
375  * Convert modify-header set TP action to DV specification.
376  *
377  * @param[in,out] resource
378  *   Pointer to the modify-header resource.
379  * @param[in] action
380  *   Pointer to action specification.
381  * @param[in] items
382  *   Pointer to rte_flow_item objects list.
383  * @param[in] attr
384  *   Pointer to flow attributes structure.
385  * @param[out] error
386  *   Pointer to the error structure.
387  *
388  * @return
389  *   0 on success, a negative errno value otherwise and rte_errno is set.
390  */
391 static int
392 flow_dv_convert_action_modify_tp
393                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
394                          const struct rte_flow_action *action,
395                          const struct rte_flow_item *items,
396                          union flow_dv_attr *attr,
397                          struct rte_flow_error *error)
398 {
399         const struct rte_flow_action_set_tp *conf =
400                 (const struct rte_flow_action_set_tp *)(action->conf);
401         struct rte_flow_item item;
402         struct rte_flow_item_udp udp;
403         struct rte_flow_item_udp udp_mask;
404         struct rte_flow_item_tcp tcp;
405         struct rte_flow_item_tcp tcp_mask;
406         struct field_modify_info *field;
407
408         if (!attr->valid)
409                 flow_dv_attr_init(items, attr);
410         if (attr->udp) {
411                 memset(&udp, 0, sizeof(udp));
412                 memset(&udp_mask, 0, sizeof(udp_mask));
413                 if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
414                         udp.hdr.src_port = conf->port;
415                         udp_mask.hdr.src_port =
416                                         rte_flow_item_udp_mask.hdr.src_port;
417                 } else {
418                         udp.hdr.dst_port = conf->port;
419                         udp_mask.hdr.dst_port =
420                                         rte_flow_item_udp_mask.hdr.dst_port;
421                 }
422                 item.type = RTE_FLOW_ITEM_TYPE_UDP;
423                 item.spec = &udp;
424                 item.mask = &udp_mask;
425                 field = modify_udp;
426         }
427         if (attr->tcp) {
428                 memset(&tcp, 0, sizeof(tcp));
429                 memset(&tcp_mask, 0, sizeof(tcp_mask));
430                 if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
431                         tcp.hdr.src_port = conf->port;
432                         tcp_mask.hdr.src_port =
433                                         rte_flow_item_tcp_mask.hdr.src_port;
434                 } else {
435                         tcp.hdr.dst_port = conf->port;
436                         tcp_mask.hdr.dst_port =
437                                         rte_flow_item_tcp_mask.hdr.dst_port;
438                 }
439                 item.type = RTE_FLOW_ITEM_TYPE_TCP;
440                 item.spec = &tcp;
441                 item.mask = &tcp_mask;
442                 field = modify_tcp;
443         }
444         return flow_dv_convert_modify_action(&item, field, resource,
445                                              MLX5_MODIFICATION_TYPE_SET, error);
446 }
447
448 /**
449  * Convert modify-header set TTL action to DV specification.
450  *
451  * @param[in,out] resource
452  *   Pointer to the modify-header resource.
453  * @param[in] action
454  *   Pointer to action specification.
455  * @param[in] items
456  *   Pointer to rte_flow_item objects list.
457  * @param[in] attr
458  *   Pointer to flow attributes structure.
459  * @param[out] error
460  *   Pointer to the error structure.
461  *
462  * @return
463  *   0 on success, a negative errno value otherwise and rte_errno is set.
464  */
465 static int
466 flow_dv_convert_action_modify_ttl
467                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
468                          const struct rte_flow_action *action,
469                          const struct rte_flow_item *items,
470                          union flow_dv_attr *attr,
471                          struct rte_flow_error *error)
472 {
473         const struct rte_flow_action_set_ttl *conf =
474                 (const struct rte_flow_action_set_ttl *)(action->conf);
475         struct rte_flow_item item;
476         struct rte_flow_item_ipv4 ipv4;
477         struct rte_flow_item_ipv4 ipv4_mask;
478         struct rte_flow_item_ipv6 ipv6;
479         struct rte_flow_item_ipv6 ipv6_mask;
480         struct field_modify_info *field;
481
482         if (!attr->valid)
483                 flow_dv_attr_init(items, attr);
484         if (attr->ipv4) {
485                 memset(&ipv4, 0, sizeof(ipv4));
486                 memset(&ipv4_mask, 0, sizeof(ipv4_mask));
487                 ipv4.hdr.time_to_live = conf->ttl_value;
488                 ipv4_mask.hdr.time_to_live = 0xFF;
489                 item.type = RTE_FLOW_ITEM_TYPE_IPV4;
490                 item.spec = &ipv4;
491                 item.mask = &ipv4_mask;
492                 field = modify_ipv4;
493         }
494         if (attr->ipv6) {
495                 memset(&ipv6, 0, sizeof(ipv6));
496                 memset(&ipv6_mask, 0, sizeof(ipv6_mask));
497                 ipv6.hdr.hop_limits = conf->ttl_value;
498                 ipv6_mask.hdr.hop_limits = 0xFF;
499                 item.type = RTE_FLOW_ITEM_TYPE_IPV6;
500                 item.spec = &ipv6;
501                 item.mask = &ipv6_mask;
502                 field = modify_ipv6;
503         }
504         return flow_dv_convert_modify_action(&item, field, resource,
505                                              MLX5_MODIFICATION_TYPE_SET, error);
506 }
507
508 /**
509  * Convert modify-header decrement TTL action to DV specification.
510  *
511  * @param[in,out] resource
512  *   Pointer to the modify-header resource.
513  * @param[in] action
514  *   Pointer to action specification.
515  * @param[in] items
516  *   Pointer to rte_flow_item objects list.
517  * @param[in] attr
518  *   Pointer to flow attributes structure.
519  * @param[out] error
520  *   Pointer to the error structure.
521  *
522  * @return
523  *   0 on success, a negative errno value otherwise and rte_errno is set.
524  */
525 static int
526 flow_dv_convert_action_modify_dec_ttl
527                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
528                          const struct rte_flow_item *items,
529                          union flow_dv_attr *attr,
530                          struct rte_flow_error *error)
531 {
532         struct rte_flow_item item;
533         struct rte_flow_item_ipv4 ipv4;
534         struct rte_flow_item_ipv4 ipv4_mask;
535         struct rte_flow_item_ipv6 ipv6;
536         struct rte_flow_item_ipv6 ipv6_mask;
537         struct field_modify_info *field;
538
539         if (!attr->valid)
540                 flow_dv_attr_init(items, attr);
541         if (attr->ipv4) {
542                 memset(&ipv4, 0, sizeof(ipv4));
543                 memset(&ipv4_mask, 0, sizeof(ipv4_mask));
544                 ipv4.hdr.time_to_live = 0xFF;
545                 ipv4_mask.hdr.time_to_live = 0xFF;
546                 item.type = RTE_FLOW_ITEM_TYPE_IPV4;
547                 item.spec = &ipv4;
548                 item.mask = &ipv4_mask;
549                 field = modify_ipv4;
550         }
551         if (attr->ipv6) {
552                 memset(&ipv6, 0, sizeof(ipv6));
553                 memset(&ipv6_mask, 0, sizeof(ipv6_mask));
554                 ipv6.hdr.hop_limits = 0xFF;
555                 ipv6_mask.hdr.hop_limits = 0xFF;
556                 item.type = RTE_FLOW_ITEM_TYPE_IPV6;
557                 item.spec = &ipv6;
558                 item.mask = &ipv6_mask;
559                 field = modify_ipv6;
560         }
561         return flow_dv_convert_modify_action(&item, field, resource,
562                                              MLX5_MODIFICATION_TYPE_ADD, error);
563 }
564
565 /**
566  * Convert modify-header increment/decrement TCP Sequence number
567  * to DV specification.
568  *
569  * @param[in,out] resource
570  *   Pointer to the modify-header resource.
571  * @param[in] action
572  *   Pointer to action specification.
573  * @param[out] error
574  *   Pointer to the error structure.
575  *
576  * @return
577  *   0 on success, a negative errno value otherwise and rte_errno is set.
578  */
579 static int
580 flow_dv_convert_action_modify_tcp_seq
581                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
582                          const struct rte_flow_action *action,
583                          struct rte_flow_error *error)
584 {
585         const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
586         uint64_t value = rte_be_to_cpu_32(*conf);
587         struct rte_flow_item item;
588         struct rte_flow_item_tcp tcp;
589         struct rte_flow_item_tcp tcp_mask;
590
591         memset(&tcp, 0, sizeof(tcp));
592         memset(&tcp_mask, 0, sizeof(tcp_mask));
593         if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ)
594                 /*
595                  * The HW has no decrement operation, only increment operation.
596                  * To simulate decrement X from Y using increment operation
597                  * we need to add UINT32_MAX X times to Y.
598                  * Each adding of UINT32_MAX decrements Y by 1.
599                  */
600                 value *= UINT32_MAX;
601         tcp.hdr.sent_seq = rte_cpu_to_be_32((uint32_t)value);
602         tcp_mask.hdr.sent_seq = RTE_BE32(UINT32_MAX);
603         item.type = RTE_FLOW_ITEM_TYPE_TCP;
604         item.spec = &tcp;
605         item.mask = &tcp_mask;
606         return flow_dv_convert_modify_action(&item, modify_tcp, resource,
607                                              MLX5_MODIFICATION_TYPE_ADD, error);
608 }
609
610 /**
611  * Convert modify-header increment/decrement TCP Acknowledgment number
612  * to DV specification.
613  *
614  * @param[in,out] resource
615  *   Pointer to the modify-header resource.
616  * @param[in] action
617  *   Pointer to action specification.
618  * @param[out] error
619  *   Pointer to the error structure.
620  *
621  * @return
622  *   0 on success, a negative errno value otherwise and rte_errno is set.
623  */
624 static int
625 flow_dv_convert_action_modify_tcp_ack
626                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
627                          const struct rte_flow_action *action,
628                          struct rte_flow_error *error)
629 {
630         const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
631         uint64_t value = rte_be_to_cpu_32(*conf);
632         struct rte_flow_item item;
633         struct rte_flow_item_tcp tcp;
634         struct rte_flow_item_tcp tcp_mask;
635
636         memset(&tcp, 0, sizeof(tcp));
637         memset(&tcp_mask, 0, sizeof(tcp_mask));
638         if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK)
639                 /*
640                  * The HW has no decrement operation, only increment operation.
641                  * To simulate decrement X from Y using increment operation
642                  * we need to add UINT32_MAX X times to Y.
643                  * Each adding of UINT32_MAX decrements Y by 1.
644                  */
645                 value *= UINT32_MAX;
646         tcp.hdr.recv_ack = rte_cpu_to_be_32((uint32_t)value);
647         tcp_mask.hdr.recv_ack = RTE_BE32(UINT32_MAX);
648         item.type = RTE_FLOW_ITEM_TYPE_TCP;
649         item.spec = &tcp;
650         item.mask = &tcp_mask;
651         return flow_dv_convert_modify_action(&item, modify_tcp, resource,
652                                              MLX5_MODIFICATION_TYPE_ADD, error);
653 }
654
655 /**
656  * Validate META item.
657  *
658  * @param[in] dev
659  *   Pointer to the rte_eth_dev structure.
660  * @param[in] item
661  *   Item specification.
662  * @param[in] attr
663  *   Attributes of flow that includes this item.
664  * @param[out] error
665  *   Pointer to error structure.
666  *
667  * @return
668  *   0 on success, a negative errno value otherwise and rte_errno is set.
669  */
670 static int
671 flow_dv_validate_item_meta(struct rte_eth_dev *dev,
672                            const struct rte_flow_item *item,
673                            const struct rte_flow_attr *attr,
674                            struct rte_flow_error *error)
675 {
676         const struct rte_flow_item_meta *spec = item->spec;
677         const struct rte_flow_item_meta *mask = item->mask;
678         const struct rte_flow_item_meta nic_mask = {
679                 .data = RTE_BE32(UINT32_MAX)
680         };
681         int ret;
682         uint64_t offloads = dev->data->dev_conf.txmode.offloads;
683
684         if (!(offloads & DEV_TX_OFFLOAD_MATCH_METADATA))
685                 return rte_flow_error_set(error, EPERM,
686                                           RTE_FLOW_ERROR_TYPE_ITEM,
687                                           NULL,
688                                           "match on metadata offload "
689                                           "configuration is off for this port");
690         if (!spec)
691                 return rte_flow_error_set(error, EINVAL,
692                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
693                                           item->spec,
694                                           "data cannot be empty");
695         if (!spec->data)
696                 return rte_flow_error_set(error, EINVAL,
697                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
698                                           NULL,
699                                           "data cannot be zero");
700         if (!mask)
701                 mask = &rte_flow_item_meta_mask;
702         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
703                                         (const uint8_t *)&nic_mask,
704                                         sizeof(struct rte_flow_item_meta),
705                                         error);
706         if (ret < 0)
707                 return ret;
708         if (attr->ingress)
709                 return rte_flow_error_set(error, ENOTSUP,
710                                           RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
711                                           NULL,
712                                           "pattern not supported for ingress");
713         return 0;
714 }
715
716 /**
717  * Validate vport item.
718  *
719  * @param[in] dev
720  *   Pointer to the rte_eth_dev structure.
721  * @param[in] item
722  *   Item specification.
723  * @param[in] attr
724  *   Attributes of flow that includes this item.
725  * @param[in] item_flags
726  *   Bit-fields that holds the items detected until now.
727  * @param[out] error
728  *   Pointer to error structure.
729  *
730  * @return
731  *   0 on success, a negative errno value otherwise and rte_errno is set.
732  */
733 static int
734 flow_dv_validate_item_port_id(struct rte_eth_dev *dev,
735                               const struct rte_flow_item *item,
736                               const struct rte_flow_attr *attr,
737                               uint64_t item_flags,
738                               struct rte_flow_error *error)
739 {
740         const struct rte_flow_item_port_id *spec = item->spec;
741         const struct rte_flow_item_port_id *mask = item->mask;
742         const struct rte_flow_item_port_id switch_mask = {
743                         .id = 0xffffffff,
744         };
745         uint16_t esw_domain_id;
746         uint16_t item_port_esw_domain_id;
747         int ret;
748
749         if (!attr->transfer)
750                 return rte_flow_error_set(error, EINVAL,
751                                           RTE_FLOW_ERROR_TYPE_ITEM,
752                                           NULL,
753                                           "match on port id is valid only"
754                                           " when transfer flag is enabled");
755         if (item_flags & MLX5_FLOW_ITEM_PORT_ID)
756                 return rte_flow_error_set(error, ENOTSUP,
757                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
758                                           "multiple source ports are not"
759                                           " supported");
760         if (!mask)
761                 mask = &switch_mask;
762         if (mask->id != 0xffffffff)
763                 return rte_flow_error_set(error, ENOTSUP,
764                                            RTE_FLOW_ERROR_TYPE_ITEM_MASK,
765                                            mask,
766                                            "no support for partial mask on"
767                                            " \"id\" field");
768         ret = mlx5_flow_item_acceptable
769                                 (item, (const uint8_t *)mask,
770                                  (const uint8_t *)&rte_flow_item_port_id_mask,
771                                  sizeof(struct rte_flow_item_port_id),
772                                  error);
773         if (ret)
774                 return ret;
775         if (!spec)
776                 return 0;
777         ret = mlx5_port_to_eswitch_info(spec->id, &item_port_esw_domain_id,
778                                         NULL);
779         if (ret)
780                 return rte_flow_error_set(error, -ret,
781                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
782                                           "failed to obtain E-Switch info for"
783                                           " port");
784         ret = mlx5_port_to_eswitch_info(dev->data->port_id,
785                                         &esw_domain_id, NULL);
786         if (ret < 0)
787                 return rte_flow_error_set(error, -ret,
788                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
789                                           NULL,
790                                           "failed to obtain E-Switch info");
791         if (item_port_esw_domain_id != esw_domain_id)
792                 return rte_flow_error_set(error, -ret,
793                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
794                                           "cannot match on a port from a"
795                                           " different E-Switch");
796         return 0;
797 }
798
/**
 * Validate count action.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_count(struct rte_eth_dev *dev,
			      struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;

	/* Counters require DevX support in both firmware and rdma-core. */
	if (!priv->config.devx)
		goto notsup_err;
#ifdef HAVE_IBV_FLOW_DEVX_COUNTERS
	return 0;
#endif
	/* Reached by fallthrough when built without DevX counter support. */
notsup_err:
	return rte_flow_error_set
		      (error, ENOTSUP,
		       RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
		       NULL,
		       "count action not supported");
}
828
829 /**
830  * Validate the L2 encap action.
831  *
832  * @param[in] action_flags
833  *   Holds the actions detected until now.
834  * @param[in] action
835  *   Pointer to the encap action.
836  * @param[in] attr
837  *   Pointer to flow attributes
838  * @param[out] error
839  *   Pointer to error structure.
840  *
841  * @return
842  *   0 on success, a negative errno value otherwise and rte_errno is set.
843  */
844 static int
845 flow_dv_validate_action_l2_encap(uint64_t action_flags,
846                                  const struct rte_flow_action *action,
847                                  const struct rte_flow_attr *attr,
848                                  struct rte_flow_error *error)
849 {
850         if (!(action->conf))
851                 return rte_flow_error_set(error, EINVAL,
852                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
853                                           "configuration cannot be null");
854         if (action_flags & MLX5_FLOW_ACTION_DROP)
855                 return rte_flow_error_set(error, EINVAL,
856                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
857                                           "can't drop and encap in same flow");
858         if (action_flags & (MLX5_FLOW_ENCAP_ACTIONS | MLX5_FLOW_DECAP_ACTIONS))
859                 return rte_flow_error_set(error, EINVAL,
860                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
861                                           "can only have a single encap or"
862                                           " decap action in a flow");
863         if (!attr->transfer && attr->ingress)
864                 return rte_flow_error_set(error, ENOTSUP,
865                                           RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
866                                           NULL,
867                                           "encap action not supported for "
868                                           "ingress");
869         return 0;
870 }
871
872 /**
873  * Validate the L2 decap action.
874  *
875  * @param[in] action_flags
876  *   Holds the actions detected until now.
877  * @param[in] attr
878  *   Pointer to flow attributes
879  * @param[out] error
880  *   Pointer to error structure.
881  *
882  * @return
883  *   0 on success, a negative errno value otherwise and rte_errno is set.
884  */
885 static int
886 flow_dv_validate_action_l2_decap(uint64_t action_flags,
887                                  const struct rte_flow_attr *attr,
888                                  struct rte_flow_error *error)
889 {
890         if (action_flags & MLX5_FLOW_ACTION_DROP)
891                 return rte_flow_error_set(error, EINVAL,
892                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
893                                           "can't drop and decap in same flow");
894         if (action_flags & (MLX5_FLOW_ENCAP_ACTIONS | MLX5_FLOW_DECAP_ACTIONS))
895                 return rte_flow_error_set(error, EINVAL,
896                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
897                                           "can only have a single encap or"
898                                           " decap action in a flow");
899         if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
900                 return rte_flow_error_set(error, EINVAL,
901                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
902                                           "can't have decap action after"
903                                           " modify action");
904         if (attr->egress)
905                 return rte_flow_error_set(error, ENOTSUP,
906                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
907                                           NULL,
908                                           "decap action not supported for "
909                                           "egress");
910         return 0;
911 }
912
913 /**
914  * Validate the raw encap action.
915  *
916  * @param[in] action_flags
917  *   Holds the actions detected until now.
918  * @param[in] action
919  *   Pointer to the encap action.
920  * @param[in] attr
921  *   Pointer to flow attributes
922  * @param[out] error
923  *   Pointer to error structure.
924  *
925  * @return
926  *   0 on success, a negative errno value otherwise and rte_errno is set.
927  */
928 static int
929 flow_dv_validate_action_raw_encap(uint64_t action_flags,
930                                   const struct rte_flow_action *action,
931                                   const struct rte_flow_attr *attr,
932                                   struct rte_flow_error *error)
933 {
934         if (!(action->conf))
935                 return rte_flow_error_set(error, EINVAL,
936                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
937                                           "configuration cannot be null");
938         if (action_flags & MLX5_FLOW_ACTION_DROP)
939                 return rte_flow_error_set(error, EINVAL,
940                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
941                                           "can't drop and encap in same flow");
942         if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
943                 return rte_flow_error_set(error, EINVAL,
944                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
945                                           "can only have a single encap"
946                                           " action in a flow");
947         /* encap without preceding decap is not supported for ingress */
948         if (!attr->transfer &&  attr->ingress &&
949             !(action_flags & MLX5_FLOW_ACTION_RAW_DECAP))
950                 return rte_flow_error_set(error, ENOTSUP,
951                                           RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
952                                           NULL,
953                                           "encap action not supported for "
954                                           "ingress");
955         return 0;
956 }
957
958 /**
959  * Validate the raw decap action.
960  *
961  * @param[in] action_flags
962  *   Holds the actions detected until now.
963  * @param[in] action
964  *   Pointer to the encap action.
965  * @param[in] attr
966  *   Pointer to flow attributes
967  * @param[out] error
968  *   Pointer to error structure.
969  *
970  * @return
971  *   0 on success, a negative errno value otherwise and rte_errno is set.
972  */
973 static int
974 flow_dv_validate_action_raw_decap(uint64_t action_flags,
975                                   const struct rte_flow_action *action,
976                                   const struct rte_flow_attr *attr,
977                                   struct rte_flow_error *error)
978 {
979         if (action_flags & MLX5_FLOW_ACTION_DROP)
980                 return rte_flow_error_set(error, EINVAL,
981                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
982                                           "can't drop and decap in same flow");
983         if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
984                 return rte_flow_error_set(error, EINVAL,
985                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
986                                           "can't have encap action before"
987                                           " decap action");
988         if (action_flags & MLX5_FLOW_DECAP_ACTIONS)
989                 return rte_flow_error_set(error, EINVAL,
990                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
991                                           "can only have a single decap"
992                                           " action in a flow");
993         if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
994                 return rte_flow_error_set(error, EINVAL,
995                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
996                                           "can't have decap action after"
997                                           " modify action");
998         /* decap action is valid on egress only if it is followed by encap */
999         if (attr->egress) {
1000                 for (; action->type != RTE_FLOW_ACTION_TYPE_END &&
1001                        action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP;
1002                        action++) {
1003                 }
1004                 if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP)
1005                         return rte_flow_error_set
1006                                         (error, ENOTSUP,
1007                                          RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1008                                          NULL, "decap action not supported"
1009                                          " for egress");
1010         }
1011         return 0;
1012 }
1013
/**
 * Find existing encap/decap resource or create and register a new one.
 *
 * On a cache hit only the reference counter is bumped; on a miss a new
 * packet-reformat verbs action is created and inserted into the shared
 * context list. The result is stored in dev_flow->dv.encap_decap.
 *
 * @param dev[in, out]
 *   Pointer to rte_eth_dev structure.
 * @param[in, out] resource
 *   Pointer to encap/decap resource.
 * @param[in, out] dev_flow
 *   Pointer to the dev_flow.
 * @param[out] error
 *   pointer to error structure.
 *
 * @return
 *   0 on success otherwise -errno and errno is set.
 */
static int
flow_dv_encap_decap_resource_register
			(struct rte_eth_dev *dev,
			 struct mlx5_flow_dv_encap_decap_resource *resource,
			 struct mlx5_flow *dev_flow,
			 struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_ibv_shared *sh = priv->sh;
	struct mlx5_flow_dv_encap_decap_resource *cache_resource;
	struct rte_flow *flow = dev_flow->flow;
	struct mlx5dv_dr_domain *domain;

	/*
	 * Group 0 flows target the root table, so mark the action with
	 * the root-level flag (MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL == 1).
	 */
	resource->flags = flow->group ? 0 : 1;
	/* Pick the DR domain matching the flow table type. */
	if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
		domain = sh->fdb_domain;
	else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
		domain = sh->rx_domain;
	else
		domain = sh->tx_domain;

	/* Lookup a matching resource from cache. */
	LIST_FOREACH(cache_resource, &sh->encaps_decaps, next) {
		if (resource->reformat_type == cache_resource->reformat_type &&
		    resource->ft_type == cache_resource->ft_type &&
		    resource->flags == cache_resource->flags &&
		    resource->size == cache_resource->size &&
		    !memcmp((const void *)resource->buf,
			    (const void *)cache_resource->buf,
			    resource->size)) {
			DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d++",
				(void *)cache_resource,
				rte_atomic32_read(&cache_resource->refcnt));
			rte_atomic32_inc(&cache_resource->refcnt);
			dev_flow->dv.encap_decap = cache_resource;
			return 0;
		}
	}
	/* Register new encap/decap resource. */
	cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
	if (!cache_resource)
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  "cannot allocate resource memory");
	*cache_resource = *resource;
	/* A zero-sized reformat (e.g. L2 decap) must pass a NULL buffer. */
	cache_resource->verbs_action =
		mlx5_glue->dv_create_flow_action_packet_reformat
			(sh->ctx, cache_resource->reformat_type,
			 cache_resource->ft_type, domain, cache_resource->flags,
			 cache_resource->size,
			 (cache_resource->size ? cache_resource->buf : NULL));
	if (!cache_resource->verbs_action) {
		rte_free(cache_resource);
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, "cannot create action");
	}
	rte_atomic32_init(&cache_resource->refcnt);
	rte_atomic32_inc(&cache_resource->refcnt);
	LIST_INSERT_HEAD(&sh->encaps_decaps, cache_resource, next);
	dev_flow->dv.encap_decap = cache_resource;
	DRV_LOG(DEBUG, "new encap/decap resource %p: refcnt %d++",
		(void *)cache_resource,
		rte_atomic32_read(&cache_resource->refcnt));
	return 0;
}
1095
1096 /**
1097  * Find existing table jump resource or create and register a new one.
1098  *
1099  * @param dev[in, out]
1100  *   Pointer to rte_eth_dev structure.
1101  * @param[in, out] resource
1102  *   Pointer to jump table resource.
1103  * @parm[in, out] dev_flow
1104  *   Pointer to the dev_flow.
1105  * @param[out] error
1106  *   pointer to error structure.
1107  *
1108  * @return
1109  *   0 on success otherwise -errno and errno is set.
1110  */
1111 static int
1112 flow_dv_jump_tbl_resource_register
1113                         (struct rte_eth_dev *dev,
1114                          struct mlx5_flow_dv_jump_tbl_resource *resource,
1115                          struct mlx5_flow *dev_flow,
1116                          struct rte_flow_error *error)
1117 {
1118         struct mlx5_priv *priv = dev->data->dev_private;
1119         struct mlx5_ibv_shared *sh = priv->sh;
1120         struct mlx5_flow_dv_jump_tbl_resource *cache_resource;
1121
1122         /* Lookup a matching resource from cache. */
1123         LIST_FOREACH(cache_resource, &sh->jump_tbl, next) {
1124                 if (resource->tbl == cache_resource->tbl) {
1125                         DRV_LOG(DEBUG, "jump table resource resource %p: refcnt %d++",
1126                                 (void *)cache_resource,
1127                                 rte_atomic32_read(&cache_resource->refcnt));
1128                         rte_atomic32_inc(&cache_resource->refcnt);
1129                         dev_flow->dv.jump = cache_resource;
1130                         return 0;
1131                 }
1132         }
1133         /* Register new jump table resource. */
1134         cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
1135         if (!cache_resource)
1136                 return rte_flow_error_set(error, ENOMEM,
1137                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1138                                           "cannot allocate resource memory");
1139         *cache_resource = *resource;
1140         cache_resource->action =
1141                 mlx5_glue->dr_create_flow_action_dest_flow_tbl
1142                 (resource->tbl->obj);
1143         if (!cache_resource->action) {
1144                 rte_free(cache_resource);
1145                 return rte_flow_error_set(error, ENOMEM,
1146                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1147                                           NULL, "cannot create action");
1148         }
1149         rte_atomic32_init(&cache_resource->refcnt);
1150         rte_atomic32_inc(&cache_resource->refcnt);
1151         LIST_INSERT_HEAD(&sh->jump_tbl, cache_resource, next);
1152         dev_flow->dv.jump = cache_resource;
1153         DRV_LOG(DEBUG, "new jump table  resource %p: refcnt %d++",
1154                 (void *)cache_resource,
1155                 rte_atomic32_read(&cache_resource->refcnt));
1156         return 0;
1157 }
1158
1159 /**
1160  * Find existing table port ID resource or create and register a new one.
1161  *
1162  * @param dev[in, out]
1163  *   Pointer to rte_eth_dev structure.
1164  * @param[in, out] resource
1165  *   Pointer to port ID action resource.
1166  * @parm[in, out] dev_flow
1167  *   Pointer to the dev_flow.
1168  * @param[out] error
1169  *   pointer to error structure.
1170  *
1171  * @return
1172  *   0 on success otherwise -errno and errno is set.
1173  */
1174 static int
1175 flow_dv_port_id_action_resource_register
1176                         (struct rte_eth_dev *dev,
1177                          struct mlx5_flow_dv_port_id_action_resource *resource,
1178                          struct mlx5_flow *dev_flow,
1179                          struct rte_flow_error *error)
1180 {
1181         struct mlx5_priv *priv = dev->data->dev_private;
1182         struct mlx5_ibv_shared *sh = priv->sh;
1183         struct mlx5_flow_dv_port_id_action_resource *cache_resource;
1184
1185         /* Lookup a matching resource from cache. */
1186         LIST_FOREACH(cache_resource, &sh->port_id_action_list, next) {
1187                 if (resource->port_id == cache_resource->port_id) {
1188                         DRV_LOG(DEBUG, "port id action resource resource %p: "
1189                                 "refcnt %d++",
1190                                 (void *)cache_resource,
1191                                 rte_atomic32_read(&cache_resource->refcnt));
1192                         rte_atomic32_inc(&cache_resource->refcnt);
1193                         dev_flow->dv.port_id_action = cache_resource;
1194                         return 0;
1195                 }
1196         }
1197         /* Register new port id action resource. */
1198         cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
1199         if (!cache_resource)
1200                 return rte_flow_error_set(error, ENOMEM,
1201                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1202                                           "cannot allocate resource memory");
1203         *cache_resource = *resource;
1204         cache_resource->action =
1205                 mlx5_glue->dr_create_flow_action_dest_vport
1206                         (priv->sh->fdb_domain, resource->port_id);
1207         if (!cache_resource->action) {
1208                 rte_free(cache_resource);
1209                 return rte_flow_error_set(error, ENOMEM,
1210                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1211                                           NULL, "cannot create action");
1212         }
1213         rte_atomic32_init(&cache_resource->refcnt);
1214         rte_atomic32_inc(&cache_resource->refcnt);
1215         LIST_INSERT_HEAD(&sh->port_id_action_list, cache_resource, next);
1216         dev_flow->dv.port_id_action = cache_resource;
1217         DRV_LOG(DEBUG, "new port id action resource %p: refcnt %d++",
1218                 (void *)cache_resource,
1219                 rte_atomic32_read(&cache_resource->refcnt));
1220         return 0;
1221 }
1222
1223 /**
1224  * Get the size of specific rte_flow_item_type
1225  *
1226  * @param[in] item_type
1227  *   Tested rte_flow_item_type.
1228  *
1229  * @return
1230  *   sizeof struct item_type, 0 if void or irrelevant.
1231  */
1232 static size_t
1233 flow_dv_get_item_len(const enum rte_flow_item_type item_type)
1234 {
1235         size_t retval;
1236
1237         switch (item_type) {
1238         case RTE_FLOW_ITEM_TYPE_ETH:
1239                 retval = sizeof(struct rte_flow_item_eth);
1240                 break;
1241         case RTE_FLOW_ITEM_TYPE_VLAN:
1242                 retval = sizeof(struct rte_flow_item_vlan);
1243                 break;
1244         case RTE_FLOW_ITEM_TYPE_IPV4:
1245                 retval = sizeof(struct rte_flow_item_ipv4);
1246                 break;
1247         case RTE_FLOW_ITEM_TYPE_IPV6:
1248                 retval = sizeof(struct rte_flow_item_ipv6);
1249                 break;
1250         case RTE_FLOW_ITEM_TYPE_UDP:
1251                 retval = sizeof(struct rte_flow_item_udp);
1252                 break;
1253         case RTE_FLOW_ITEM_TYPE_TCP:
1254                 retval = sizeof(struct rte_flow_item_tcp);
1255                 break;
1256         case RTE_FLOW_ITEM_TYPE_VXLAN:
1257                 retval = sizeof(struct rte_flow_item_vxlan);
1258                 break;
1259         case RTE_FLOW_ITEM_TYPE_GRE:
1260                 retval = sizeof(struct rte_flow_item_gre);
1261                 break;
1262         case RTE_FLOW_ITEM_TYPE_NVGRE:
1263                 retval = sizeof(struct rte_flow_item_nvgre);
1264                 break;
1265         case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
1266                 retval = sizeof(struct rte_flow_item_vxlan_gpe);
1267                 break;
1268         case RTE_FLOW_ITEM_TYPE_MPLS:
1269                 retval = sizeof(struct rte_flow_item_mpls);
1270                 break;
1271         case RTE_FLOW_ITEM_TYPE_VOID: /* Fall through. */
1272         default:
1273                 retval = 0;
1274                 break;
1275         }
1276         return retval;
1277 }
1278
1279 #define MLX5_ENCAP_IPV4_VERSION         0x40
1280 #define MLX5_ENCAP_IPV4_IHL_MIN         0x05
1281 #define MLX5_ENCAP_IPV4_TTL_DEF         0x40
1282 #define MLX5_ENCAP_IPV6_VTC_FLOW        0x60000000
1283 #define MLX5_ENCAP_IPV6_HOP_LIMIT       0xff
1284 #define MLX5_ENCAP_VXLAN_FLAGS          0x08000000
1285 #define MLX5_ENCAP_VXLAN_GPE_FLAGS      0x04
1286
/**
 * Convert the encap action data from list of rte_flow_item to raw buffer
 *
 * Walks the item list, copying each item's spec into @p buf to build the
 * raw header stack, and fills in conventional defaults (ethertype,
 * next-protocol, TTL, VXLAN flags/port, ...) for fields the user left
 * zeroed. Item ordering is validated implicitly: each header type
 * requires its expected predecessor to have been seen already.
 *
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[out] buf
 *   Pointer to the output buffer.
 * @param[out] size
 *   Pointer to the output buffer size.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
			   size_t *size, struct rte_flow_error *error)
{
	/* Pointers into buf for headers already emitted; NULL = not seen. */
	struct rte_ether_hdr *eth = NULL;
	struct rte_vlan_hdr *vlan = NULL;
	struct rte_ipv4_hdr *ipv4 = NULL;
	struct rte_ipv6_hdr *ipv6 = NULL;
	struct rte_udp_hdr *udp = NULL;
	struct rte_vxlan_hdr *vxlan = NULL;
	struct rte_vxlan_gpe_hdr *vxlan_gpe = NULL;
	struct rte_gre_hdr *gre = NULL;
	size_t len;
	size_t temp_size = 0;	/* Bytes written to buf so far. */

	if (!items)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  NULL, "invalid empty data");
	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
		len = flow_dv_get_item_len(items->type);
		if (len + temp_size > MLX5_ENCAP_MAX_LEN)
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  (void *)items->type,
						  "items total size is too big"
						  " for encap action");
		/* Copy the spec, then patch defaults in place below. */
		rte_memcpy((void *)&buf[temp_size], items->spec, len);
		switch (items->type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			eth = (struct rte_ether_hdr *)&buf[temp_size];
			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			vlan = (struct rte_vlan_hdr *)&buf[temp_size];
			if (!eth)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"eth header not found");
			/* Zero ethertype means "not set by user". */
			if (!eth->ether_type)
				eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_VLAN);
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			ipv4 = (struct rte_ipv4_hdr *)&buf[temp_size];
			if (!vlan && !eth)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"neither eth nor vlan"
						" header found");
			/* Prefer patching the innermost L2 header seen. */
			if (vlan && !vlan->eth_proto)
				vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV4);
			else if (eth && !eth->ether_type)
				eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV4);
			if (!ipv4->version_ihl)
				ipv4->version_ihl = MLX5_ENCAP_IPV4_VERSION |
						    MLX5_ENCAP_IPV4_IHL_MIN;
			if (!ipv4->time_to_live)
				ipv4->time_to_live = MLX5_ENCAP_IPV4_TTL_DEF;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			ipv6 = (struct rte_ipv6_hdr *)&buf[temp_size];
			if (!vlan && !eth)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"neither eth nor vlan"
						" header found");
			if (vlan && !vlan->eth_proto)
				vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV6);
			else if (eth && !eth->ether_type)
				eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV6);
			if (!ipv6->vtc_flow)
				ipv6->vtc_flow =
					RTE_BE32(MLX5_ENCAP_IPV6_VTC_FLOW);
			if (!ipv6->hop_limits)
				ipv6->hop_limits = MLX5_ENCAP_IPV6_HOP_LIMIT;
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			udp = (struct rte_udp_hdr *)&buf[temp_size];
			if (!ipv4 && !ipv6)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"ip header not found");
			if (ipv4 && !ipv4->next_proto_id)
				ipv4->next_proto_id = IPPROTO_UDP;
			else if (ipv6 && !ipv6->proto)
				ipv6->proto = IPPROTO_UDP;
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			vxlan = (struct rte_vxlan_hdr *)&buf[temp_size];
			if (!udp)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"udp header not found");
			if (!udp->dst_port)
				udp->dst_port = RTE_BE16(MLX5_UDP_PORT_VXLAN);
			if (!vxlan->vx_flags)
				vxlan->vx_flags =
					RTE_BE32(MLX5_ENCAP_VXLAN_FLAGS);
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
			vxlan_gpe = (struct rte_vxlan_gpe_hdr *)&buf[temp_size];
			if (!udp)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"udp header not found");
			/* GPE next-protocol has no default; must be given. */
			if (!vxlan_gpe->proto)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"next protocol not found");
			if (!udp->dst_port)
				udp->dst_port =
					RTE_BE16(MLX5_UDP_PORT_VXLAN_GPE);
			if (!vxlan_gpe->vx_flags)
				vxlan_gpe->vx_flags =
						MLX5_ENCAP_VXLAN_GPE_FLAGS;
			break;
		case RTE_FLOW_ITEM_TYPE_GRE:
		case RTE_FLOW_ITEM_TYPE_NVGRE:
			/* NVGRE is laid out here through the GRE header view. */
			gre = (struct rte_gre_hdr *)&buf[temp_size];
			/* GRE next-protocol has no default; must be given. */
			if (!gre->proto)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"next protocol not found");
			if (!ipv4 && !ipv6)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"ip header not found");
			if (ipv4 && !ipv4->next_proto_id)
				ipv4->next_proto_id = IPPROTO_GRE;
			else if (ipv6 && !ipv6->proto)
				ipv6->proto = IPPROTO_GRE;
			break;
		case RTE_FLOW_ITEM_TYPE_VOID:
			break;
		default:
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  (void *)items->type,
						  "unsupported item type");
			break;
		}
		temp_size += len;
	}
	*size = temp_size;
	return 0;
}
1456
1457 /**
1458  * Convert L2 encap action to DV specification.
1459  *
1460  * @param[in] dev
1461  *   Pointer to rte_eth_dev structure.
1462  * @param[in] action
1463  *   Pointer to action structure.
1464  * @param[in, out] dev_flow
1465  *   Pointer to the mlx5_flow.
1466  * @param[in] transfer
1467  *   Mark if the flow is E-Switch flow.
1468  * @param[out] error
1469  *   Pointer to the error structure.
1470  *
1471  * @return
1472  *   0 on success, a negative errno value otherwise and rte_errno is set.
1473  */
1474 static int
1475 flow_dv_create_action_l2_encap(struct rte_eth_dev *dev,
1476                                const struct rte_flow_action *action,
1477                                struct mlx5_flow *dev_flow,
1478                                uint8_t transfer,
1479                                struct rte_flow_error *error)
1480 {
1481         const struct rte_flow_item *encap_data;
1482         const struct rte_flow_action_raw_encap *raw_encap_data;
1483         struct mlx5_flow_dv_encap_decap_resource res = {
1484                 .reformat_type =
1485                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL,
1486                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
1487                                       MLX5DV_FLOW_TABLE_TYPE_NIC_TX,
1488         };
1489
1490         if (action->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
1491                 raw_encap_data =
1492                         (const struct rte_flow_action_raw_encap *)action->conf;
1493                 res.size = raw_encap_data->size;
1494                 memcpy(res.buf, raw_encap_data->data, res.size);
1495         } else {
1496                 if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP)
1497                         encap_data =
1498                                 ((const struct rte_flow_action_vxlan_encap *)
1499                                                 action->conf)->definition;
1500                 else
1501                         encap_data =
1502                                 ((const struct rte_flow_action_nvgre_encap *)
1503                                                 action->conf)->definition;
1504                 if (flow_dv_convert_encap_data(encap_data, res.buf,
1505                                                &res.size, error))
1506                         return -rte_errno;
1507         }
1508         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
1509                 return rte_flow_error_set(error, EINVAL,
1510                                           RTE_FLOW_ERROR_TYPE_ACTION,
1511                                           NULL, "can't create L2 encap action");
1512         return 0;
1513 }
1514
1515 /**
1516  * Convert L2 decap action to DV specification.
1517  *
1518  * @param[in] dev
1519  *   Pointer to rte_eth_dev structure.
1520  * @param[in, out] dev_flow
1521  *   Pointer to the mlx5_flow.
1522  * @param[in] transfer
1523  *   Mark if the flow is E-Switch flow.
1524  * @param[out] error
1525  *   Pointer to the error structure.
1526  *
1527  * @return
1528  *   0 on success, a negative errno value otherwise and rte_errno is set.
1529  */
1530 static int
1531 flow_dv_create_action_l2_decap(struct rte_eth_dev *dev,
1532                                struct mlx5_flow *dev_flow,
1533                                uint8_t transfer,
1534                                struct rte_flow_error *error)
1535 {
1536         struct mlx5_flow_dv_encap_decap_resource res = {
1537                 .size = 0,
1538                 .reformat_type =
1539                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2,
1540                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
1541                                       MLX5DV_FLOW_TABLE_TYPE_NIC_RX,
1542         };
1543
1544         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
1545                 return rte_flow_error_set(error, EINVAL,
1546                                           RTE_FLOW_ERROR_TYPE_ACTION,
1547                                           NULL, "can't create L2 decap action");
1548         return 0;
1549 }
1550
1551 /**
1552  * Convert raw decap/encap (L3 tunnel) action to DV specification.
1553  *
1554  * @param[in] dev
1555  *   Pointer to rte_eth_dev structure.
1556  * @param[in] action
1557  *   Pointer to action structure.
1558  * @param[in, out] dev_flow
1559  *   Pointer to the mlx5_flow.
1560  * @param[in] attr
1561  *   Pointer to the flow attributes.
1562  * @param[out] error
1563  *   Pointer to the error structure.
1564  *
1565  * @return
1566  *   0 on success, a negative errno value otherwise and rte_errno is set.
1567  */
1568 static int
1569 flow_dv_create_action_raw_encap(struct rte_eth_dev *dev,
1570                                 const struct rte_flow_action *action,
1571                                 struct mlx5_flow *dev_flow,
1572                                 const struct rte_flow_attr *attr,
1573                                 struct rte_flow_error *error)
1574 {
1575         const struct rte_flow_action_raw_encap *encap_data;
1576         struct mlx5_flow_dv_encap_decap_resource res;
1577
1578         encap_data = (const struct rte_flow_action_raw_encap *)action->conf;
1579         res.size = encap_data->size;
1580         memcpy(res.buf, encap_data->data, res.size);
1581         res.reformat_type = attr->egress ?
1582                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL :
1583                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2;
1584         if (attr->transfer)
1585                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
1586         else
1587                 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
1588                                              MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
1589         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
1590                 return rte_flow_error_set(error, EINVAL,
1591                                           RTE_FLOW_ERROR_TYPE_ACTION,
1592                                           NULL, "can't create encap action");
1593         return 0;
1594 }
1595
1596 /**
1597  * Validate the modify-header actions.
1598  *
1599  * @param[in] action_flags
1600  *   Holds the actions detected until now.
1601  * @param[in] action
1602  *   Pointer to the modify action.
1603  * @param[out] error
1604  *   Pointer to error structure.
1605  *
1606  * @return
1607  *   0 on success, a negative errno value otherwise and rte_errno is set.
1608  */
1609 static int
1610 flow_dv_validate_action_modify_hdr(const uint64_t action_flags,
1611                                    const struct rte_flow_action *action,
1612                                    struct rte_flow_error *error)
1613 {
1614         if (action->type != RTE_FLOW_ACTION_TYPE_DEC_TTL && !action->conf)
1615                 return rte_flow_error_set(error, EINVAL,
1616                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1617                                           NULL, "action configuration not set");
1618         if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
1619                 return rte_flow_error_set(error, EINVAL,
1620                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1621                                           "can't have encap action before"
1622                                           " modify action");
1623         return 0;
1624 }
1625
1626 /**
1627  * Validate the modify-header MAC address actions.
1628  *
1629  * @param[in] action_flags
1630  *   Holds the actions detected until now.
1631  * @param[in] action
1632  *   Pointer to the modify action.
1633  * @param[in] item_flags
1634  *   Holds the items detected.
1635  * @param[out] error
1636  *   Pointer to error structure.
1637  *
1638  * @return
1639  *   0 on success, a negative errno value otherwise and rte_errno is set.
1640  */
1641 static int
1642 flow_dv_validate_action_modify_mac(const uint64_t action_flags,
1643                                    const struct rte_flow_action *action,
1644                                    const uint64_t item_flags,
1645                                    struct rte_flow_error *error)
1646 {
1647         int ret = 0;
1648
1649         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1650         if (!ret) {
1651                 if (!(item_flags & MLX5_FLOW_LAYER_L2))
1652                         return rte_flow_error_set(error, EINVAL,
1653                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1654                                                   NULL,
1655                                                   "no L2 item in pattern");
1656         }
1657         return ret;
1658 }
1659
1660 /**
1661  * Validate the modify-header IPv4 address actions.
1662  *
1663  * @param[in] action_flags
1664  *   Holds the actions detected until now.
1665  * @param[in] action
1666  *   Pointer to the modify action.
1667  * @param[in] item_flags
1668  *   Holds the items detected.
1669  * @param[out] error
1670  *   Pointer to error structure.
1671  *
1672  * @return
1673  *   0 on success, a negative errno value otherwise and rte_errno is set.
1674  */
1675 static int
1676 flow_dv_validate_action_modify_ipv4(const uint64_t action_flags,
1677                                     const struct rte_flow_action *action,
1678                                     const uint64_t item_flags,
1679                                     struct rte_flow_error *error)
1680 {
1681         int ret = 0;
1682
1683         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1684         if (!ret) {
1685                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV4))
1686                         return rte_flow_error_set(error, EINVAL,
1687                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1688                                                   NULL,
1689                                                   "no ipv4 item in pattern");
1690         }
1691         return ret;
1692 }
1693
1694 /**
1695  * Validate the modify-header IPv6 address actions.
1696  *
1697  * @param[in] action_flags
1698  *   Holds the actions detected until now.
1699  * @param[in] action
1700  *   Pointer to the modify action.
1701  * @param[in] item_flags
1702  *   Holds the items detected.
1703  * @param[out] error
1704  *   Pointer to error structure.
1705  *
1706  * @return
1707  *   0 on success, a negative errno value otherwise and rte_errno is set.
1708  */
1709 static int
1710 flow_dv_validate_action_modify_ipv6(const uint64_t action_flags,
1711                                     const struct rte_flow_action *action,
1712                                     const uint64_t item_flags,
1713                                     struct rte_flow_error *error)
1714 {
1715         int ret = 0;
1716
1717         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1718         if (!ret) {
1719                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV6))
1720                         return rte_flow_error_set(error, EINVAL,
1721                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1722                                                   NULL,
1723                                                   "no ipv6 item in pattern");
1724         }
1725         return ret;
1726 }
1727
1728 /**
1729  * Validate the modify-header TP actions.
1730  *
1731  * @param[in] action_flags
1732  *   Holds the actions detected until now.
1733  * @param[in] action
1734  *   Pointer to the modify action.
1735  * @param[in] item_flags
1736  *   Holds the items detected.
1737  * @param[out] error
1738  *   Pointer to error structure.
1739  *
1740  * @return
1741  *   0 on success, a negative errno value otherwise and rte_errno is set.
1742  */
1743 static int
1744 flow_dv_validate_action_modify_tp(const uint64_t action_flags,
1745                                   const struct rte_flow_action *action,
1746                                   const uint64_t item_flags,
1747                                   struct rte_flow_error *error)
1748 {
1749         int ret = 0;
1750
1751         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1752         if (!ret) {
1753                 if (!(item_flags & MLX5_FLOW_LAYER_L4))
1754                         return rte_flow_error_set(error, EINVAL,
1755                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1756                                                   NULL, "no transport layer "
1757                                                   "in pattern");
1758         }
1759         return ret;
1760 }
1761
1762 /**
1763  * Validate the modify-header actions of increment/decrement
1764  * TCP Sequence-number.
1765  *
1766  * @param[in] action_flags
1767  *   Holds the actions detected until now.
1768  * @param[in] action
1769  *   Pointer to the modify action.
1770  * @param[in] item_flags
1771  *   Holds the items detected.
1772  * @param[out] error
1773  *   Pointer to error structure.
1774  *
1775  * @return
1776  *   0 on success, a negative errno value otherwise and rte_errno is set.
1777  */
1778 static int
1779 flow_dv_validate_action_modify_tcp_seq(const uint64_t action_flags,
1780                                        const struct rte_flow_action *action,
1781                                        const uint64_t item_flags,
1782                                        struct rte_flow_error *error)
1783 {
1784         int ret = 0;
1785
1786         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1787         if (!ret) {
1788                 if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_TCP))
1789                         return rte_flow_error_set(error, EINVAL,
1790                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1791                                                   NULL, "no TCP item in"
1792                                                   " pattern");
1793                 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ &&
1794                         (action_flags & MLX5_FLOW_ACTION_DEC_TCP_SEQ)) ||
1795                     (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ &&
1796                         (action_flags & MLX5_FLOW_ACTION_INC_TCP_SEQ)))
1797                         return rte_flow_error_set(error, EINVAL,
1798                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1799                                                   NULL,
1800                                                   "cannot decrease and increase"
1801                                                   " TCP sequence number"
1802                                                   " at the same time");
1803         }
1804         return ret;
1805 }
1806
1807 /**
1808  * Validate the modify-header actions of increment/decrement
1809  * TCP Acknowledgment number.
1810  *
1811  * @param[in] action_flags
1812  *   Holds the actions detected until now.
1813  * @param[in] action
1814  *   Pointer to the modify action.
1815  * @param[in] item_flags
1816  *   Holds the items detected.
1817  * @param[out] error
1818  *   Pointer to error structure.
1819  *
1820  * @return
1821  *   0 on success, a negative errno value otherwise and rte_errno is set.
1822  */
1823 static int
1824 flow_dv_validate_action_modify_tcp_ack(const uint64_t action_flags,
1825                                        const struct rte_flow_action *action,
1826                                        const uint64_t item_flags,
1827                                        struct rte_flow_error *error)
1828 {
1829         int ret = 0;
1830
1831         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1832         if (!ret) {
1833                 if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_TCP))
1834                         return rte_flow_error_set(error, EINVAL,
1835                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1836                                                   NULL, "no TCP item in"
1837                                                   " pattern");
1838                 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_ACK &&
1839                         (action_flags & MLX5_FLOW_ACTION_DEC_TCP_ACK)) ||
1840                     (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK &&
1841                         (action_flags & MLX5_FLOW_ACTION_INC_TCP_ACK)))
1842                         return rte_flow_error_set(error, EINVAL,
1843                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1844                                                   NULL,
1845                                                   "cannot decrease and increase"
1846                                                   " TCP acknowledgment number"
1847                                                   " at the same time");
1848         }
1849         return ret;
1850 }
1851
1852 /**
1853  * Validate the modify-header TTL actions.
1854  *
1855  * @param[in] action_flags
1856  *   Holds the actions detected until now.
1857  * @param[in] action
1858  *   Pointer to the modify action.
1859  * @param[in] item_flags
1860  *   Holds the items detected.
1861  * @param[out] error
1862  *   Pointer to error structure.
1863  *
1864  * @return
1865  *   0 on success, a negative errno value otherwise and rte_errno is set.
1866  */
1867 static int
1868 flow_dv_validate_action_modify_ttl(const uint64_t action_flags,
1869                                    const struct rte_flow_action *action,
1870                                    const uint64_t item_flags,
1871                                    struct rte_flow_error *error)
1872 {
1873         int ret = 0;
1874
1875         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1876         if (!ret) {
1877                 if (!(item_flags & MLX5_FLOW_LAYER_L3))
1878                         return rte_flow_error_set(error, EINVAL,
1879                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1880                                                   NULL,
1881                                                   "no IP protocol in pattern");
1882         }
1883         return ret;
1884 }
1885
1886 /**
1887  * Validate jump action.
1888  *
1889  * @param[in] action
1890  *   Pointer to the modify action.
1891  * @param[in] group
1892  *   The group of the current flow.
1893  * @param[out] error
1894  *   Pointer to error structure.
1895  *
1896  * @return
1897  *   0 on success, a negative errno value otherwise and rte_errno is set.
1898  */
1899 static int
1900 flow_dv_validate_action_jump(const struct rte_flow_action *action,
1901                              uint32_t group,
1902                              struct rte_flow_error *error)
1903 {
1904         if (action->type != RTE_FLOW_ACTION_TYPE_JUMP && !action->conf)
1905                 return rte_flow_error_set(error, EINVAL,
1906                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1907                                           NULL, "action configuration not set");
1908         if (group >= ((const struct rte_flow_action_jump *)action->conf)->group)
1909                 return rte_flow_error_set(error, EINVAL,
1910                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1911                                           "target group must be higher then"
1912                                           " the current flow group");
1913         return 0;
1914 }
1915
1916 /*
1917  * Validate the port_id action.
1918  *
1919  * @param[in] dev
1920  *   Pointer to rte_eth_dev structure.
1921  * @param[in] action_flags
1922  *   Bit-fields that holds the actions detected until now.
1923  * @param[in] action
1924  *   Port_id RTE action structure.
1925  * @param[in] attr
1926  *   Attributes of flow that includes this action.
1927  * @param[out] error
1928  *   Pointer to error structure.
1929  *
1930  * @return
1931  *   0 on success, a negative errno value otherwise and rte_errno is set.
1932  */
1933 static int
1934 flow_dv_validate_action_port_id(struct rte_eth_dev *dev,
1935                                 uint64_t action_flags,
1936                                 const struct rte_flow_action *action,
1937                                 const struct rte_flow_attr *attr,
1938                                 struct rte_flow_error *error)
1939 {
1940         const struct rte_flow_action_port_id *port_id;
1941         uint16_t port;
1942         uint16_t esw_domain_id;
1943         uint16_t act_port_domain_id;
1944         int ret;
1945
1946         if (!attr->transfer)
1947                 return rte_flow_error_set(error, ENOTSUP,
1948                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1949                                           NULL,
1950                                           "port id action is valid in transfer"
1951                                           " mode only");
1952         if (!action || !action->conf)
1953                 return rte_flow_error_set(error, ENOTSUP,
1954                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1955                                           NULL,
1956                                           "port id action parameters must be"
1957                                           " specified");
1958         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
1959                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
1960                 return rte_flow_error_set(error, EINVAL,
1961                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1962                                           "can have only one fate actions in"
1963                                           " a flow");
1964         ret = mlx5_port_to_eswitch_info(dev->data->port_id,
1965                                         &esw_domain_id, NULL);
1966         if (ret < 0)
1967                 return rte_flow_error_set(error, -ret,
1968                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1969                                           NULL,
1970                                           "failed to obtain E-Switch info");
1971         port_id = action->conf;
1972         port = port_id->original ? dev->data->port_id : port_id->id;
1973         ret = mlx5_port_to_eswitch_info(port, &act_port_domain_id, NULL);
1974         if (ret)
1975                 return rte_flow_error_set
1976                                 (error, -ret,
1977                                  RTE_FLOW_ERROR_TYPE_ACTION_CONF, port_id,
1978                                  "failed to obtain E-Switch port id for port");
1979         if (act_port_domain_id != esw_domain_id)
1980                 return rte_flow_error_set
1981                                 (error, -ret,
1982                                  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1983                                  "port does not belong to"
1984                                  " E-Switch being configured");
1985         return 0;
1986 }
1987
1988 /**
1989  * Find existing modify-header resource or create and register a new one.
1990  *
1991  * @param dev[in, out]
1992  *   Pointer to rte_eth_dev structure.
1993  * @param[in, out] resource
1994  *   Pointer to modify-header resource.
1995  * @parm[in, out] dev_flow
1996  *   Pointer to the dev_flow.
1997  * @param[out] error
1998  *   pointer to error structure.
1999  *
2000  * @return
2001  *   0 on success otherwise -errno and errno is set.
2002  */
2003 static int
2004 flow_dv_modify_hdr_resource_register
2005                         (struct rte_eth_dev *dev,
2006                          struct mlx5_flow_dv_modify_hdr_resource *resource,
2007                          struct mlx5_flow *dev_flow,
2008                          struct rte_flow_error *error)
2009 {
2010         struct mlx5_priv *priv = dev->data->dev_private;
2011         struct mlx5_ibv_shared *sh = priv->sh;
2012         struct mlx5_flow_dv_modify_hdr_resource *cache_resource;
2013         struct mlx5dv_dr_domain *ns;
2014
2015         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
2016                 ns = sh->fdb_domain;
2017         else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
2018                 ns = sh->tx_domain;
2019         else
2020                 ns = sh->rx_domain;
2021         resource->flags =
2022                 dev_flow->flow->group ? 0 : MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
2023         /* Lookup a matching resource from cache. */
2024         LIST_FOREACH(cache_resource, &sh->modify_cmds, next) {
2025                 if (resource->ft_type == cache_resource->ft_type &&
2026                     resource->actions_num == cache_resource->actions_num &&
2027                     resource->flags == cache_resource->flags &&
2028                     !memcmp((const void *)resource->actions,
2029                             (const void *)cache_resource->actions,
2030                             (resource->actions_num *
2031                                             sizeof(resource->actions[0])))) {
2032                         DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d++",
2033                                 (void *)cache_resource,
2034                                 rte_atomic32_read(&cache_resource->refcnt));
2035                         rte_atomic32_inc(&cache_resource->refcnt);
2036                         dev_flow->dv.modify_hdr = cache_resource;
2037                         return 0;
2038                 }
2039         }
2040         /* Register new modify-header resource. */
2041         cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
2042         if (!cache_resource)
2043                 return rte_flow_error_set(error, ENOMEM,
2044                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2045                                           "cannot allocate resource memory");
2046         *cache_resource = *resource;
2047         cache_resource->verbs_action =
2048                 mlx5_glue->dv_create_flow_action_modify_header
2049                                         (sh->ctx, cache_resource->ft_type,
2050                                          ns, cache_resource->flags,
2051                                          cache_resource->actions_num *
2052                                          sizeof(cache_resource->actions[0]),
2053                                          (uint64_t *)cache_resource->actions);
2054         if (!cache_resource->verbs_action) {
2055                 rte_free(cache_resource);
2056                 return rte_flow_error_set(error, ENOMEM,
2057                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2058                                           NULL, "cannot create action");
2059         }
2060         rte_atomic32_init(&cache_resource->refcnt);
2061         rte_atomic32_inc(&cache_resource->refcnt);
2062         LIST_INSERT_HEAD(&sh->modify_cmds, cache_resource, next);
2063         dev_flow->dv.modify_hdr = cache_resource;
2064         DRV_LOG(DEBUG, "new modify-header resource %p: refcnt %d++",
2065                 (void *)cache_resource,
2066                 rte_atomic32_read(&cache_resource->refcnt));
2067         return 0;
2068 }
2069
2070 /**
2071  * Get or create a flow counter.
2072  *
2073  * @param[in] dev
2074  *   Pointer to the Ethernet device structure.
2075  * @param[in] shared
2076  *   Indicate if this counter is shared with other flows.
2077  * @param[in] id
2078  *   Counter identifier.
2079  *
2080  * @return
2081  *   pointer to flow counter on success, NULL otherwise and rte_errno is set.
2082  */
2083 static struct mlx5_flow_counter *
2084 flow_dv_counter_new(struct rte_eth_dev *dev, uint32_t shared, uint32_t id)
2085 {
2086         struct mlx5_priv *priv = dev->data->dev_private;
2087         struct mlx5_flow_counter *cnt = NULL;
2088         struct mlx5_devx_counter_set *dcs = NULL;
2089         int ret;
2090
2091         if (!priv->config.devx) {
2092                 ret = -ENOTSUP;
2093                 goto error_exit;
2094         }
2095         if (shared) {
2096                 LIST_FOREACH(cnt, &priv->flow_counters, next) {
2097                         if (cnt->shared && cnt->id == id) {
2098                                 cnt->ref_cnt++;
2099                                 return cnt;
2100                         }
2101                 }
2102         }
2103         cnt = rte_calloc(__func__, 1, sizeof(*cnt), 0);
2104         dcs = rte_calloc(__func__, 1, sizeof(*dcs), 0);
2105         if (!dcs || !cnt) {
2106                 ret = -ENOMEM;
2107                 goto error_exit;
2108         }
2109         ret = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, dcs);
2110         if (ret)
2111                 goto error_exit;
2112         struct mlx5_flow_counter tmpl = {
2113                 .shared = shared,
2114                 .ref_cnt = 1,
2115                 .id = id,
2116                 .dcs = dcs,
2117         };
2118         tmpl.action = mlx5_glue->dv_create_flow_action_counter(dcs->obj, 0);
2119         if (!tmpl.action) {
2120                 ret = errno;
2121                 goto error_exit;
2122         }
2123         *cnt = tmpl;
2124         LIST_INSERT_HEAD(&priv->flow_counters, cnt, next);
2125         return cnt;
2126 error_exit:
2127         rte_free(cnt);
2128         rte_free(dcs);
2129         rte_errno = -ret;
2130         return NULL;
2131 }
2132
2133 /**
2134  * Release a flow counter.
2135  *
2136  * @param[in] counter
2137  *   Pointer to the counter handler.
2138  */
2139 static void
2140 flow_dv_counter_release(struct mlx5_flow_counter *counter)
2141 {
2142         int ret;
2143
2144         if (!counter)
2145                 return;
2146         if (--counter->ref_cnt == 0) {
2147                 ret = mlx5_devx_cmd_flow_counter_free(counter->dcs->obj);
2148                 if (ret)
2149                         DRV_LOG(ERR, "Failed to free devx counters, %d", ret);
2150                 LIST_REMOVE(counter, next);
2151                 rte_free(counter->dcs);
2152                 rte_free(counter);
2153         }
2154 }
2155
2156 /**
2157  * Verify the @p attributes will be correctly understood by the NIC and store
2158  * them in the @p flow if everything is correct.
2159  *
2160  * @param[in] dev
2161  *   Pointer to dev struct.
2162  * @param[in] attributes
2163  *   Pointer to flow attributes
2164  * @param[out] error
2165  *   Pointer to error structure.
2166  *
2167  * @return
2168  *   0 on success, a negative errno value otherwise and rte_errno is set.
2169  */
2170 static int
2171 flow_dv_validate_attributes(struct rte_eth_dev *dev,
2172                             const struct rte_flow_attr *attributes,
2173                             struct rte_flow_error *error)
2174 {
2175         struct mlx5_priv *priv = dev->data->dev_private;
2176         uint32_t priority_max = priv->config.flow_prio - 1;
2177
2178 #ifndef HAVE_MLX5DV_DR
2179         if (attributes->group)
2180                 return rte_flow_error_set(error, ENOTSUP,
2181                                           RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
2182                                           NULL,
2183                                           "groups is not supported");
2184 #endif
2185         if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
2186             attributes->priority >= priority_max)
2187                 return rte_flow_error_set(error, ENOTSUP,
2188                                           RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
2189                                           NULL,
2190                                           "priority out of range");
2191         if (attributes->transfer) {
2192                 if (!priv->config.dv_esw_en)
2193                         return rte_flow_error_set
2194                                 (error, ENOTSUP,
2195                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2196                                  "E-Switch dr is not supported");
2197                 if (!(priv->representor || priv->master))
2198                         return rte_flow_error_set
2199                                 (error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2200                                  NULL, "E-Switch configurationd can only be"
2201                                  " done by a master or a representor device");
2202                 if (attributes->egress)
2203                         return rte_flow_error_set
2204                                 (error, ENOTSUP,
2205                                  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attributes,
2206                                  "egress is not supported");
2207                 if (attributes->group >= MLX5_MAX_TABLES_FDB)
2208                         return rte_flow_error_set
2209                                 (error, EINVAL,
2210                                  RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
2211                                  NULL, "group must be smaller than "
2212                                  RTE_STR(MLX5_MAX_FDB_TABLES));
2213         }
2214         if (!(attributes->egress ^ attributes->ingress))
2215                 return rte_flow_error_set(error, ENOTSUP,
2216                                           RTE_FLOW_ERROR_TYPE_ATTR, NULL,
2217                                           "must specify exactly one of "
2218                                           "ingress or egress");
2219         return 0;
2220 }
2221
/**
 * Internal validation function. For validating both actions and items.
 *
 * Walks the item list first, accumulating layer flags and tracking the
 * next-protocol value for L3->L4 consistency checks, then walks the action
 * list, accumulating action flags. Finally enforces E-Switch (transfer)
 * restrictions and the presence of a fate action.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] attr
 *   Pointer to the flow attributes.
 * @param[in] items
 *   Pointer to the list of items.
 * @param[in] actions
 *   Pointer to the list of actions.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
		 const struct rte_flow_item items[],
		 const struct rte_flow_action actions[],
		 struct rte_flow_error *error)
{
	int ret;
	uint64_t action_flags = 0; /* Bitmap of MLX5_FLOW_ACTION_* seen. */
	uint64_t item_flags = 0; /* Bitmap of layer/item flags seen so far. */
	uint64_t last_item = 0; /* Flag(s) of the most recent item. */
	uint8_t next_protocol = 0xff; /* IP next-proto from L3 mask, 0xff = any. */
	int actions_n = 0; /* Number of actions counted towards the HW limit. */
	/* Supported TCP match fields: flags byte and exact L4 ports. */
	struct rte_flow_item_tcp nic_tcp_mask = {
		.hdr = {
			.tcp_flags = 0xFF,
			.src_port = RTE_BE16(UINT16_MAX),
			.dst_port = RTE_BE16(UINT16_MAX),
		}
	};

	if (items == NULL)
		return -1;
	ret = flow_dv_validate_attributes(dev, attr, error);
	if (ret < 0)
		return ret;
	/* Pass 1: validate every pattern item and accumulate layer flags. */
	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
		int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
		switch (items->type) {
		case RTE_FLOW_ITEM_TYPE_VOID:
			break;
		case RTE_FLOW_ITEM_TYPE_PORT_ID:
			ret = flow_dv_validate_item_port_id
					(dev, items, attr, item_flags, error);
			if (ret < 0)
				return ret;
			/* NOTE(review): |= here vs = in other cases; result is
			 * equivalent since last_item is folded into item_flags
			 * below, but confirm the asymmetry is intentional. */
			last_item |= MLX5_FLOW_ITEM_PORT_ID;
			break;
		case RTE_FLOW_ITEM_TYPE_ETH:
			ret = mlx5_flow_validate_item_eth(items, item_flags,
							  error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
					     MLX5_FLOW_LAYER_OUTER_L2;
			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			ret = mlx5_flow_validate_item_vlan(items, item_flags,
							   error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
					     MLX5_FLOW_LAYER_OUTER_VLAN;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			ret = mlx5_flow_validate_item_ipv4(items, item_flags,
							   NULL, error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
					     MLX5_FLOW_LAYER_OUTER_L3_IPV4;
			/* Capture masked next-protocol for L4 validation. */
			if (items->mask != NULL &&
			    ((const struct rte_flow_item_ipv4 *)
			     items->mask)->hdr.next_proto_id) {
				next_protocol =
					((const struct rte_flow_item_ipv4 *)
					 (items->spec))->hdr.next_proto_id;
				next_protocol &=
					((const struct rte_flow_item_ipv4 *)
					 (items->mask))->hdr.next_proto_id;
			} else {
				/* Reset for inner layer. */
				next_protocol = 0xff;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			ret = mlx5_flow_validate_item_ipv6(items, item_flags,
							   NULL, error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
					     MLX5_FLOW_LAYER_OUTER_L3_IPV6;
			/* Capture masked next-header for L4 validation. */
			if (items->mask != NULL &&
			    ((const struct rte_flow_item_ipv6 *)
			     items->mask)->hdr.proto) {
				next_protocol =
					((const struct rte_flow_item_ipv6 *)
					 items->spec)->hdr.proto;
				next_protocol &=
					((const struct rte_flow_item_ipv6 *)
					 items->mask)->hdr.proto;
			} else {
				/* Reset for inner layer. */
				next_protocol = 0xff;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			ret = mlx5_flow_validate_item_tcp
						(items, item_flags,
						 next_protocol,
						 &nic_tcp_mask,
						 error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
					     MLX5_FLOW_LAYER_OUTER_L4_TCP;
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			ret = mlx5_flow_validate_item_udp(items, item_flags,
							  next_protocol,
							  error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
					     MLX5_FLOW_LAYER_OUTER_L4_UDP;
			break;
		case RTE_FLOW_ITEM_TYPE_GRE:
		case RTE_FLOW_ITEM_TYPE_NVGRE:
			ret = mlx5_flow_validate_item_gre(items, item_flags,
							  next_protocol, error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_LAYER_GRE;
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			ret = mlx5_flow_validate_item_vxlan(items, item_flags,
							    error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_LAYER_VXLAN;
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
			ret = mlx5_flow_validate_item_vxlan_gpe(items,
								item_flags, dev,
								error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
			break;
		case RTE_FLOW_ITEM_TYPE_MPLS:
			ret = mlx5_flow_validate_item_mpls(dev, items,
							   item_flags,
							   last_item, error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_LAYER_MPLS;
			break;
		case RTE_FLOW_ITEM_TYPE_META:
			ret = flow_dv_validate_item_meta(dev, items, attr,
							 error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_ITEM_METADATA;
			break;
		default:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  NULL, "item not supported");
		}
		item_flags |= last_item;
	}
	/* Pass 2: validate every action and accumulate action flags. */
	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  actions, "too many actions");
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;
		case RTE_FLOW_ACTION_TYPE_PORT_ID:
			ret = flow_dv_validate_action_port_id(dev,
							      action_flags,
							      actions,
							      attr,
							      error);
			if (ret)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_PORT_ID;
			++actions_n;
			break;
		case RTE_FLOW_ACTION_TYPE_FLAG:
			ret = mlx5_flow_validate_action_flag(action_flags,
							     attr, error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_FLAG;
			++actions_n;
			break;
		case RTE_FLOW_ACTION_TYPE_MARK:
			ret = mlx5_flow_validate_action_mark(actions,
							     action_flags,
							     attr, error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_MARK;
			++actions_n;
			break;
		case RTE_FLOW_ACTION_TYPE_DROP:
			ret = mlx5_flow_validate_action_drop(action_flags,
							     attr, error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_DROP;
			++actions_n;
			break;
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			ret = mlx5_flow_validate_action_queue(actions,
							      action_flags, dev,
							      attr, error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_QUEUE;
			++actions_n;
			break;
		case RTE_FLOW_ACTION_TYPE_RSS:
			ret = mlx5_flow_validate_action_rss(actions,
							    action_flags, dev,
							    attr, item_flags,
							    error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_RSS;
			++actions_n;
			break;
		case RTE_FLOW_ACTION_TYPE_COUNT:
			ret = flow_dv_validate_action_count(dev, error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_COUNT;
			++actions_n;
			break;
		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
		case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
			ret = flow_dv_validate_action_l2_encap(action_flags,
							       actions, attr,
							       error);
			if (ret < 0)
				return ret;
			action_flags |= actions->type ==
					RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP ?
					MLX5_FLOW_ACTION_VXLAN_ENCAP :
					MLX5_FLOW_ACTION_NVGRE_ENCAP;
			++actions_n;
			break;
		case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
		case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
			ret = flow_dv_validate_action_l2_decap(action_flags,
							       attr, error);
			if (ret < 0)
				return ret;
			action_flags |= actions->type ==
					RTE_FLOW_ACTION_TYPE_VXLAN_DECAP ?
					MLX5_FLOW_ACTION_VXLAN_DECAP :
					MLX5_FLOW_ACTION_NVGRE_DECAP;
			++actions_n;
			break;
		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
			ret = flow_dv_validate_action_raw_encap(action_flags,
								actions, attr,
								error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_RAW_ENCAP;
			++actions_n;
			break;
		case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
			ret = flow_dv_validate_action_raw_decap(action_flags,
								actions, attr,
								error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_RAW_DECAP;
			++actions_n;
			break;
		case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
		case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
			ret = flow_dv_validate_action_modify_mac(action_flags,
								 actions,
								 item_flags,
								 error);
			if (ret < 0)
				return ret;
			/* Count all modify-header actions as one action. */
			if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
				++actions_n;
			action_flags |= actions->type ==
					RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
						MLX5_FLOW_ACTION_SET_MAC_SRC :
						MLX5_FLOW_ACTION_SET_MAC_DST;
			break;

		case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
		case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
			ret = flow_dv_validate_action_modify_ipv4(action_flags,
								  actions,
								  item_flags,
								  error);
			if (ret < 0)
				return ret;
			/* Count all modify-header actions as one action. */
			if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
				++actions_n;
			action_flags |= actions->type ==
					RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
						MLX5_FLOW_ACTION_SET_IPV4_SRC :
						MLX5_FLOW_ACTION_SET_IPV4_DST;
			break;
		case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
		case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
			ret = flow_dv_validate_action_modify_ipv6(action_flags,
								  actions,
								  item_flags,
								  error);
			if (ret < 0)
				return ret;
			/* Count all modify-header actions as one action. */
			if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
				++actions_n;
			action_flags |= actions->type ==
					RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
						MLX5_FLOW_ACTION_SET_IPV6_SRC :
						MLX5_FLOW_ACTION_SET_IPV6_DST;
			break;
		case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
		case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
			ret = flow_dv_validate_action_modify_tp(action_flags,
								actions,
								item_flags,
								error);
			if (ret < 0)
				return ret;
			/* Count all modify-header actions as one action. */
			if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
				++actions_n;
			action_flags |= actions->type ==
					RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
						MLX5_FLOW_ACTION_SET_TP_SRC :
						MLX5_FLOW_ACTION_SET_TP_DST;
			break;
		case RTE_FLOW_ACTION_TYPE_DEC_TTL:
		case RTE_FLOW_ACTION_TYPE_SET_TTL:
			ret = flow_dv_validate_action_modify_ttl(action_flags,
								 actions,
								 item_flags,
								 error);
			if (ret < 0)
				return ret;
			/* Count all modify-header actions as one action. */
			if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
				++actions_n;
			action_flags |= actions->type ==
					RTE_FLOW_ACTION_TYPE_SET_TTL ?
						MLX5_FLOW_ACTION_SET_TTL :
						MLX5_FLOW_ACTION_DEC_TTL;
			break;
		case RTE_FLOW_ACTION_TYPE_JUMP:
			ret = flow_dv_validate_action_jump(actions,
							   attr->group, error);
			if (ret)
				return ret;
			++actions_n;
			action_flags |= MLX5_FLOW_ACTION_JUMP;
			break;
		case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
		case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
			ret = flow_dv_validate_action_modify_tcp_seq
								(action_flags,
								 actions,
								 item_flags,
								 error);
			if (ret < 0)
				return ret;
			/* Count all modify-header actions as one action. */
			if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
				++actions_n;
			action_flags |= actions->type ==
					RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
						MLX5_FLOW_ACTION_INC_TCP_SEQ :
						MLX5_FLOW_ACTION_DEC_TCP_SEQ;
			break;
		case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
		case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
			ret = flow_dv_validate_action_modify_tcp_ack
								(action_flags,
								 actions,
								 item_flags,
								 error);
			if (ret < 0)
				return ret;
			/* Count all modify-header actions as one action. */
			if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
				++actions_n;
			action_flags |= actions->type ==
					RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
						MLX5_FLOW_ACTION_INC_TCP_ACK :
						MLX5_FLOW_ACTION_DEC_TCP_ACK;
			break;
		default:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  actions,
						  "action not supported");
		}
	}
	/* Eswitch has few restrictions on using items and actions */
	if (attr->transfer) {
		if (action_flags & MLX5_FLOW_ACTION_FLAG)
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  NULL,
						  "unsupported action FLAG");
		if (action_flags & MLX5_FLOW_ACTION_MARK)
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  NULL,
						  "unsupported action MARK");
		if (action_flags & MLX5_FLOW_ACTION_QUEUE)
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  NULL,
						  "unsupported action QUEUE");
		if (action_flags & MLX5_FLOW_ACTION_RSS)
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  NULL,
						  "unsupported action RSS");
		if (!(action_flags & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  actions,
						  "no fate action is found");
	} else {
		/* Only ingress NIC rules strictly require a fate action. */
		if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) && attr->ingress)
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  actions,
						  "no fate action is found");
	}
	return 0;
}
2678
2679 /**
2680  * Internal preparation function. Allocates the DV flow size,
2681  * this size is constant.
2682  *
2683  * @param[in] attr
2684  *   Pointer to the flow attributes.
2685  * @param[in] items
2686  *   Pointer to the list of items.
2687  * @param[in] actions
2688  *   Pointer to the list of actions.
2689  * @param[out] error
2690  *   Pointer to the error structure.
2691  *
2692  * @return
2693  *   Pointer to mlx5_flow object on success,
2694  *   otherwise NULL and rte_errno is set.
2695  */
2696 static struct mlx5_flow *
2697 flow_dv_prepare(const struct rte_flow_attr *attr __rte_unused,
2698                 const struct rte_flow_item items[] __rte_unused,
2699                 const struct rte_flow_action actions[] __rte_unused,
2700                 struct rte_flow_error *error)
2701 {
2702         uint32_t size = sizeof(struct mlx5_flow);
2703         struct mlx5_flow *flow;
2704
2705         flow = rte_calloc(__func__, 1, size, 0);
2706         if (!flow) {
2707                 rte_flow_error_set(error, ENOMEM,
2708                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2709                                    "not enough memory to create flow");
2710                 return NULL;
2711         }
2712         flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param);
2713         return flow;
2714 }
2715
2716 #ifndef NDEBUG
2717 /**
2718  * Sanity check for match mask and value. Similar to check_valid_spec() in
2719  * kernel driver. If unmasked bit is present in value, it returns failure.
2720  *
2721  * @param match_mask
2722  *   pointer to match mask buffer.
2723  * @param match_value
2724  *   pointer to match value buffer.
2725  *
2726  * @return
2727  *   0 if valid, -EINVAL otherwise.
2728  */
2729 static int
2730 flow_dv_check_valid_spec(void *match_mask, void *match_value)
2731 {
2732         uint8_t *m = match_mask;
2733         uint8_t *v = match_value;
2734         unsigned int i;
2735
2736         for (i = 0; i < MLX5_ST_SZ_BYTES(fte_match_param); ++i) {
2737                 if (v[i] & ~m[i]) {
2738                         DRV_LOG(ERR,
2739                                 "match_value differs from match_criteria"
2740                                 " %p[%u] != %p[%u]",
2741                                 match_value, i, match_mask, i);
2742                         return -EINVAL;
2743                 }
2744         }
2745         return 0;
2746 }
2747 #endif
2748
2749 /**
2750  * Add Ethernet item to matcher and to the value.
2751  *
2752  * @param[in, out] matcher
2753  *   Flow matcher.
2754  * @param[in, out] key
2755  *   Flow matcher value.
2756  * @param[in] item
2757  *   Flow pattern to translate.
2758  * @param[in] inner
2759  *   Item is inner pattern.
2760  */
2761 static void
2762 flow_dv_translate_item_eth(void *matcher, void *key,
2763                            const struct rte_flow_item *item, int inner)
2764 {
2765         const struct rte_flow_item_eth *eth_m = item->mask;
2766         const struct rte_flow_item_eth *eth_v = item->spec;
2767         const struct rte_flow_item_eth nic_mask = {
2768                 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
2769                 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
2770                 .type = RTE_BE16(0xffff),
2771         };
2772         void *headers_m;
2773         void *headers_v;
2774         char *l24_v;
2775         unsigned int i;
2776
2777         if (!eth_v)
2778                 return;
2779         if (!eth_m)
2780                 eth_m = &nic_mask;
2781         if (inner) {
2782                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2783                                          inner_headers);
2784                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2785         } else {
2786                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2787                                          outer_headers);
2788                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2789         }
2790         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, dmac_47_16),
2791                &eth_m->dst, sizeof(eth_m->dst));
2792         /* The value must be in the range of the mask. */
2793         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, dmac_47_16);
2794         for (i = 0; i < sizeof(eth_m->dst); ++i)
2795                 l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i];
2796         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, smac_47_16),
2797                &eth_m->src, sizeof(eth_m->src));
2798         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, smac_47_16);
2799         /* The value must be in the range of the mask. */
2800         for (i = 0; i < sizeof(eth_m->dst); ++i)
2801                 l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
2802         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
2803                  rte_be_to_cpu_16(eth_m->type));
2804         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, ethertype);
2805         *(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
2806 }
2807
2808 /**
2809  * Add VLAN item to matcher and to the value.
2810  *
2811  * @param[in, out] matcher
2812  *   Flow matcher.
2813  * @param[in, out] key
2814  *   Flow matcher value.
2815  * @param[in] item
2816  *   Flow pattern to translate.
2817  * @param[in] inner
2818  *   Item is inner pattern.
2819  */
2820 static void
2821 flow_dv_translate_item_vlan(void *matcher, void *key,
2822                             const struct rte_flow_item *item,
2823                             int inner)
2824 {
2825         const struct rte_flow_item_vlan *vlan_m = item->mask;
2826         const struct rte_flow_item_vlan *vlan_v = item->spec;
2827         const struct rte_flow_item_vlan nic_mask = {
2828                 .tci = RTE_BE16(0x0fff),
2829                 .inner_type = RTE_BE16(0xffff),
2830         };
2831         void *headers_m;
2832         void *headers_v;
2833         uint16_t tci_m;
2834         uint16_t tci_v;
2835
2836         if (!vlan_v)
2837                 return;
2838         if (!vlan_m)
2839                 vlan_m = &nic_mask;
2840         if (inner) {
2841                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2842                                          inner_headers);
2843                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2844         } else {
2845                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2846                                          outer_headers);
2847                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2848         }
2849         tci_m = rte_be_to_cpu_16(vlan_m->tci);
2850         tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
2851         MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
2852         MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1);
2853         MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_vid, tci_m);
2854         MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, tci_v);
2855         MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_cfi, tci_m >> 12);
2856         MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_cfi, tci_v >> 12);
2857         MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_prio, tci_m >> 13);
2858         MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, tci_v >> 13);
2859 }
2860
2861 /**
2862  * Add IPV4 item to matcher and to the value.
2863  *
2864  * @param[in, out] matcher
2865  *   Flow matcher.
2866  * @param[in, out] key
2867  *   Flow matcher value.
2868  * @param[in] item
2869  *   Flow pattern to translate.
2870  * @param[in] inner
2871  *   Item is inner pattern.
2872  * @param[in] group
2873  *   The group to insert the rule.
2874  */
2875 static void
2876 flow_dv_translate_item_ipv4(void *matcher, void *key,
2877                             const struct rte_flow_item *item,
2878                             int inner, uint32_t group)
2879 {
2880         const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
2881         const struct rte_flow_item_ipv4 *ipv4_v = item->spec;
2882         const struct rte_flow_item_ipv4 nic_mask = {
2883                 .hdr = {
2884                         .src_addr = RTE_BE32(0xffffffff),
2885                         .dst_addr = RTE_BE32(0xffffffff),
2886                         .type_of_service = 0xff,
2887                         .next_proto_id = 0xff,
2888                 },
2889         };
2890         void *headers_m;
2891         void *headers_v;
2892         char *l24_m;
2893         char *l24_v;
2894         uint8_t tos;
2895
2896         if (inner) {
2897                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2898                                          inner_headers);
2899                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2900         } else {
2901                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2902                                          outer_headers);
2903                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2904         }
2905         if (group == 0)
2906                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
2907         else
2908                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0x4);
2909         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 4);
2910         if (!ipv4_v)
2911                 return;
2912         if (!ipv4_m)
2913                 ipv4_m = &nic_mask;
2914         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
2915                              dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
2916         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2917                              dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
2918         *(uint32_t *)l24_m = ipv4_m->hdr.dst_addr;
2919         *(uint32_t *)l24_v = ipv4_m->hdr.dst_addr & ipv4_v->hdr.dst_addr;
2920         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
2921                           src_ipv4_src_ipv6.ipv4_layout.ipv4);
2922         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2923                           src_ipv4_src_ipv6.ipv4_layout.ipv4);
2924         *(uint32_t *)l24_m = ipv4_m->hdr.src_addr;
2925         *(uint32_t *)l24_v = ipv4_m->hdr.src_addr & ipv4_v->hdr.src_addr;
2926         tos = ipv4_m->hdr.type_of_service & ipv4_v->hdr.type_of_service;
2927         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn,
2928                  ipv4_m->hdr.type_of_service);
2929         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, tos);
2930         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp,
2931                  ipv4_m->hdr.type_of_service >> 2);
2932         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, tos >> 2);
2933         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
2934                  ipv4_m->hdr.next_proto_id);
2935         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
2936                  ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id);
2937 }
2938
/**
 * Add IPV6 item to matcher and to the value.
 *
 * Always programs the IP version field; addresses, traffic class, flow
 * label and next-header protocol are filled only when a spec is present.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 * @param[in] group
 *   The group to insert the rule.
 */
static void
flow_dv_translate_item_ipv6(void *matcher, void *key,
			    const struct rte_flow_item *item,
			    int inner, uint32_t group)
{
	const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
	const struct rte_flow_item_ipv6 *ipv6_v = item->spec;
	/* Default mask used when the item carries none. */
	const struct rte_flow_item_ipv6 nic_mask = {
		.hdr = {
			.src_addr =
				"\xff\xff\xff\xff\xff\xff\xff\xff"
				"\xff\xff\xff\xff\xff\xff\xff\xff",
			.dst_addr =
				"\xff\xff\xff\xff\xff\xff\xff\xff"
				"\xff\xff\xff\xff\xff\xff\xff\xff",
			.vtc_flow = RTE_BE32(0xffffffff),
			.proto = 0xff,
			.hop_limits = 0xff,
		},
	};
	void *headers_m;
	void *headers_v;
	/* Flow label lives in the misc parameters section, not headers. */
	void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
	char *l24_m;
	char *l24_v;
	uint32_t vtc_m;
	uint32_t vtc_v;
	int i;
	int size;

	if (inner) {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
	} else {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 outer_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
	}
	/* Group 0 matches the full ip_version nibble, other groups 0x6. */
	if (group == 0)
		MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
	else
		MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0x6);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 6);
	if (!ipv6_v)
		return;
	if (!ipv6_m)
		ipv6_m = &nic_mask;
	size = sizeof(ipv6_m->hdr.dst_addr);
	/* Destination address: copy mask, value is masked byte-wise. */
	l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
			     dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
			     dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
	memcpy(l24_m, ipv6_m->hdr.dst_addr, size);
	for (i = 0; i < size; ++i)
		l24_v[i] = l24_m[i] & ipv6_v->hdr.dst_addr[i];
	/* Source address: same treatment. */
	l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
			     src_ipv4_src_ipv6.ipv6_layout.ipv6);
	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
			     src_ipv4_src_ipv6.ipv6_layout.ipv6);
	memcpy(l24_m, ipv6_m->hdr.src_addr, size);
	for (i = 0; i < size; ++i)
		l24_v[i] = l24_m[i] & ipv6_v->hdr.src_addr[i];
	/* TOS. */
	vtc_m = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow);
	vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow);
	/* vtc_flow layout: version(4) | traffic class(8) | flow label(20). */
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, vtc_m >> 20);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20);
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, vtc_m >> 22);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, vtc_v >> 22);
	/* Label. */
	if (inner) {
		MLX5_SET(fte_match_set_misc, misc_m, inner_ipv6_flow_label,
			 vtc_m);
		MLX5_SET(fte_match_set_misc, misc_v, inner_ipv6_flow_label,
			 vtc_v);
	} else {
		MLX5_SET(fte_match_set_misc, misc_m, outer_ipv6_flow_label,
			 vtc_m);
		MLX5_SET(fte_match_set_misc, misc_v, outer_ipv6_flow_label,
			 vtc_v);
	}
	/* Protocol. */
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
		 ipv6_m->hdr.proto);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
		 ipv6_v->hdr.proto & ipv6_m->hdr.proto);
}
3042
3043 /**
3044  * Add TCP item to matcher and to the value.
3045  *
3046  * @param[in, out] matcher
3047  *   Flow matcher.
3048  * @param[in, out] key
3049  *   Flow matcher value.
3050  * @param[in] item
3051  *   Flow pattern to translate.
3052  * @param[in] inner
3053  *   Item is inner pattern.
3054  */
3055 static void
3056 flow_dv_translate_item_tcp(void *matcher, void *key,
3057                            const struct rte_flow_item *item,
3058                            int inner)
3059 {
3060         const struct rte_flow_item_tcp *tcp_m = item->mask;
3061         const struct rte_flow_item_tcp *tcp_v = item->spec;
3062         void *headers_m;
3063         void *headers_v;
3064
3065         if (inner) {
3066                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3067                                          inner_headers);
3068                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
3069         } else {
3070                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3071                                          outer_headers);
3072                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
3073         }
3074         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
3075         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP);
3076         if (!tcp_v)
3077                 return;
3078         if (!tcp_m)
3079                 tcp_m = &rte_flow_item_tcp_mask;
3080         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_sport,
3081                  rte_be_to_cpu_16(tcp_m->hdr.src_port));
3082         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
3083                  rte_be_to_cpu_16(tcp_v->hdr.src_port & tcp_m->hdr.src_port));
3084         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_dport,
3085                  rte_be_to_cpu_16(tcp_m->hdr.dst_port));
3086         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
3087                  rte_be_to_cpu_16(tcp_v->hdr.dst_port & tcp_m->hdr.dst_port));
3088         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_flags,
3089                  tcp_m->hdr.tcp_flags);
3090         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
3091                  (tcp_v->hdr.tcp_flags & tcp_m->hdr.tcp_flags));
3092 }
3093
3094 /**
3095  * Add UDP item to matcher and to the value.
3096  *
3097  * @param[in, out] matcher
3098  *   Flow matcher.
3099  * @param[in, out] key
3100  *   Flow matcher value.
3101  * @param[in] item
3102  *   Flow pattern to translate.
3103  * @param[in] inner
3104  *   Item is inner pattern.
3105  */
3106 static void
3107 flow_dv_translate_item_udp(void *matcher, void *key,
3108                            const struct rte_flow_item *item,
3109                            int inner)
3110 {
3111         const struct rte_flow_item_udp *udp_m = item->mask;
3112         const struct rte_flow_item_udp *udp_v = item->spec;
3113         void *headers_m;
3114         void *headers_v;
3115
3116         if (inner) {
3117                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3118                                          inner_headers);
3119                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
3120         } else {
3121                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3122                                          outer_headers);
3123                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
3124         }
3125         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
3126         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
3127         if (!udp_v)
3128                 return;
3129         if (!udp_m)
3130                 udp_m = &rte_flow_item_udp_mask;
3131         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_sport,
3132                  rte_be_to_cpu_16(udp_m->hdr.src_port));
3133         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
3134                  rte_be_to_cpu_16(udp_v->hdr.src_port & udp_m->hdr.src_port));
3135         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
3136                  rte_be_to_cpu_16(udp_m->hdr.dst_port));
3137         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
3138                  rte_be_to_cpu_16(udp_v->hdr.dst_port & udp_m->hdr.dst_port));
3139 }
3140
3141 /**
3142  * Add GRE item to matcher and to the value.
3143  *
3144  * @param[in, out] matcher
3145  *   Flow matcher.
3146  * @param[in, out] key
3147  *   Flow matcher value.
3148  * @param[in] item
3149  *   Flow pattern to translate.
3150  * @param[in] inner
3151  *   Item is inner pattern.
3152  */
3153 static void
3154 flow_dv_translate_item_gre(void *matcher, void *key,
3155                            const struct rte_flow_item *item,
3156                            int inner)
3157 {
3158         const struct rte_flow_item_gre *gre_m = item->mask;
3159         const struct rte_flow_item_gre *gre_v = item->spec;
3160         void *headers_m;
3161         void *headers_v;
3162         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
3163         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
3164
3165         if (inner) {
3166                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3167                                          inner_headers);
3168                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
3169         } else {
3170                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3171                                          outer_headers);
3172                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
3173         }
3174         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
3175         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
3176         if (!gre_v)
3177                 return;
3178         if (!gre_m)
3179                 gre_m = &rte_flow_item_gre_mask;
3180         MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
3181                  rte_be_to_cpu_16(gre_m->protocol));
3182         MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
3183                  rte_be_to_cpu_16(gre_v->protocol & gre_m->protocol));
3184 }
3185
3186 /**
3187  * Add NVGRE item to matcher and to the value.
3188  *
3189  * @param[in, out] matcher
3190  *   Flow matcher.
3191  * @param[in, out] key
3192  *   Flow matcher value.
3193  * @param[in] item
3194  *   Flow pattern to translate.
3195  * @param[in] inner
3196  *   Item is inner pattern.
3197  */
3198 static void
3199 flow_dv_translate_item_nvgre(void *matcher, void *key,
3200                              const struct rte_flow_item *item,
3201                              int inner)
3202 {
3203         const struct rte_flow_item_nvgre *nvgre_m = item->mask;
3204         const struct rte_flow_item_nvgre *nvgre_v = item->spec;
3205         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
3206         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
3207         const char *tni_flow_id_m = (const char *)nvgre_m->tni;
3208         const char *tni_flow_id_v = (const char *)nvgre_v->tni;
3209         char *gre_key_m;
3210         char *gre_key_v;
3211         int size;
3212         int i;
3213
3214         flow_dv_translate_item_gre(matcher, key, item, inner);
3215         if (!nvgre_v)
3216                 return;
3217         if (!nvgre_m)
3218                 nvgre_m = &rte_flow_item_nvgre_mask;
3219         size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id);
3220         gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h);
3221         gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h);
3222         memcpy(gre_key_m, tni_flow_id_m, size);
3223         for (i = 0; i < size; ++i)
3224                 gre_key_v[i] = gre_key_m[i] & tni_flow_id_v[i];
3225 }
3226
3227 /**
3228  * Add VXLAN item to matcher and to the value.
3229  *
3230  * @param[in, out] matcher
3231  *   Flow matcher.
3232  * @param[in, out] key
3233  *   Flow matcher value.
3234  * @param[in] item
3235  *   Flow pattern to translate.
3236  * @param[in] inner
3237  *   Item is inner pattern.
3238  */
3239 static void
3240 flow_dv_translate_item_vxlan(void *matcher, void *key,
3241                              const struct rte_flow_item *item,
3242                              int inner)
3243 {
3244         const struct rte_flow_item_vxlan *vxlan_m = item->mask;
3245         const struct rte_flow_item_vxlan *vxlan_v = item->spec;
3246         void *headers_m;
3247         void *headers_v;
3248         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
3249         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
3250         char *vni_m;
3251         char *vni_v;
3252         uint16_t dport;
3253         int size;
3254         int i;
3255
3256         if (inner) {
3257                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3258                                          inner_headers);
3259                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
3260         } else {
3261                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3262                                          outer_headers);
3263                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
3264         }
3265         dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
3266                 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
3267         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
3268                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
3269                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
3270         }
3271         if (!vxlan_v)
3272                 return;
3273         if (!vxlan_m)
3274                 vxlan_m = &rte_flow_item_vxlan_mask;
3275         size = sizeof(vxlan_m->vni);
3276         vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni);
3277         vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni);
3278         memcpy(vni_m, vxlan_m->vni, size);
3279         for (i = 0; i < size; ++i)
3280                 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
3281 }
3282
/**
 * Add MPLS item to matcher and to the value.
 *
 * First pins the encapsulating protocol (UDP destination port, GRE
 * protocol, or IP protocol) according to the previous layer, then
 * selects the matching MPLS field in misc2 based on the same layer.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] prev_layer
 *   The protocol layer indicated in previous item.
 * @param[in] inner
 *   Item is inner pattern.
 */
static void
flow_dv_translate_item_mpls(void *matcher, void *key,
			    const struct rte_flow_item *item,
			    uint64_t prev_layer,
			    int inner)
{
	/* The MPLS item spec/mask is a raw 32-bit label stack entry. */
	const uint32_t *in_mpls_m = item->mask;
	const uint32_t *in_mpls_v = item->spec;
	uint32_t *out_mpls_m = 0;
	uint32_t *out_mpls_v = 0;
	void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
	void *misc2_m = MLX5_ADDR_OF(fte_match_param, matcher,
				     misc_parameters_2);
	void *misc2_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
	void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);

	/* Pin the encapsulation that carries this MPLS stack. */
	switch (prev_layer) {
	case MLX5_FLOW_LAYER_OUTER_L4_UDP:
		MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xffff);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
			 MLX5_UDP_PORT_MPLS);
		break;
	case MLX5_FLOW_LAYER_GRE:
		MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, 0xffff);
		MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
			 RTE_ETHER_TYPE_MPLS);
		break;
	default:
		/* No recognized tunnel: assume MPLS directly over IP. */
		MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
			 IPPROTO_MPLS);
		break;
	}
	if (!in_mpls_v)
		return;
	if (!in_mpls_m)
		in_mpls_m = (const uint32_t *)&rte_flow_item_mpls_mask;
	/* Pick the misc2 field corresponding to the encapsulation. */
	switch (prev_layer) {
	case MLX5_FLOW_LAYER_OUTER_L4_UDP:
		out_mpls_m =
			(uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
						 outer_first_mpls_over_udp);
		out_mpls_v =
			(uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
						 outer_first_mpls_over_udp);
		break;
	case MLX5_FLOW_LAYER_GRE:
		out_mpls_m =
			(uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
						 outer_first_mpls_over_gre);
		out_mpls_v =
			(uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
						 outer_first_mpls_over_gre);
		break;
	default:
		/* Inner MPLS not over GRE is not supported. */
		if (!inner) {
			out_mpls_m =
				(uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
							 misc2_m,
							 outer_first_mpls);
			out_mpls_v =
				(uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
							 misc2_v,
							 outer_first_mpls);
		}
		break;
	}
	/* Write only when a supported destination field was selected. */
	if (out_mpls_m && out_mpls_v) {
		*out_mpls_m = *in_mpls_m;
		*out_mpls_v = *in_mpls_v & *in_mpls_m;
	}
}
3372
3373 /**
3374  * Add META item to matcher
3375  *
3376  * @param[in, out] matcher
3377  *   Flow matcher.
3378  * @param[in, out] key
3379  *   Flow matcher value.
3380  * @param[in] item
3381  *   Flow pattern to translate.
3382  * @param[in] inner
3383  *   Item is inner pattern.
3384  */
3385 static void
3386 flow_dv_translate_item_meta(void *matcher, void *key,
3387                             const struct rte_flow_item *item)
3388 {
3389         const struct rte_flow_item_meta *meta_m;
3390         const struct rte_flow_item_meta *meta_v;
3391         void *misc2_m =
3392                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
3393         void *misc2_v =
3394                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
3395
3396         meta_m = (const void *)item->mask;
3397         if (!meta_m)
3398                 meta_m = &rte_flow_item_meta_mask;
3399         meta_v = (const void *)item->spec;
3400         if (meta_v) {
3401                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_a,
3402                          rte_be_to_cpu_32(meta_m->data));
3403                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_a,
3404                          rte_be_to_cpu_32(meta_v->data & meta_m->data));
3405         }
3406 }
3407
/**
 * Add source vport match to the specified matcher.
 *
 * Writes the vport number into the source_port field of the misc
 * parameters section of both the matcher and the value.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] port
 *   Source vport value to match
 * @param[in] mask
 *   Mask
 */
static void
flow_dv_translate_item_source_vport(void *matcher, void *key,
				    int16_t port, uint16_t mask)
{
	void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);

	/*
	 * NOTE(review): port is int16_t while mask is uint16_t and the
	 * caller passes a uint16_t vport — presumably the signedness
	 * difference is benign for the 16-bit field write; confirm.
	 */
	MLX5_SET(fte_match_set_misc, misc_m, source_port, mask);
	MLX5_SET(fte_match_set_misc, misc_v, source_port, port);
}
3430
/**
 * Translate port-id item to eswitch match on  port-id.
 *
 * @param[in] dev
 *   The device to configure through.
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 *
 * @return
 *   0 on success, a negative errno value otherwise.
 */
static int
flow_dv_translate_item_port_id(struct rte_eth_dev *dev, void *matcher,
			       void *key, const struct rte_flow_item *item)
{
	const struct rte_flow_item_port_id *pid_m = item ? item->mask : NULL;
	const struct rte_flow_item_port_id *pid_v = item ? item->spec : NULL;
	uint16_t mask, val, id;
	int ret;

	/* Without a mask match all bits; without a spec use our own port. */
	mask = pid_m ? pid_m->id : 0xffff;
	id = pid_v ? pid_v->id : dev->data->port_id;
	/* Convert the DPDK port id to the e-switch vport number. */
	ret = mlx5_port_to_eswitch_info(id, NULL, &val);
	if (ret)
		return ret;
	flow_dv_translate_item_source_vport(matcher, key, val, mask);
	return 0;
}
3463
/* All-zero reference buffer used to detect unused match criteria sections. */
static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };

/* True when the given header section of match_criteria is entirely zero. */
#define HEADER_IS_ZERO(match_criteria, headers)                              \
	!(memcmp(MLX5_ADDR_OF(fte_match_param, match_criteria, headers),     \
		 matcher_zero, MLX5_FLD_SZ_BYTES(fte_match_param, headers))) \
3469
3470 /**
3471  * Calculate flow matcher enable bitmap.
3472  *
3473  * @param match_criteria
3474  *   Pointer to flow matcher criteria.
3475  *
3476  * @return
3477  *   Bitmap of enabled fields.
3478  */
3479 static uint8_t
3480 flow_dv_matcher_enable(uint32_t *match_criteria)
3481 {
3482         uint8_t match_criteria_enable;
3483
3484         match_criteria_enable =
3485                 (!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
3486                 MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT;
3487         match_criteria_enable |=
3488                 (!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
3489                 MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT;
3490         match_criteria_enable |=
3491                 (!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
3492                 MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT;
3493         match_criteria_enable |=
3494                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
3495                 MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
3496 #ifdef HAVE_MLX5DV_DR
3497         match_criteria_enable |=
3498                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_3)) <<
3499                 MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT;
3500 #endif
3501         return match_criteria_enable;
3502 }
3503
3504
/**
 * Get a flow table.
 *
 * Selects the per-domain table array (FDB/TX/RX) and, on DR builds,
 * lazily creates the underlying DR table object on first use and takes
 * a reference on it.
 *
 * @param dev[in, out]
 *   Pointer to rte_eth_dev structure.
 * @param[in] table_id
 *   Table id to use.
 * @param[in] egress
 *   Direction of the table.
 * @param[in] transfer
 *   E-Switch or NIC flow.
 * @param[out] error
 *   pointer to error structure.
 *
 * @return
 *   Returns tables resource based on the index, NULL in case of failed.
 */
static struct mlx5_flow_tbl_resource *
flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
			 uint32_t table_id, uint8_t egress,
			 uint8_t transfer,
			 struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_ibv_shared *sh = priv->sh;
	struct mlx5_flow_tbl_resource *tbl;

#ifdef HAVE_MLX5DV_DR
	/* Pick the table array by domain: FDB (transfer) > TX > RX. */
	if (transfer) {
		tbl = &sh->fdb_tbl[table_id];
		if (!tbl->obj)
			tbl->obj = mlx5_glue->dr_create_flow_tbl
				(sh->fdb_domain, table_id);
	} else if (egress) {
		tbl = &sh->tx_tbl[table_id];
		if (!tbl->obj)
			tbl->obj = mlx5_glue->dr_create_flow_tbl
				(sh->tx_domain, table_id);
	} else {
		tbl = &sh->rx_tbl[table_id];
		if (!tbl->obj)
			tbl->obj = mlx5_glue->dr_create_flow_tbl
				(sh->rx_domain, table_id);
	}
	if (!tbl->obj) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL, "cannot create table");
		return NULL;
	}
	/* Each successful get takes a reference; released in _release(). */
	rte_atomic32_inc(&tbl->refcnt);
	return tbl;
#else
	/* Non-DR builds: return the static entry, no object, no refcount. */
	(void)error;
	(void)tbl;
	if (transfer)
		return &sh->fdb_tbl[table_id];
	else if (egress)
		return &sh->tx_tbl[table_id];
	else
		return &sh->rx_tbl[table_id];
#endif
}
3568
3569 /**
3570  * Release a flow table.
3571  *
3572  * @param[in] tbl
3573  *   Table resource to be released.
3574  *
3575  * @return
3576  *   Returns 0 if table was released, else return 1;
3577  */
3578 static int
3579 flow_dv_tbl_resource_release(struct mlx5_flow_tbl_resource *tbl)
3580 {
3581         if (!tbl)
3582                 return 0;
3583         if (rte_atomic32_dec_and_test(&tbl->refcnt)) {
3584                 mlx5_glue->dr_destroy_flow_tbl(tbl->obj);
3585                 tbl->obj = NULL;
3586                 return 0;
3587         }
3588         return 1;
3589 }
3590
/**
 * Register the flow matcher.
 *
 * Looks the matcher up in the shared-context cache first; on a hit the
 * cached entry's reference count is bumped and it is reused.  Otherwise
 * a new matcher object is created on the target flow table and inserted
 * at the head of the cache list.
 *
 * @param dev[in, out]
 *   Pointer to rte_eth_dev structure.
 * @param[in, out] matcher
 *   Pointer to flow matcher.
 * @parm[in, out] dev_flow
 *   Pointer to the dev_flow.
 * @param[out] error
 *   pointer to error structure.
 *
 * @return
 *   0 on success otherwise -errno and errno is set.
 */
static int
flow_dv_matcher_register(struct rte_eth_dev *dev,
			 struct mlx5_flow_dv_matcher *matcher,
			 struct mlx5_flow *dev_flow,
			 struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_ibv_shared *sh = priv->sh;
	struct mlx5_flow_dv_matcher *cache_matcher;
	struct mlx5dv_flow_matcher_attr dv_attr = {
		.type = IBV_FLOW_ATTR_NORMAL,
		.match_mask = (void *)&matcher->mask,
	};
	struct mlx5_flow_tbl_resource *tbl = NULL;

	/* Lookup from cache. */
	LIST_FOREACH(cache_matcher, &sh->matchers, next) {
		/* A match requires every key field plus the mask bytes. */
		if (matcher->crc == cache_matcher->crc &&
		    matcher->priority == cache_matcher->priority &&
		    matcher->egress == cache_matcher->egress &&
		    matcher->group == cache_matcher->group &&
		    matcher->transfer == cache_matcher->transfer &&
		    !memcmp((const void *)matcher->mask.buf,
			    (const void *)cache_matcher->mask.buf,
			    cache_matcher->mask.size)) {
			DRV_LOG(DEBUG,
				"priority %hd use %s matcher %p: refcnt %d++",
				cache_matcher->priority,
				cache_matcher->egress ? "tx" : "rx",
				(void *)cache_matcher,
				rte_atomic32_read(&cache_matcher->refcnt));
			rte_atomic32_inc(&cache_matcher->refcnt);
			dev_flow->dv.matcher = cache_matcher;
			return 0;
		}
	}
	/* Register new matcher. */
	cache_matcher = rte_calloc(__func__, 1, sizeof(*cache_matcher), 0);
	if (!cache_matcher)
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  "cannot allocate matcher memory");
	/* The matcher lives on the table of its (factor-scaled) group. */
	tbl = flow_dv_tbl_resource_get(dev, matcher->group * MLX5_GROUP_FACTOR,
				       matcher->egress, matcher->transfer,
				       error);
	if (!tbl) {
		rte_free(cache_matcher);
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, "cannot create table");
	}
	*cache_matcher = *matcher;
	dv_attr.match_criteria_enable =
		flow_dv_matcher_enable(cache_matcher->mask.buf);
	dv_attr.priority = matcher->priority;
	if (matcher->egress)
		dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
	cache_matcher->matcher_object =
		mlx5_glue->dv_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj);
	if (!cache_matcher->matcher_object) {
		rte_free(cache_matcher);
#ifdef HAVE_MLX5DV_DR
		/* Only DR builds take a table reference in _get(). */
		flow_dv_tbl_resource_release(tbl);
#endif
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, "cannot create matcher");
	}
	rte_atomic32_inc(&cache_matcher->refcnt);
	LIST_INSERT_HEAD(&sh->matchers, cache_matcher, next);
	dev_flow->dv.matcher = cache_matcher;
	DRV_LOG(DEBUG, "priority %hd new %s matcher %p: refcnt %d",
		cache_matcher->priority,
		cache_matcher->egress ? "tx" : "rx", (void *)cache_matcher,
		rte_atomic32_read(&cache_matcher->refcnt));
	/* Hold an extra table reference for the matcher's lifetime. */
	rte_atomic32_inc(&tbl->refcnt);
	return 0;
}
3684
3685 /**
3686  * Find existing tag resource or create and register a new one.
3687  *
3688  * @param dev[in, out]
3689  *   Pointer to rte_eth_dev structure.
3690  * @param[in, out] resource
3691  *   Pointer to tag resource.
3692  * @parm[in, out] dev_flow
3693  *   Pointer to the dev_flow.
3694  * @param[out] error
3695  *   pointer to error structure.
3696  *
3697  * @return
3698  *   0 on success otherwise -errno and errno is set.
3699  */
3700 static int
3701 flow_dv_tag_resource_register
3702                         (struct rte_eth_dev *dev,
3703                          struct mlx5_flow_dv_tag_resource *resource,
3704                          struct mlx5_flow *dev_flow,
3705                          struct rte_flow_error *error)
3706 {
3707         struct mlx5_priv *priv = dev->data->dev_private;
3708         struct mlx5_ibv_shared *sh = priv->sh;
3709         struct mlx5_flow_dv_tag_resource *cache_resource;
3710
3711         /* Lookup a matching resource from cache. */
3712         LIST_FOREACH(cache_resource, &sh->tags, next) {
3713                 if (resource->tag == cache_resource->tag) {
3714                         DRV_LOG(DEBUG, "tag resource %p: refcnt %d++",
3715                                 (void *)cache_resource,
3716                                 rte_atomic32_read(&cache_resource->refcnt));
3717                         rte_atomic32_inc(&cache_resource->refcnt);
3718                         dev_flow->flow->tag_resource = cache_resource;
3719                         return 0;
3720                 }
3721         }
3722         /* Register new  resource. */
3723         cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
3724         if (!cache_resource)
3725                 return rte_flow_error_set(error, ENOMEM,
3726                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3727                                           "cannot allocate resource memory");
3728         *cache_resource = *resource;
3729         cache_resource->action = mlx5_glue->dv_create_flow_action_tag
3730                 (resource->tag);
3731         if (!cache_resource->action) {
3732                 rte_free(cache_resource);
3733                 return rte_flow_error_set(error, ENOMEM,
3734                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3735                                           NULL, "cannot create action");
3736         }
3737         rte_atomic32_init(&cache_resource->refcnt);
3738         rte_atomic32_inc(&cache_resource->refcnt);
3739         LIST_INSERT_HEAD(&sh->tags, cache_resource, next);
3740         dev_flow->flow->tag_resource = cache_resource;
3741         DRV_LOG(DEBUG, "new tag resource %p: refcnt %d++",
3742                 (void *)cache_resource,
3743                 rte_atomic32_read(&cache_resource->refcnt));
3744         return 0;
3745 }
3746
3747 /**
3748  * Release the tag.
3749  *
3750  * @param dev
3751  *   Pointer to Ethernet device.
3752  * @param flow
3753  *   Pointer to mlx5_flow.
3754  *
3755  * @return
3756  *   1 while a reference on it exists, 0 when freed.
3757  */
3758 static int
3759 flow_dv_tag_release(struct rte_eth_dev *dev,
3760                     struct mlx5_flow_dv_tag_resource *tag)
3761 {
3762         assert(tag);
3763         DRV_LOG(DEBUG, "port %u tag %p: refcnt %d--",
3764                 dev->data->port_id, (void *)tag,
3765                 rte_atomic32_read(&tag->refcnt));
3766         if (rte_atomic32_dec_and_test(&tag->refcnt)) {
3767                 claim_zero(mlx5_glue->destroy_flow_action(tag->action));
3768                 LIST_REMOVE(tag, next);
3769                 DRV_LOG(DEBUG, "port %u tag %p: removed",
3770                         dev->data->port_id, (void *)tag);
3771                 rte_free(tag);
3772                 return 0;
3773         }
3774         return 1;
3775 }
3776
3777 /**
3778  * Translate port ID action to vport.
3779  *
3780  * @param[in] dev
3781  *   Pointer to rte_eth_dev structure.
3782  * @param[in] action
3783  *   Pointer to the port ID action.
3784  * @param[out] dst_port_id
3785  *   The target port ID.
3786  * @param[out] error
3787  *   Pointer to the error structure.
3788  *
3789  * @return
3790  *   0 on success, a negative errno value otherwise and rte_errno is set.
3791  */
3792 static int
3793 flow_dv_translate_action_port_id(struct rte_eth_dev *dev,
3794                                  const struct rte_flow_action *action,
3795                                  uint32_t *dst_port_id,
3796                                  struct rte_flow_error *error)
3797 {
3798         uint32_t port;
3799         uint16_t port_id;
3800         int ret;
3801         const struct rte_flow_action_port_id *conf =
3802                         (const struct rte_flow_action_port_id *)action->conf;
3803
3804         port = conf->original ? dev->data->port_id : conf->id;
3805         ret = mlx5_port_to_eswitch_info(port, NULL, &port_id);
3806         if (ret)
3807                 return rte_flow_error_set(error, -ret,
3808                                           RTE_FLOW_ERROR_TYPE_ACTION,
3809                                           NULL,
3810                                           "No eswitch info was found for port");
3811         *dst_port_id = port_id;
3812         return 0;
3813 }
3814
3815 /**
3816  * Fill the flow with DV spec.
3817  *
3818  * @param[in] dev
3819  *   Pointer to rte_eth_dev structure.
3820  * @param[in, out] dev_flow
3821  *   Pointer to the sub flow.
3822  * @param[in] attr
3823  *   Pointer to the flow attributes.
3824  * @param[in] items
3825  *   Pointer to the list of items.
3826  * @param[in] actions
3827  *   Pointer to the list of actions.
3828  * @param[out] error
3829  *   Pointer to the error structure.
3830  *
3831  * @return
3832  *   0 on success, a negative errno value otherwise and rte_errno is set.
3833  */
3834 static int
3835 flow_dv_translate(struct rte_eth_dev *dev,
3836                   struct mlx5_flow *dev_flow,
3837                   const struct rte_flow_attr *attr,
3838                   const struct rte_flow_item items[],
3839                   const struct rte_flow_action actions[],
3840                   struct rte_flow_error *error)
3841 {
3842         struct mlx5_priv *priv = dev->data->dev_private;
3843         struct rte_flow *flow = dev_flow->flow;
3844         uint64_t item_flags = 0;
3845         uint64_t last_item = 0;
3846         uint64_t action_flags = 0;
3847         uint64_t priority = attr->priority;
3848         struct mlx5_flow_dv_matcher matcher = {
3849                 .mask = {
3850                         .size = sizeof(matcher.mask.buf),
3851                 },
3852         };
3853         int actions_n = 0;
3854         bool actions_end = false;
3855         struct mlx5_flow_dv_modify_hdr_resource res = {
3856                 .ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
3857                                           MLX5DV_FLOW_TABLE_TYPE_NIC_RX
3858         };
3859         union flow_dv_attr flow_attr = { .attr = 0 };
3860         struct mlx5_flow_dv_tag_resource tag_resource;
3861         uint32_t modify_action_position = UINT32_MAX;
3862         void *match_mask = matcher.mask.buf;
3863         void *match_value = dev_flow->dv.value.buf;
3864
3865         flow->group = attr->group;
3866         if (attr->transfer)
3867                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
3868         if (priority == MLX5_FLOW_PRIO_RSVD)
3869                 priority = priv->config.flow_prio - 1;
3870         for (; !actions_end ; actions++) {
3871                 const struct rte_flow_action_queue *queue;
3872                 const struct rte_flow_action_rss *rss;
3873                 const struct rte_flow_action *action = actions;
3874                 const struct rte_flow_action_count *count = action->conf;
3875                 const uint8_t *rss_key;
3876                 const struct rte_flow_action_jump *jump_data;
3877                 struct mlx5_flow_dv_jump_tbl_resource jump_tbl_resource;
3878                 struct mlx5_flow_tbl_resource *tbl;
3879                 uint32_t port_id = 0;
3880                 struct mlx5_flow_dv_port_id_action_resource port_id_resource;
3881
3882                 switch (actions->type) {
3883                 case RTE_FLOW_ACTION_TYPE_VOID:
3884                         break;
3885                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
3886                         if (flow_dv_translate_action_port_id(dev, action,
3887                                                              &port_id, error))
3888                                 return -rte_errno;
3889                         port_id_resource.port_id = port_id;
3890                         if (flow_dv_port_id_action_resource_register
3891                             (dev, &port_id_resource, dev_flow, error))
3892                                 return -rte_errno;
3893                         dev_flow->dv.actions[actions_n++] =
3894                                 dev_flow->dv.port_id_action->action;
3895                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
3896                         break;
3897                 case RTE_FLOW_ACTION_TYPE_FLAG:
3898                         tag_resource.tag =
3899                                 mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
3900                         if (!flow->tag_resource)
3901                                 if (flow_dv_tag_resource_register
3902                                     (dev, &tag_resource, dev_flow, error))
3903                                         return errno;
3904                         dev_flow->dv.actions[actions_n++] =
3905                                 flow->tag_resource->action;
3906                         action_flags |= MLX5_FLOW_ACTION_FLAG;
3907                         break;
3908                 case RTE_FLOW_ACTION_TYPE_MARK:
3909                         tag_resource.tag = mlx5_flow_mark_set
3910                               (((const struct rte_flow_action_mark *)
3911                                (actions->conf))->id);
3912                         if (!flow->tag_resource)
3913                                 if (flow_dv_tag_resource_register
3914                                     (dev, &tag_resource, dev_flow, error))
3915                                         return errno;
3916                         dev_flow->dv.actions[actions_n++] =
3917                                 flow->tag_resource->action;
3918                         action_flags |= MLX5_FLOW_ACTION_MARK;
3919                         break;
3920                 case RTE_FLOW_ACTION_TYPE_DROP:
3921                         action_flags |= MLX5_FLOW_ACTION_DROP;
3922                         break;
3923                 case RTE_FLOW_ACTION_TYPE_QUEUE:
3924                         queue = actions->conf;
3925                         flow->rss.queue_num = 1;
3926                         (*flow->queue)[0] = queue->index;
3927                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
3928                         break;
3929                 case RTE_FLOW_ACTION_TYPE_RSS:
3930                         rss = actions->conf;
3931                         if (flow->queue)
3932                                 memcpy((*flow->queue), rss->queue,
3933                                        rss->queue_num * sizeof(uint16_t));
3934                         flow->rss.queue_num = rss->queue_num;
3935                         /* NULL RSS key indicates default RSS key. */
3936                         rss_key = !rss->key ? rss_hash_default_key : rss->key;
3937                         memcpy(flow->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
3938                         /* RSS type 0 indicates default RSS type ETH_RSS_IP. */
3939                         flow->rss.types = !rss->types ? ETH_RSS_IP : rss->types;
3940                         flow->rss.level = rss->level;
3941                         action_flags |= MLX5_FLOW_ACTION_RSS;
3942                         break;
3943                 case RTE_FLOW_ACTION_TYPE_COUNT:
3944                         if (!priv->config.devx) {
3945                                 rte_errno = ENOTSUP;
3946                                 goto cnt_err;
3947                         }
3948                         flow->counter = flow_dv_counter_new(dev, count->shared,
3949                                                             count->id);
3950                         if (flow->counter == NULL)
3951                                 goto cnt_err;
3952                         dev_flow->dv.actions[actions_n++] =
3953                                 flow->counter->action;
3954                         action_flags |= MLX5_FLOW_ACTION_COUNT;
3955                         break;
3956 cnt_err:
3957                         if (rte_errno == ENOTSUP)
3958                                 return rte_flow_error_set
3959                                               (error, ENOTSUP,
3960                                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3961                                                NULL,
3962                                                "count action not supported");
3963                         else
3964                                 return rte_flow_error_set
3965                                                 (error, rte_errno,
3966                                                  RTE_FLOW_ERROR_TYPE_ACTION,
3967                                                  action,
3968                                                  "cannot create counter"
3969                                                   " object.");
3970                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
3971                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
3972                         if (flow_dv_create_action_l2_encap(dev, actions,
3973                                                            dev_flow,
3974                                                            attr->transfer,
3975                                                            error))
3976                                 return -rte_errno;
3977                         dev_flow->dv.actions[actions_n++] =
3978                                 dev_flow->dv.encap_decap->verbs_action;
3979                         action_flags |= actions->type ==
3980                                         RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP ?
3981                                         MLX5_FLOW_ACTION_VXLAN_ENCAP :
3982                                         MLX5_FLOW_ACTION_NVGRE_ENCAP;
3983                         break;
3984                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
3985                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
3986                         if (flow_dv_create_action_l2_decap(dev, dev_flow,
3987                                                            attr->transfer,
3988                                                            error))
3989                                 return -rte_errno;
3990                         dev_flow->dv.actions[actions_n++] =
3991                                 dev_flow->dv.encap_decap->verbs_action;
3992                         action_flags |= actions->type ==
3993                                         RTE_FLOW_ACTION_TYPE_VXLAN_DECAP ?
3994                                         MLX5_FLOW_ACTION_VXLAN_DECAP :
3995                                         MLX5_FLOW_ACTION_NVGRE_DECAP;
3996                         break;
3997                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
3998                         /* Handle encap with preceding decap. */
3999                         if (action_flags & MLX5_FLOW_ACTION_RAW_DECAP) {
4000                                 if (flow_dv_create_action_raw_encap
4001                                         (dev, actions, dev_flow, attr, error))
4002                                         return -rte_errno;
4003                                 dev_flow->dv.actions[actions_n++] =
4004                                         dev_flow->dv.encap_decap->verbs_action;
4005                         } else {
4006                                 /* Handle encap without preceding decap. */
4007                                 if (flow_dv_create_action_l2_encap
4008                                     (dev, actions, dev_flow, attr->transfer,
4009                                      error))
4010                                         return -rte_errno;
4011                                 dev_flow->dv.actions[actions_n++] =
4012                                         dev_flow->dv.encap_decap->verbs_action;
4013                         }
4014                         action_flags |= MLX5_FLOW_ACTION_RAW_ENCAP;
4015                         break;
4016                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
4017                         /* Check if this decap is followed by encap. */
4018                         for (; action->type != RTE_FLOW_ACTION_TYPE_END &&
4019                                action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP;
4020                                action++) {
4021                         }
4022                         /* Handle decap only if it isn't followed by encap. */
4023                         if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
4024                                 if (flow_dv_create_action_l2_decap
4025                                     (dev, dev_flow, attr->transfer, error))
4026                                         return -rte_errno;
4027                                 dev_flow->dv.actions[actions_n++] =
4028                                         dev_flow->dv.encap_decap->verbs_action;
4029                         }
4030                         /* If decap is followed by encap, handle it at encap. */
4031                         action_flags |= MLX5_FLOW_ACTION_RAW_DECAP;
4032                         break;
4033                 case RTE_FLOW_ACTION_TYPE_JUMP:
4034                         jump_data = action->conf;
4035                         tbl = flow_dv_tbl_resource_get(dev, jump_data->group *
4036                                                        MLX5_GROUP_FACTOR,
4037                                                        attr->egress,
4038                                                        attr->transfer, error);
4039                         if (!tbl)
4040                                 return rte_flow_error_set
4041                                                 (error, errno,
4042                                                  RTE_FLOW_ERROR_TYPE_ACTION,
4043                                                  NULL,
4044                                                  "cannot create jump action.");
4045                         jump_tbl_resource.tbl = tbl;
4046                         if (flow_dv_jump_tbl_resource_register
4047                             (dev, &jump_tbl_resource, dev_flow, error)) {
4048                                 flow_dv_tbl_resource_release(tbl);
4049                                 return rte_flow_error_set
4050                                                 (error, errno,
4051                                                  RTE_FLOW_ERROR_TYPE_ACTION,
4052                                                  NULL,
4053                                                  "cannot create jump action.");
4054                         }
4055                         dev_flow->dv.actions[actions_n++] =
4056                                 dev_flow->dv.jump->action;
4057                         action_flags |= MLX5_FLOW_ACTION_JUMP;
4058                         break;
4059                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
4060                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
4061                         if (flow_dv_convert_action_modify_mac(&res, actions,
4062                                                               error))
4063                                 return -rte_errno;
4064                         action_flags |= actions->type ==
4065                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
4066                                         MLX5_FLOW_ACTION_SET_MAC_SRC :
4067                                         MLX5_FLOW_ACTION_SET_MAC_DST;
4068                         break;
4069                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
4070                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
4071                         if (flow_dv_convert_action_modify_ipv4(&res, actions,
4072                                                                error))
4073                                 return -rte_errno;
4074                         action_flags |= actions->type ==
4075                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
4076                                         MLX5_FLOW_ACTION_SET_IPV4_SRC :
4077                                         MLX5_FLOW_ACTION_SET_IPV4_DST;
4078                         break;
4079                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
4080                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
4081                         if (flow_dv_convert_action_modify_ipv6(&res, actions,
4082                                                                error))
4083                                 return -rte_errno;
4084                         action_flags |= actions->type ==
4085                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
4086                                         MLX5_FLOW_ACTION_SET_IPV6_SRC :
4087                                         MLX5_FLOW_ACTION_SET_IPV6_DST;
4088                         break;
4089                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
4090                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
4091                         if (flow_dv_convert_action_modify_tp(&res, actions,
4092                                                              items, &flow_attr,
4093                                                              error))
4094                                 return -rte_errno;
4095                         action_flags |= actions->type ==
4096                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
4097                                         MLX5_FLOW_ACTION_SET_TP_SRC :
4098                                         MLX5_FLOW_ACTION_SET_TP_DST;
4099                         break;
4100                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
4101                         if (flow_dv_convert_action_modify_dec_ttl(&res, items,
4102                                                                   &flow_attr,
4103                                                                   error))
4104                                 return -rte_errno;
4105                         action_flags |= MLX5_FLOW_ACTION_DEC_TTL;
4106                         break;
4107                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
4108                         if (flow_dv_convert_action_modify_ttl(&res, actions,
4109                                                              items, &flow_attr,
4110                                                              error))
4111                                 return -rte_errno;
4112                         action_flags |= MLX5_FLOW_ACTION_SET_TTL;
4113                         break;
4114                 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
4115                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
4116                         if (flow_dv_convert_action_modify_tcp_seq(&res, actions,
4117                                                                   error))
4118                                 return -rte_errno;
4119                         action_flags |= actions->type ==
4120                                         RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
4121                                         MLX5_FLOW_ACTION_INC_TCP_SEQ :
4122                                         MLX5_FLOW_ACTION_DEC_TCP_SEQ;
4123                         break;
4124
4125                 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
4126                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
4127                         if (flow_dv_convert_action_modify_tcp_ack(&res, actions,
4128                                                                   error))
4129                                 return -rte_errno;
4130                         action_flags |= actions->type ==
4131                                         RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
4132                                         MLX5_FLOW_ACTION_INC_TCP_ACK :
4133                                         MLX5_FLOW_ACTION_DEC_TCP_ACK;
4134                         break;
4135                 case RTE_FLOW_ACTION_TYPE_END:
4136                         actions_end = true;
4137                         if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS) {
4138                                 /* create modify action if needed. */
4139                                 if (flow_dv_modify_hdr_resource_register
4140                                                                 (dev, &res,
4141                                                                  dev_flow,
4142                                                                  error))
4143                                         return -rte_errno;
4144                                 dev_flow->dv.actions[modify_action_position] =
4145                                         dev_flow->dv.modify_hdr->verbs_action;
4146                         }
4147                         break;
4148                 default:
4149                         break;
4150                 }
4151                 if ((action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS) &&
4152                     modify_action_position == UINT32_MAX)
4153                         modify_action_position = actions_n++;
4154         }
4155         dev_flow->dv.actions_n = actions_n;
4156         flow->actions = action_flags;
4157         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
4158                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
4159
4160                 switch (items->type) {
4161                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
4162                         flow_dv_translate_item_port_id(dev, match_mask,
4163                                                        match_value, items);
4164                         last_item = MLX5_FLOW_ITEM_PORT_ID;
4165                         break;
4166                 case RTE_FLOW_ITEM_TYPE_ETH:
4167                         flow_dv_translate_item_eth(match_mask, match_value,
4168                                                    items, tunnel);
4169                         matcher.priority = MLX5_PRIORITY_MAP_L2;
4170                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
4171                                              MLX5_FLOW_LAYER_OUTER_L2;
4172                         break;
4173                 case RTE_FLOW_ITEM_TYPE_VLAN:
4174                         flow_dv_translate_item_vlan(match_mask, match_value,
4175                                                     items, tunnel);
4176                         matcher.priority = MLX5_PRIORITY_MAP_L2;
4177                         last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
4178                                               MLX5_FLOW_LAYER_INNER_VLAN) :
4179                                              (MLX5_FLOW_LAYER_OUTER_L2 |
4180                                               MLX5_FLOW_LAYER_OUTER_VLAN);
4181                         break;
4182                 case RTE_FLOW_ITEM_TYPE_IPV4:
4183                         flow_dv_translate_item_ipv4(match_mask, match_value,
4184                                                     items, tunnel, attr->group);
4185                         matcher.priority = MLX5_PRIORITY_MAP_L3;
4186                         dev_flow->dv.hash_fields |=
4187                                 mlx5_flow_hashfields_adjust
4188                                         (dev_flow, tunnel,
4189                                          MLX5_IPV4_LAYER_TYPES,
4190                                          MLX5_IPV4_IBV_RX_HASH);
4191                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
4192                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
4193                         break;
4194                 case RTE_FLOW_ITEM_TYPE_IPV6:
4195                         flow_dv_translate_item_ipv6(match_mask, match_value,
4196                                                     items, tunnel, attr->group);
4197                         matcher.priority = MLX5_PRIORITY_MAP_L3;
4198                         dev_flow->dv.hash_fields |=
4199                                 mlx5_flow_hashfields_adjust
4200                                         (dev_flow, tunnel,
4201                                          MLX5_IPV6_LAYER_TYPES,
4202                                          MLX5_IPV6_IBV_RX_HASH);
4203                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
4204                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
4205                         break;
4206                 case RTE_FLOW_ITEM_TYPE_TCP:
4207                         flow_dv_translate_item_tcp(match_mask, match_value,
4208                                                    items, tunnel);
4209                         matcher.priority = MLX5_PRIORITY_MAP_L4;
4210                         dev_flow->dv.hash_fields |=
4211                                 mlx5_flow_hashfields_adjust
4212                                         (dev_flow, tunnel, ETH_RSS_TCP,
4213                                          IBV_RX_HASH_SRC_PORT_TCP |
4214                                          IBV_RX_HASH_DST_PORT_TCP);
4215                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
4216                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
4217                         break;
4218                 case RTE_FLOW_ITEM_TYPE_UDP:
4219                         flow_dv_translate_item_udp(match_mask, match_value,
4220                                                    items, tunnel);
4221                         matcher.priority = MLX5_PRIORITY_MAP_L4;
4222                         dev_flow->dv.hash_fields |=
4223                                 mlx5_flow_hashfields_adjust
4224                                         (dev_flow, tunnel, ETH_RSS_UDP,
4225                                          IBV_RX_HASH_SRC_PORT_UDP |
4226                                          IBV_RX_HASH_DST_PORT_UDP);
4227                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
4228                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
4229                         break;
4230                 case RTE_FLOW_ITEM_TYPE_GRE:
4231                         flow_dv_translate_item_gre(match_mask, match_value,
4232                                                    items, tunnel);
4233                         last_item = MLX5_FLOW_LAYER_GRE;
4234                         break;
4235                 case RTE_FLOW_ITEM_TYPE_NVGRE:
4236                         flow_dv_translate_item_nvgre(match_mask, match_value,
4237                                                      items, tunnel);
4238                         last_item = MLX5_FLOW_LAYER_GRE;
4239                         break;
4240                 case RTE_FLOW_ITEM_TYPE_VXLAN:
4241                         flow_dv_translate_item_vxlan(match_mask, match_value,
4242                                                      items, tunnel);
4243                         last_item = MLX5_FLOW_LAYER_VXLAN;
4244                         break;
4245                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
4246                         flow_dv_translate_item_vxlan(match_mask, match_value,
4247                                                      items, tunnel);
4248                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
4249                         break;
4250                 case RTE_FLOW_ITEM_TYPE_MPLS:
4251                         flow_dv_translate_item_mpls(match_mask, match_value,
4252                                                     items, last_item, tunnel);
4253                         last_item = MLX5_FLOW_LAYER_MPLS;
4254                         break;
4255                 case RTE_FLOW_ITEM_TYPE_META:
4256                         flow_dv_translate_item_meta(match_mask, match_value,
4257                                                     items);
4258                         last_item = MLX5_FLOW_ITEM_METADATA;
4259                         break;
4260                 default:
4261                         break;
4262                 }
4263                 item_flags |= last_item;
4264         }
4265         /*
4266          * In case of ingress traffic when E-Switch mode is enabled,
4267          * we have two cases where we need to set the source port manually.
4268          * The first one, is in case of Nic steering rule, and the second is
4269          * E-Switch rule where no port_id item was found. In both cases
4270          * the source port is set according the current port in use.
4271          */
4272         if ((attr->ingress && !(item_flags & MLX5_FLOW_ITEM_PORT_ID)) &&
4273             (priv->representor || priv->master)) {
4274                 if (flow_dv_translate_item_port_id(dev, match_mask,
4275                                                    match_value, NULL))
4276                         return -rte_errno;
4277         }
4278         assert(!flow_dv_check_valid_spec(matcher.mask.buf,
4279                                          dev_flow->dv.value.buf));
4280         dev_flow->layers = item_flags;
4281         /* Register matcher. */
4282         matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
4283                                     matcher.mask.size);
4284         matcher.priority = mlx5_flow_adjust_priority(dev, priority,
4285                                                      matcher.priority);
4286         matcher.egress = attr->egress;
4287         matcher.group = attr->group;
4288         matcher.transfer = attr->transfer;
4289         if (flow_dv_matcher_register(dev, &matcher, dev_flow, error))
4290                 return -rte_errno;
4291         return 0;
4292 }
4293
4294 /**
4295  * Apply the flow to the NIC.
4296  *
4297  * @param[in] dev
4298  *   Pointer to the Ethernet device structure.
4299  * @param[in, out] flow
4300  *   Pointer to flow structure.
4301  * @param[out] error
4302  *   Pointer to error structure.
4303  *
4304  * @return
4305  *   0 on success, a negative errno value otherwise and rte_errno is set.
4306  */
static int
flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
	      struct rte_flow_error *error)
{
	struct mlx5_flow_dv *dv;
	struct mlx5_flow *dev_flow;
	struct mlx5_priv *priv = dev->data->dev_private;
	int n;
	int err;

	/*
	 * Create one DV flow rule per device sub-flow.  The fate action
	 * (drop or queue/RSS) is appended after the already-translated
	 * actions before the rule is handed to hardware.
	 */
	LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
		dv = &dev_flow->dv;
		n = dv->actions_n;
		if (flow->actions & MLX5_FLOW_ACTION_DROP) {
			if (flow->transfer) {
				/* E-Switch rules drop via the shared FDB drop action. */
				dv->actions[n++] = priv->sh->esw_drop_action;
			} else {
				/* NIC rules drop via a dedicated drop hash Rx queue. */
				dv->hrxq = mlx5_hrxq_drop_new(dev);
				if (!dv->hrxq) {
					rte_flow_error_set
						(error, errno,
						 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
						 NULL,
						 "cannot get drop hash queue");
					goto error;
				}
				dv->actions[n++] = dv->hrxq->action;
			}
		} else if (flow->actions &
			   (MLX5_FLOW_ACTION_QUEUE | MLX5_FLOW_ACTION_RSS)) {
			struct mlx5_hrxq *hrxq;

			/* Reuse a matching hash Rx queue or create a new one. */
			hrxq = mlx5_hrxq_get(dev, flow->key,
					     MLX5_RSS_HASH_KEY_LEN,
					     dv->hash_fields,
					     (*flow->queue),
					     flow->rss.queue_num);
			if (!hrxq)
				hrxq = mlx5_hrxq_new
					(dev, flow->key, MLX5_RSS_HASH_KEY_LEN,
					 dv->hash_fields, (*flow->queue),
					 flow->rss.queue_num,
					 !!(dev_flow->layers &
					    MLX5_FLOW_LAYER_TUNNEL));
			if (!hrxq) {
				rte_flow_error_set
					(error, rte_errno,
					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					 "cannot get hash queue");
				goto error;
			}
			dv->hrxq = hrxq;
			dv->actions[n++] = dv->hrxq->action;
		}
		dv->flow =
			mlx5_glue->dv_create_flow(dv->matcher->matcher_object,
						  (void *)&dv->value, n,
						  dv->actions);
		if (!dv->flow) {
			rte_flow_error_set(error, errno,
					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					   NULL,
					   "hardware refuses to create flow");
			goto error;
		}
	}
	return 0;
error:
	err = rte_errno; /* Save rte_errno before cleanup. */
	/* Release hash Rx queues acquired by already-processed sub-flows. */
	LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
		struct mlx5_flow_dv *dv = &dev_flow->dv;
		if (dv->hrxq) {
			if (flow->actions & MLX5_FLOW_ACTION_DROP)
				mlx5_hrxq_drop_release(dev);
			else
				mlx5_hrxq_release(dev, dv->hrxq);
			dv->hrxq = NULL;
		}
	}
	rte_errno = err; /* Restore rte_errno. */
	return -rte_errno;
}
4389
4390 /**
4391  * Release the flow matcher.
4392  *
4393  * @param dev
4394  *   Pointer to Ethernet device.
4395  * @param flow
4396  *   Pointer to mlx5_flow.
4397  *
4398  * @return
4399  *   1 while a reference on it exists, 0 when freed.
4400  */
4401 static int
4402 flow_dv_matcher_release(struct rte_eth_dev *dev,
4403                         struct mlx5_flow *flow)
4404 {
4405         struct mlx5_flow_dv_matcher *matcher = flow->dv.matcher;
4406         struct mlx5_priv *priv = dev->data->dev_private;
4407         struct mlx5_ibv_shared *sh = priv->sh;
4408         struct mlx5_flow_tbl_resource *tbl;
4409
4410         assert(matcher->matcher_object);
4411         DRV_LOG(DEBUG, "port %u matcher %p: refcnt %d--",
4412                 dev->data->port_id, (void *)matcher,
4413                 rte_atomic32_read(&matcher->refcnt));
4414         if (rte_atomic32_dec_and_test(&matcher->refcnt)) {
4415                 claim_zero(mlx5_glue->dv_destroy_flow_matcher
4416                            (matcher->matcher_object));
4417                 LIST_REMOVE(matcher, next);
4418                 if (matcher->egress)
4419                         tbl = &sh->tx_tbl[matcher->group];
4420                 else
4421                         tbl = &sh->rx_tbl[matcher->group];
4422                 flow_dv_tbl_resource_release(tbl);
4423                 rte_free(matcher);
4424                 DRV_LOG(DEBUG, "port %u matcher %p: removed",
4425                         dev->data->port_id, (void *)matcher);
4426                 return 0;
4427         }
4428         return 1;
4429 }
4430
4431 /**
4432  * Release an encap/decap resource.
4433  *
4434  * @param flow
4435  *   Pointer to mlx5_flow.
4436  *
4437  * @return
4438  *   1 while a reference on it exists, 0 when freed.
4439  */
4440 static int
4441 flow_dv_encap_decap_resource_release(struct mlx5_flow *flow)
4442 {
4443         struct mlx5_flow_dv_encap_decap_resource *cache_resource =
4444                                                 flow->dv.encap_decap;
4445
4446         assert(cache_resource->verbs_action);
4447         DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d--",
4448                 (void *)cache_resource,
4449                 rte_atomic32_read(&cache_resource->refcnt));
4450         if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
4451                 claim_zero(mlx5_glue->destroy_flow_action
4452                                 (cache_resource->verbs_action));
4453                 LIST_REMOVE(cache_resource, next);
4454                 rte_free(cache_resource);
4455                 DRV_LOG(DEBUG, "encap/decap resource %p: removed",
4456                         (void *)cache_resource);
4457                 return 0;
4458         }
4459         return 1;
4460 }
4461
4462 /**
 * Release a jump-to-table action resource.
4464  *
4465  * @param flow
4466  *   Pointer to mlx5_flow.
4467  *
4468  * @return
4469  *   1 while a reference on it exists, 0 when freed.
4470  */
4471 static int
4472 flow_dv_jump_tbl_resource_release(struct mlx5_flow *flow)
4473 {
4474         struct mlx5_flow_dv_jump_tbl_resource *cache_resource =
4475                                                 flow->dv.jump;
4476
4477         assert(cache_resource->action);
4478         DRV_LOG(DEBUG, "jump table resource %p: refcnt %d--",
4479                 (void *)cache_resource,
4480                 rte_atomic32_read(&cache_resource->refcnt));
4481         if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
4482                 claim_zero(mlx5_glue->destroy_flow_action
4483                                 (cache_resource->action));
4484                 LIST_REMOVE(cache_resource, next);
4485                 flow_dv_tbl_resource_release(cache_resource->tbl);
4486                 rte_free(cache_resource);
4487                 DRV_LOG(DEBUG, "jump table resource %p: removed",
4488                         (void *)cache_resource);
4489                 return 0;
4490         }
4491         return 1;
4492 }
4493
4494 /**
4495  * Release a modify-header resource.
4496  *
4497  * @param flow
4498  *   Pointer to mlx5_flow.
4499  *
4500  * @return
4501  *   1 while a reference on it exists, 0 when freed.
4502  */
4503 static int
4504 flow_dv_modify_hdr_resource_release(struct mlx5_flow *flow)
4505 {
4506         struct mlx5_flow_dv_modify_hdr_resource *cache_resource =
4507                                                 flow->dv.modify_hdr;
4508
4509         assert(cache_resource->verbs_action);
4510         DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d--",
4511                 (void *)cache_resource,
4512                 rte_atomic32_read(&cache_resource->refcnt));
4513         if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
4514                 claim_zero(mlx5_glue->destroy_flow_action
4515                                 (cache_resource->verbs_action));
4516                 LIST_REMOVE(cache_resource, next);
4517                 rte_free(cache_resource);
4518                 DRV_LOG(DEBUG, "modify-header resource %p: removed",
4519                         (void *)cache_resource);
4520                 return 0;
4521         }
4522         return 1;
4523 }
4524
4525 /**
4526  * Release port ID action resource.
4527  *
4528  * @param flow
4529  *   Pointer to mlx5_flow.
4530  *
4531  * @return
4532  *   1 while a reference on it exists, 0 when freed.
4533  */
4534 static int
4535 flow_dv_port_id_action_resource_release(struct mlx5_flow *flow)
4536 {
4537         struct mlx5_flow_dv_port_id_action_resource *cache_resource =
4538                 flow->dv.port_id_action;
4539
4540         assert(cache_resource->action);
4541         DRV_LOG(DEBUG, "port ID action resource %p: refcnt %d--",
4542                 (void *)cache_resource,
4543                 rte_atomic32_read(&cache_resource->refcnt));
4544         if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
4545                 claim_zero(mlx5_glue->destroy_flow_action
4546                                 (cache_resource->action));
4547                 LIST_REMOVE(cache_resource, next);
4548                 rte_free(cache_resource);
4549                 DRV_LOG(DEBUG, "port id action resource %p: removed",
4550                         (void *)cache_resource);
4551                 return 0;
4552         }
4553         return 1;
4554 }
4555
4556 /**
4557  * Remove the flow from the NIC but keeps it in memory.
4558  *
4559  * @param[in] dev
4560  *   Pointer to Ethernet device.
4561  * @param[in, out] flow
4562  *   Pointer to flow structure.
4563  */
4564 static void
4565 flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
4566 {
4567         struct mlx5_flow_dv *dv;
4568         struct mlx5_flow *dev_flow;
4569
4570         if (!flow)
4571                 return;
4572         LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
4573                 dv = &dev_flow->dv;
4574                 if (dv->flow) {
4575                         claim_zero(mlx5_glue->dv_destroy_flow(dv->flow));
4576                         dv->flow = NULL;
4577                 }
4578                 if (dv->hrxq) {
4579                         if (flow->actions & MLX5_FLOW_ACTION_DROP)
4580                                 mlx5_hrxq_drop_release(dev);
4581                         else
4582                                 mlx5_hrxq_release(dev, dv->hrxq);
4583                         dv->hrxq = NULL;
4584                 }
4585         }
4586 }
4587
4588 /**
4589  * Remove the flow from the NIC and the memory.
4590  *
4591  * @param[in] dev
4592  *   Pointer to the Ethernet device structure.
4593  * @param[in, out] flow
4594  *   Pointer to flow structure.
4595  */
4596 static void
4597 flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
4598 {
4599         struct mlx5_flow *dev_flow;
4600
4601         if (!flow)
4602                 return;
4603         flow_dv_remove(dev, flow);
4604         if (flow->counter) {
4605                 flow_dv_counter_release(flow->counter);
4606                 flow->counter = NULL;
4607         }
4608         if (flow->tag_resource) {
4609                 flow_dv_tag_release(dev, flow->tag_resource);
4610                 flow->tag_resource = NULL;
4611         }
4612         while (!LIST_EMPTY(&flow->dev_flows)) {
4613                 dev_flow = LIST_FIRST(&flow->dev_flows);
4614                 LIST_REMOVE(dev_flow, next);
4615                 if (dev_flow->dv.matcher)
4616                         flow_dv_matcher_release(dev, dev_flow);
4617                 if (dev_flow->dv.encap_decap)
4618                         flow_dv_encap_decap_resource_release(dev_flow);
4619                 if (dev_flow->dv.modify_hdr)
4620                         flow_dv_modify_hdr_resource_release(dev_flow);
4621                 if (dev_flow->dv.jump)
4622                         flow_dv_jump_tbl_resource_release(dev_flow);
4623                 if (dev_flow->dv.port_id_action)
4624                         flow_dv_port_id_action_resource_release(dev_flow);
4625                 rte_free(dev_flow);
4626         }
4627 }
4628
4629 /**
 * Query a DV flow rule for its statistics via DevX.
4631  *
4632  * @param[in] dev
4633  *   Pointer to Ethernet device.
4634  * @param[in] flow
4635  *   Pointer to the sub flow.
4636  * @param[out] data
4637  *   data retrieved by the query.
4638  * @param[out] error
4639  *   Perform verbose error reporting if not NULL.
4640  *
4641  * @return
4642  *   0 on success, a negative errno value otherwise and rte_errno is set.
4643  */
4644 static int
4645 flow_dv_query_count(struct rte_eth_dev *dev, struct rte_flow *flow,
4646                     void *data, struct rte_flow_error *error)
4647 {
4648         struct mlx5_priv *priv = dev->data->dev_private;
4649         struct rte_flow_query_count *qc = data;
4650         uint64_t pkts = 0;
4651         uint64_t bytes = 0;
4652         int err;
4653
4654         if (!priv->config.devx)
4655                 return rte_flow_error_set(error, ENOTSUP,
4656                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4657                                           NULL,
4658                                           "counters are not supported");
4659         if (flow->counter) {
4660                 err = mlx5_devx_cmd_flow_counter_query
4661                                                 (flow->counter->dcs,
4662                                                  qc->reset, &pkts, &bytes);
4663                 if (err)
4664                         return rte_flow_error_set
4665                                 (error, err,
4666                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4667                                  NULL,
4668                                  "cannot read counters");
4669                 qc->hits_set = 1;
4670                 qc->bytes_set = 1;
4671                 qc->hits = pkts - flow->counter->hits;
4672                 qc->bytes = bytes - flow->counter->bytes;
4673                 if (qc->reset) {
4674                         flow->counter->hits = pkts;
4675                         flow->counter->bytes = bytes;
4676                 }
4677                 return 0;
4678         }
4679         return rte_flow_error_set(error, EINVAL,
4680                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4681                                   NULL,
4682                                   "counters are not available");
4683 }
4684
4685 /**
4686  * Query a flow.
4687  *
4688  * @see rte_flow_query()
4689  * @see rte_flow_ops
4690  */
4691 static int
4692 flow_dv_query(struct rte_eth_dev *dev,
4693               struct rte_flow *flow __rte_unused,
4694               const struct rte_flow_action *actions __rte_unused,
4695               void *data __rte_unused,
4696               struct rte_flow_error *error __rte_unused)
4697 {
4698         int ret = -EINVAL;
4699
4700         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
4701                 switch (actions->type) {
4702                 case RTE_FLOW_ACTION_TYPE_VOID:
4703                         break;
4704                 case RTE_FLOW_ACTION_TYPE_COUNT:
4705                         ret = flow_dv_query_count(dev, flow, data, error);
4706                         break;
4707                 default:
4708                         return rte_flow_error_set(error, ENOTSUP,
4709                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4710                                                   actions,
4711                                                   "action not supported");
4712                 }
4713         }
4714         return ret;
4715 }
4716
4717 /*
4718  * Mutex-protected thunk to flow_dv_translate().
4719  */
4720 static int
4721 flow_d_translate(struct rte_eth_dev *dev,
4722                  struct mlx5_flow *dev_flow,
4723                  const struct rte_flow_attr *attr,
4724                  const struct rte_flow_item items[],
4725                  const struct rte_flow_action actions[],
4726                  struct rte_flow_error *error)
4727 {
4728         int ret;
4729
4730         flow_d_shared_lock(dev);
4731         ret = flow_dv_translate(dev, dev_flow, attr, items, actions, error);
4732         flow_d_shared_unlock(dev);
4733         return ret;
4734 }
4735
4736 /*
4737  * Mutex-protected thunk to flow_dv_apply().
4738  */
static int
flow_d_apply(struct rte_eth_dev *dev,
	     struct rte_flow *flow,
	     struct rte_flow_error *error)
{
	int rc;

	/* Serialize access to shared DV resources. */
	flow_d_shared_lock(dev);
	rc = flow_dv_apply(dev, flow, error);
	flow_d_shared_unlock(dev);
	return rc;
}
4751
4752 /*
4753  * Mutex-protected thunk to flow_dv_remove().
4754  */
static void
flow_d_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	/* Serialize access to shared DV resources. */
	flow_d_shared_lock(dev);
	flow_dv_remove(dev, flow);
	flow_d_shared_unlock(dev);
}
4762
4763 /*
4764  * Mutex-protected thunk to flow_dv_destroy().
4765  */
static void
flow_d_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	/* Serialize access to shared DV resources. */
	flow_d_shared_lock(dev);
	flow_dv_destroy(dev, flow);
	flow_d_shared_unlock(dev);
}
4773
/* Direct Verbs flow engine ops, registered with the generic flow layer. */
const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
	.validate = flow_dv_validate,
	.prepare = flow_dv_prepare,
	/* These four go through the mutex-protected flow_d_* thunks. */
	.translate = flow_d_translate,
	.apply = flow_d_apply,
	.remove = flow_d_remove,
	.destroy = flow_d_destroy,
	.query = flow_dv_query,
};
4783
4784 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */