/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018 Mellanox Technologies, Ltd
 */

#include <sys/queue.h>
#include <stdalign.h>
#include <stdint.h>
#include <string.h>

/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_common.h>
#include <rte_ether.h>
#include <rte_eth_ctrl.h>
#include <rte_ethdev_driver.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>
#include <rte_ip.h>

#include "mlx5.h"
#include "mlx5_defs.h"
#include "mlx5_prm.h"
#include "mlx5_glue.h"
#include "mlx5_flow.h"

#ifdef HAVE_IBV_FLOW_DV_SUPPORT

/**
 * Verify the @p attributes will be correctly understood by the NIC and store
 * them in the @p flow if everything is correct.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] attributes
 *   Pointer to flow attributes.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_attributes(struct rte_eth_dev *dev,
                            const struct rte_flow_attr *attributes,
                            struct rte_flow_error *error)
{
        struct priv *priv = dev->data->dev_private;
        uint32_t priority_max = priv->config.flow_prio - 1;

        if (attributes->group)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
                                          NULL,
                                          "groups are not supported");
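        /* MLX5_FLOW_PRIO_RSVD lets the PMD choose the priority itself;
         * any explicit priority must stay below the configured maximum.
         */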
        if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
            attributes->priority >= priority_max)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                                          NULL,
                                          "priority out of range");
        if (attributes->transfer)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
                                          NULL,
                                          "transfer is not supported");
        if (!(attributes->egress ^ attributes->ingress))
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ATTR, NULL,
                                          "must specify exactly one of "
                                          "ingress or egress");
        return 0;
}

/**
 * Internal validation function. Validates both actions and items.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] attr
 *   Pointer to the flow attributes.
 * @param[in] items
 *   Pointer to the list of items.
 * @param[in] actions
 *   Pointer to the list of actions.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
                 const struct rte_flow_item items[],
                 const struct rte_flow_action actions[],
                 struct rte_flow_error *error)
{
        int ret;
        uint32_t action_flags = 0;
        uint32_t item_flags = 0;
        int tunnel = 0;
        uint8_t next_protocol = 0xff;
        int actions_n = 0;

        if (items == NULL)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM_NUM,
                                          NULL, "NULL pattern");
        ret = flow_dv_validate_attributes(dev, attr, error);
        if (ret < 0)
                return ret;
        for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
                tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
                switch (items->type) {
                case RTE_FLOW_ITEM_TYPE_VOID:
                        break;
                case RTE_FLOW_ITEM_TYPE_ETH:
                        ret = mlx5_flow_validate_item_eth(items, item_flags,
                                                          error);
                        if (ret < 0)
                                return ret;
                        item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
                                               MLX5_FLOW_LAYER_OUTER_L2;
                        break;
                case RTE_FLOW_ITEM_TYPE_VLAN:
                        ret = mlx5_flow_validate_item_vlan(items, item_flags,
                                                           error);
                        if (ret < 0)
                                return ret;
                        item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
                                               MLX5_FLOW_LAYER_OUTER_VLAN;
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV4:
                        ret = mlx5_flow_validate_item_ipv4(items, item_flags,
                                                           error);
                        if (ret < 0)
                                return ret;
                        item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
                                               MLX5_FLOW_LAYER_OUTER_L3_IPV4;
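                        /* Remember the L3 next protocol so the L4 and
                         * tunnel item validators (UDP/TCP/GRE/MPLS) can
                         * cross-check it.
                         */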
                        if (items->mask != NULL &&
                            ((const struct rte_flow_item_ipv4 *)
                             items->mask)->hdr.next_proto_id)
                                next_protocol =
                                        ((const struct rte_flow_item_ipv4 *)
                                         (items->spec))->hdr.next_proto_id;
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV6:
                        ret = mlx5_flow_validate_item_ipv6(items, item_flags,
                                                           error);
                        if (ret < 0)
                                return ret;
                        item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
                                               MLX5_FLOW_LAYER_OUTER_L3_IPV6;
                        if (items->mask != NULL &&
                            ((const struct rte_flow_item_ipv6 *)
                             items->mask)->hdr.proto)
                                next_protocol =
                                        ((const struct rte_flow_item_ipv6 *)
                                         items->spec)->hdr.proto;
                        break;
                case RTE_FLOW_ITEM_TYPE_UDP:
                        ret = mlx5_flow_validate_item_udp(items, item_flags,
                                                          next_protocol,
                                                          error);
                        if (ret < 0)
                                return ret;
                        item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
                                               MLX5_FLOW_LAYER_OUTER_L4_UDP;
                        break;
                case RTE_FLOW_ITEM_TYPE_TCP:
                        ret = mlx5_flow_validate_item_tcp(items, item_flags,
                                                          next_protocol, error);
                        if (ret < 0)
                                return ret;
                        item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
                                               MLX5_FLOW_LAYER_OUTER_L4_TCP;
                        break;
                case RTE_FLOW_ITEM_TYPE_VXLAN:
                        ret = mlx5_flow_validate_item_vxlan(items, item_flags,
                                                            error);
                        if (ret < 0)
                                return ret;
                        item_flags |= MLX5_FLOW_LAYER_VXLAN;
                        break;
                case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
                        ret = mlx5_flow_validate_item_vxlan_gpe(items,
                                                                item_flags, dev,
                                                                error);
                        if (ret < 0)
                                return ret;
                        item_flags |= MLX5_FLOW_LAYER_VXLAN_GPE;
                        break;
                case RTE_FLOW_ITEM_TYPE_GRE:
                        ret = mlx5_flow_validate_item_gre(items, item_flags,
                                                          next_protocol, error);
                        if (ret < 0)
                                return ret;
                        item_flags |= MLX5_FLOW_LAYER_GRE;
                        break;
                case RTE_FLOW_ITEM_TYPE_MPLS:
                        ret = mlx5_flow_validate_item_mpls(items, item_flags,
                                                           next_protocol,
                                                           error);
                        if (ret < 0)
                                return ret;
                        item_flags |= MLX5_FLOW_LAYER_MPLS;
                        break;
                default:
                        return rte_flow_error_set(error, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ITEM,
                                                  NULL, "item not supported");
                }
        }
        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
                if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
                        return rte_flow_error_set(error, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                                  actions, "too many actions");
                switch (actions->type) {
                case RTE_FLOW_ACTION_TYPE_VOID:
                        break;
                case RTE_FLOW_ACTION_TYPE_FLAG:
                        ret = mlx5_flow_validate_action_flag(action_flags,
                                                             attr, error);
                        if (ret < 0)
                                return ret;
                        action_flags |= MLX5_FLOW_ACTION_FLAG;
                        ++actions_n;
                        break;
                case RTE_FLOW_ACTION_TYPE_MARK:
                        ret = mlx5_flow_validate_action_mark(actions,
                                                             action_flags,
                                                             attr, error);
                        if (ret < 0)
                                return ret;
                        action_flags |= MLX5_FLOW_ACTION_MARK;
                        ++actions_n;
                        break;
                case RTE_FLOW_ACTION_TYPE_DROP:
                        ret = mlx5_flow_validate_action_drop(action_flags,
                                                             attr, error);
                        if (ret < 0)
                                return ret;
                        action_flags |= MLX5_FLOW_ACTION_DROP;
                        ++actions_n;
                        break;
                case RTE_FLOW_ACTION_TYPE_QUEUE:
                        ret = mlx5_flow_validate_action_queue(actions,
                                                              action_flags, dev,
                                                              attr, error);
                        if (ret < 0)
                                return ret;
                        action_flags |= MLX5_FLOW_ACTION_QUEUE;
                        ++actions_n;
                        break;
                case RTE_FLOW_ACTION_TYPE_RSS:
                        ret = mlx5_flow_validate_action_rss(actions,
                                                            action_flags, dev,
                                                            attr, error);
                        if (ret < 0)
                                return ret;
                        action_flags |= MLX5_FLOW_ACTION_RSS;
                        ++actions_n;
                        break;
                case RTE_FLOW_ACTION_TYPE_COUNT:
                        ret = mlx5_flow_validate_action_count(dev, attr, error);
                        if (ret < 0)
                                return ret;
                        action_flags |= MLX5_FLOW_ACTION_COUNT;
                        ++actions_n;
                        break;
                default:
                        return rte_flow_error_set(error, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                                  actions,
                                                  "action not supported");
                }
        }
        if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) && attr->ingress)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, actions,
                                          "no fate action is found");
        return 0;
}

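/*
 * Illustrative example (not part of the driver): an ingress flow with
 * pattern ETH / IPV4 / UDP / END and actions QUEUE / END satisfies the
 * checks above, assuming the queue index is valid for the device.
 */
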
/**
 * Internal preparation function. Allocates the DV flow structure; its
 * size is constant.
 *
 * @param[in] attr
 *   Pointer to the flow attributes.
 * @param[in] items
 *   Pointer to the list of items.
 * @param[in] actions
 *   Pointer to the list of actions.
 * @param[out] item_flags
 *   Pointer to bit mask of all items detected.
 * @param[out] action_flags
 *   Pointer to bit mask of all actions detected.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   Pointer to mlx5_flow object on success,
 *   otherwise NULL and rte_errno is set.
 */
static struct mlx5_flow *
flow_dv_prepare(const struct rte_flow_attr *attr __rte_unused,
                const struct rte_flow_item items[] __rte_unused,
                const struct rte_flow_action actions[] __rte_unused,
                uint64_t *item_flags __rte_unused,
                uint64_t *action_flags __rte_unused,
                struct rte_flow_error *error)
{
        uint32_t size = sizeof(struct mlx5_flow);
        struct mlx5_flow *flow;

        flow = rte_calloc(__func__, 1, size, 0);
        if (!flow) {
                rte_flow_error_set(error, ENOMEM,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                   "not enough memory to create flow");
                return NULL;
        }
        flow->dv.value.size = MLX5_ST_SZ_DB(fte_match_param);
        return flow;
}

/**
 * Add Ethernet item to matcher and to the value.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 */
static void
flow_dv_translate_item_eth(void *matcher, void *key,
                           const struct rte_flow_item *item, int inner)
{
        const struct rte_flow_item_eth *eth_m = item->mask;
        const struct rte_flow_item_eth *eth_v = item->spec;
        const struct rte_flow_item_eth nic_mask = {
                .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
                .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
                .type = RTE_BE16(0xffff),
        };
        void *headers_m;
        void *headers_v;
        char *l24_v;
        unsigned int i;

        if (!eth_v)
                return;
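        /* A NULL item mask means the NIC-supported default mask applies. */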
        if (!eth_m)
                eth_m = &nic_mask;
        if (inner) {
                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
                                         inner_headers);
                headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
        } else {
                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
                                         outer_headers);
                headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
        }
        memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, dmac_47_16),
               &eth_m->dst, sizeof(eth_m->dst));
        /* The value must be in the range of the mask. */
        l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, dmac_47_16);
        for (i = 0; i < sizeof(eth_m->dst); ++i)
                l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i];
        memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, smac_47_16),
               &eth_m->src, sizeof(eth_m->src));
        l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, smac_47_16);
        /* The value must be in the range of the mask. */
        for (i = 0; i < sizeof(eth_m->src); ++i)
                l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
        MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
                 rte_be_to_cpu_16(eth_m->type));
        l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, ethertype);
        *(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
}

/**
 * Add VLAN item to matcher and to the value.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 */
static void
flow_dv_translate_item_vlan(void *matcher, void *key,
                            const struct rte_flow_item *item,
                            int inner)
{
        const struct rte_flow_item_vlan *vlan_m = item->mask;
        const struct rte_flow_item_vlan *vlan_v = item->spec;
        const struct rte_flow_item_vlan nic_mask = {
                .tci = RTE_BE16(0x0fff),
                .inner_type = RTE_BE16(0xffff),
        };
        void *headers_m;
        void *headers_v;
        uint16_t tci_m;
        uint16_t tci_v;

        if (!vlan_v)
                return;
        if (!vlan_m)
                vlan_m = &nic_mask;
        if (inner) {
                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
                                         inner_headers);
                headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
        } else {
                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
                                         outer_headers);
                headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
        }
        tci_m = rte_be_to_cpu_16(vlan_m->tci);
        tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
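        /* TCI layout: PCP in bits 15-13, CFI/DEI in bit 12, VID in bits
         * 11-0; hence the >> 12 and >> 13 shifts below (MLX5_SET truncates
         * the value to the destination field width).
         */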
        MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1);
        MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_vid, tci_m);
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, tci_v);
        MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_cfi, tci_m >> 12);
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_cfi, tci_v >> 12);
        MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_prio, tci_m >> 13);
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, tci_v >> 13);
}

/**
 * Add IPV4 item to matcher and to the value.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 */
static void
flow_dv_translate_item_ipv4(void *matcher, void *key,
                            const struct rte_flow_item *item,
                            int inner)
{
        const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
        const struct rte_flow_item_ipv4 *ipv4_v = item->spec;
        const struct rte_flow_item_ipv4 nic_mask = {
                .hdr = {
                        .src_addr = RTE_BE32(0xffffffff),
                        .dst_addr = RTE_BE32(0xffffffff),
                        .type_of_service = 0xff,
                        .next_proto_id = 0xff,
                },
        };
        void *headers_m;
        void *headers_v;
        char *l24_m;
        char *l24_v;
        uint8_t tos;

        if (!ipv4_v)
                return;
        if (!ipv4_m)
                ipv4_m = &nic_mask;
        if (inner) {
                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
                                         inner_headers);
                headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
        } else {
                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
                                         outer_headers);
                headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
        }
        MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 4);
        l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
                             dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
        l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                             dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
        *(uint32_t *)l24_m = ipv4_m->hdr.dst_addr;
        *(uint32_t *)l24_v = ipv4_m->hdr.dst_addr & ipv4_v->hdr.dst_addr;
        l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
                             src_ipv4_src_ipv6.ipv4_layout.ipv4);
        l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                             src_ipv4_src_ipv6.ipv4_layout.ipv4);
        *(uint32_t *)l24_m = ipv4_m->hdr.src_addr;
        *(uint32_t *)l24_v = ipv4_m->hdr.src_addr & ipv4_v->hdr.src_addr;
        tos = ipv4_m->hdr.type_of_service & ipv4_v->hdr.type_of_service;
        MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn,
                 ipv4_m->hdr.type_of_service);
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, tos);
        MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp,
                 ipv4_m->hdr.type_of_service >> 2);
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, tos >> 2);
        MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
                 ipv4_m->hdr.next_proto_id);
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
                 ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id);
}

/**
 * Add IPV6 item to matcher and to the value.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 */
static void
flow_dv_translate_item_ipv6(void *matcher, void *key,
                            const struct rte_flow_item *item,
                            int inner)
{
        const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
        const struct rte_flow_item_ipv6 *ipv6_v = item->spec;
        const struct rte_flow_item_ipv6 nic_mask = {
                .hdr = {
                        .src_addr =
                                "\xff\xff\xff\xff\xff\xff\xff\xff"
                                "\xff\xff\xff\xff\xff\xff\xff\xff",
                        .dst_addr =
                                "\xff\xff\xff\xff\xff\xff\xff\xff"
                                "\xff\xff\xff\xff\xff\xff\xff\xff",
                        .vtc_flow = RTE_BE32(0xffffffff),
                        .proto = 0xff,
                        .hop_limits = 0xff,
                },
        };
        void *headers_m;
        void *headers_v;
        void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
        void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
        char *l24_m;
        char *l24_v;
        uint32_t vtc_m;
        uint32_t vtc_v;
        int i;
        int size;

        if (!ipv6_v)
                return;
        if (!ipv6_m)
                ipv6_m = &nic_mask;
        if (inner) {
                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
                                         inner_headers);
                headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
        } else {
                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
                                         outer_headers);
                headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
        }
        size = sizeof(ipv6_m->hdr.dst_addr);
        l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
                             dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
        l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                             dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
        memcpy(l24_m, ipv6_m->hdr.dst_addr, size);
        for (i = 0; i < size; ++i)
                l24_v[i] = l24_m[i] & ipv6_v->hdr.dst_addr[i];
        l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
                             src_ipv4_src_ipv6.ipv6_layout.ipv6);
        l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                             src_ipv4_src_ipv6.ipv6_layout.ipv6);
        memcpy(l24_m, ipv6_m->hdr.src_addr, size);
        for (i = 0; i < size; ++i)
                l24_v[i] = l24_m[i] & ipv6_v->hdr.src_addr[i];
        MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 6);
        /* Traffic class: ECN in bits 21:20 of vtc_flow, DSCP in bits 27:22. */
        vtc_m = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow);
        vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow);
        MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, vtc_m >> 20);
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20);
        MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, vtc_m >> 22);
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, vtc_v >> 22);
        /* Flow label. */
        if (inner) {
                MLX5_SET(fte_match_set_misc, misc_m, inner_ipv6_flow_label,
                         vtc_m);
                MLX5_SET(fte_match_set_misc, misc_v, inner_ipv6_flow_label,
                         vtc_v);
        } else {
                MLX5_SET(fte_match_set_misc, misc_m, outer_ipv6_flow_label,
                         vtc_m);
                MLX5_SET(fte_match_set_misc, misc_v, outer_ipv6_flow_label,
                         vtc_v);
        }
        /* Protocol. */
        MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
                 ipv6_m->hdr.proto);
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
                 ipv6_v->hdr.proto & ipv6_m->hdr.proto);
}

/**
 * Add TCP item to matcher and to the value.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 */
static void
flow_dv_translate_item_tcp(void *matcher, void *key,
                           const struct rte_flow_item *item,
                           int inner)
{
        const struct rte_flow_item_tcp *tcp_m = item->mask;
        const struct rte_flow_item_tcp *tcp_v = item->spec;
        void *headers_m;
        void *headers_v;

        if (!tcp_v)
                return;
        if (!tcp_m)
                tcp_m = &rte_flow_item_tcp_mask;
        if (inner) {
                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
                                         inner_headers);
                headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
        } else {
                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
                                         outer_headers);
                headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
        }
        MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP);
        MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_sport,
                 rte_be_to_cpu_16(tcp_m->hdr.src_port));
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
                 rte_be_to_cpu_16(tcp_v->hdr.src_port & tcp_m->hdr.src_port));
        MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_dport,
                 rte_be_to_cpu_16(tcp_m->hdr.dst_port));
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
                 rte_be_to_cpu_16(tcp_v->hdr.dst_port & tcp_m->hdr.dst_port));
}

/**
 * Add UDP item to matcher and to the value.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 */
static void
flow_dv_translate_item_udp(void *matcher, void *key,
                           const struct rte_flow_item *item,
                           int inner)
{
        const struct rte_flow_item_udp *udp_m = item->mask;
        const struct rte_flow_item_udp *udp_v = item->spec;
        void *headers_m;
        void *headers_v;

        if (!udp_v)
                return;
        if (!udp_m)
                udp_m = &rte_flow_item_udp_mask;
        if (inner) {
                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
                                         inner_headers);
                headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
        } else {
                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
                                         outer_headers);
                headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
        }
        MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
        MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_sport,
                 rte_be_to_cpu_16(udp_m->hdr.src_port));
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
                 rte_be_to_cpu_16(udp_v->hdr.src_port & udp_m->hdr.src_port));
        MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
                 rte_be_to_cpu_16(udp_m->hdr.dst_port));
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
                 rte_be_to_cpu_16(udp_v->hdr.dst_port & udp_m->hdr.dst_port));
}

/**
 * Add GRE item to matcher and to the value.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 */
static void
flow_dv_translate_item_gre(void *matcher, void *key,
                           const struct rte_flow_item *item,
                           int inner)
{
        const struct rte_flow_item_gre *gre_m = item->mask;
        const struct rte_flow_item_gre *gre_v = item->spec;
        void *headers_m;
        void *headers_v;
        void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
        void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);

        if (!gre_v)
                return;
        if (!gre_m)
                gre_m = &rte_flow_item_gre_mask;
        if (inner) {
                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
                                         inner_headers);
                headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
        } else {
                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
                                         outer_headers);
                headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
        }
        MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
        MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
                 rte_be_to_cpu_16(gre_m->protocol));
        MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
                 rte_be_to_cpu_16(gre_v->protocol & gre_m->protocol));
}

/**
 * Add NVGRE item to matcher and to the value.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 */
static void
flow_dv_translate_item_nvgre(void *matcher, void *key,
                             const struct rte_flow_item *item,
                             int inner)
{
        const struct rte_flow_item_nvgre *nvgre_m = item->mask;
        const struct rte_flow_item_nvgre *nvgre_v = item->spec;
        void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
        void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
        char *gre_key_m;
        char *gre_key_v;
        int size;
        int i;

        if (!nvgre_v)
                return;
        if (!nvgre_m)
                nvgre_m = &rte_flow_item_nvgre_mask;
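        /* The 24-bit TNI and the 8-bit flow_id together fill the 32-bit
         * GRE key field.
         */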
        size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id);
        gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h);
        gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h);
        memcpy(gre_key_m, nvgre_m->tni, size);
        for (i = 0; i < size; ++i)
                gre_key_v[i] = gre_key_m[i] & ((const char *)(nvgre_v->tni))[i];
        flow_dv_translate_item_gre(matcher, key, item, inner);
}

/**
 * Add VXLAN item to matcher and to the value.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 */
static void
flow_dv_translate_item_vxlan(void *matcher, void *key,
                             const struct rte_flow_item *item,
                             int inner)
{
        const struct rte_flow_item_vxlan *vxlan_m = item->mask;
        const struct rte_flow_item_vxlan *vxlan_v = item->spec;
        void *headers_m;
        void *headers_v;
        void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
        void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
        char *vni_m;
        char *vni_v;
        uint16_t dport;
        int size;
        int i;

        if (!vxlan_v)
                return;
        if (!vxlan_m)
                vxlan_m = &rte_flow_item_vxlan_mask;
        if (inner) {
                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
                                         inner_headers);
                headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
        } else {
                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
                                         outer_headers);
                headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
        }
        dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
                MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
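        /* If the pattern did not constrain the UDP destination port, pin
         * it to the well-known port so the tunnel match is unambiguous.
         */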
        if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
                MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
                MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
        }
        size = sizeof(vxlan_m->vni);
        vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni);
        vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni);
        memcpy(vni_m, vxlan_m->vni, size);
        for (i = 0; i < size; ++i)
                vni_v[i] = vni_m[i] & vxlan_v->vni[i];
}

/**
 * Update the matcher and the value based on the selected item.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in, out] dev_flow
 *   Pointer to the mlx5_flow.
 * @param[in] inner
 *   Item is inner pattern.
 */
static void
flow_dv_create_item(void *matcher, void *key,
                    const struct rte_flow_item *item,
                    struct mlx5_flow *dev_flow,
                    int inner)
{
        struct mlx5_flow_dv_matcher *tmatcher = matcher;

        switch (item->type) {
        case RTE_FLOW_ITEM_TYPE_VOID:
        case RTE_FLOW_ITEM_TYPE_END:
                break;
        case RTE_FLOW_ITEM_TYPE_ETH:
                flow_dv_translate_item_eth(tmatcher->mask.buf, key, item,
                                           inner);
                tmatcher->priority = MLX5_PRIORITY_MAP_L2;
                break;
        case RTE_FLOW_ITEM_TYPE_VLAN:
                flow_dv_translate_item_vlan(tmatcher->mask.buf, key, item,
                                            inner);
                break;
        case RTE_FLOW_ITEM_TYPE_IPV4:
                flow_dv_translate_item_ipv4(tmatcher->mask.buf, key, item,
                                            inner);
                tmatcher->priority = MLX5_PRIORITY_MAP_L3;
                dev_flow->dv.hash_fields |=
                        mlx5_flow_hashfields_adjust(dev_flow, inner,
                                                    MLX5_IPV4_LAYER_TYPES,
                                                    MLX5_IPV4_IBV_RX_HASH);
                break;
        case RTE_FLOW_ITEM_TYPE_IPV6:
                flow_dv_translate_item_ipv6(tmatcher->mask.buf, key, item,
                                            inner);
                tmatcher->priority = MLX5_PRIORITY_MAP_L3;
                dev_flow->dv.hash_fields |=
                        mlx5_flow_hashfields_adjust(dev_flow, inner,
                                                    MLX5_IPV6_LAYER_TYPES,
                                                    MLX5_IPV6_IBV_RX_HASH);
                break;
        case RTE_FLOW_ITEM_TYPE_TCP:
                flow_dv_translate_item_tcp(tmatcher->mask.buf, key, item,
                                           inner);
                tmatcher->priority = MLX5_PRIORITY_MAP_L4;
                dev_flow->dv.hash_fields |=
                        mlx5_flow_hashfields_adjust(dev_flow, inner,
                                                    ETH_RSS_TCP,
                                                    (IBV_RX_HASH_SRC_PORT_TCP |
                                                     IBV_RX_HASH_DST_PORT_TCP));
                break;
        case RTE_FLOW_ITEM_TYPE_UDP:
                flow_dv_translate_item_udp(tmatcher->mask.buf, key, item,
                                           inner);
                tmatcher->priority = MLX5_PRIORITY_MAP_L4;
                dev_flow->dv.hash_fields |=
                        mlx5_flow_hashfields_adjust(dev_flow, inner,
                                                    ETH_RSS_UDP,
                                                    (IBV_RX_HASH_SRC_PORT_UDP |
                                                     IBV_RX_HASH_DST_PORT_UDP));
                break;
        case RTE_FLOW_ITEM_TYPE_NVGRE:
                flow_dv_translate_item_nvgre(tmatcher->mask.buf, key, item,
                                             inner);
                break;
        case RTE_FLOW_ITEM_TYPE_GRE:
                flow_dv_translate_item_gre(tmatcher->mask.buf, key, item,
                                           inner);
                break;
        case RTE_FLOW_ITEM_TYPE_VXLAN:
        case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
                flow_dv_translate_item_vxlan(tmatcher->mask.buf, key, item,
                                             inner);
                break;
        default:
                break;
        }
}

/**
 * Store the requested actions in an array.
 *
 * @param[in] action
 *   Flow action to translate.
 * @param[in, out] dev_flow
 *   Pointer to the mlx5_flow.
 */
static void
flow_dv_create_action(const struct rte_flow_action *action,
                      struct mlx5_flow *dev_flow)
{
        const struct rte_flow_action_queue *queue;
        const struct rte_flow_action_rss *rss;
        int actions_n = dev_flow->dv.actions_n;
        struct rte_flow *flow = dev_flow->flow;

        switch (action->type) {
        case RTE_FLOW_ACTION_TYPE_VOID:
                break;
        case RTE_FLOW_ACTION_TYPE_FLAG:
                dev_flow->dv.actions[actions_n].type = MLX5DV_FLOW_ACTION_TAG;
                dev_flow->dv.actions[actions_n].tag_value =
                        MLX5_FLOW_MARK_DEFAULT;
                actions_n++;
                flow->actions |= MLX5_FLOW_ACTION_FLAG;
                break;
        case RTE_FLOW_ACTION_TYPE_MARK:
                dev_flow->dv.actions[actions_n].type = MLX5DV_FLOW_ACTION_TAG;
                dev_flow->dv.actions[actions_n].tag_value =
                        ((const struct rte_flow_action_mark *)
                         (action->conf))->id;
                flow->actions |= MLX5_FLOW_ACTION_MARK;
                actions_n++;
                break;
        case RTE_FLOW_ACTION_TYPE_DROP:
                dev_flow->dv.actions[actions_n].type = MLX5DV_FLOW_ACTION_DROP;
                flow->actions |= MLX5_FLOW_ACTION_DROP;
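                /* The slot is intentionally not counted: flow_dv_apply()
                 * overwrites it with a DEST_IBV_QP action targeting the
                 * drop queue, which does not exist yet at this point.
                 */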
                break;
        case RTE_FLOW_ACTION_TYPE_QUEUE:
                queue = action->conf;
                flow->rss.queue_num = 1;
                (*flow->queue)[0] = queue->index;
                flow->actions |= MLX5_FLOW_ACTION_QUEUE;
                break;
        case RTE_FLOW_ACTION_TYPE_RSS:
                rss = action->conf;
                if (flow->queue)
                        memcpy((*flow->queue), rss->queue,
                               rss->queue_num * sizeof(uint16_t));
                flow->rss.queue_num = rss->queue_num;
                memcpy(flow->key, rss->key, MLX5_RSS_HASH_KEY_LEN);
                flow->rss.types = rss->types;
                flow->rss.level = rss->level;
                /* Added to the array only in apply since we need the QP. */
                flow->actions |= MLX5_FLOW_ACTION_RSS;
                break;
        default:
                break;
        }
        dev_flow->dv.actions_n = actions_n;
}

static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };

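/* Evaluates to 1 when the given headers block of the match criteria is all
 * zeros, i.e. that block matches nothing.
 */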
#define HEADER_IS_ZERO(match_criteria, headers)                              \
        !(memcmp(MLX5_ADDR_OF(fte_match_param, match_criteria, headers),     \
                 matcher_zero, MLX5_FLD_SZ_BYTES(fte_match_param, headers)))

/**
 * Calculate flow matcher enable bitmap.
 *
 * @param match_criteria
 *   Pointer to flow matcher criteria.
 *
 * @return
 *   Bitmap of enabled fields.
 */
static uint8_t
flow_dv_matcher_enable(uint32_t *match_criteria)
{
        uint8_t match_criteria_enable;

        match_criteria_enable =
                (!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
                MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT;
        match_criteria_enable |=
                (!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
                MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT;
        match_criteria_enable |=
                (!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
                MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT;
        match_criteria_enable |=
                (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
                MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;

        return match_criteria_enable;
}

/**
 * Register the flow matcher.
 *
 * @param[in, out] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in, out] matcher
 *   Pointer to flow matcher.
 * @param[in, out] dev_flow
 *   Pointer to the dev_flow.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_matcher_register(struct rte_eth_dev *dev,
                         struct mlx5_flow_dv_matcher *matcher,
                         struct mlx5_flow *dev_flow,
                         struct rte_flow_error *error)
{
        struct priv *priv = dev->data->dev_private;
        struct mlx5_flow_dv_matcher *cache_matcher;
        struct mlx5dv_flow_matcher_attr dv_attr = {
                .type = IBV_FLOW_ATTR_NORMAL,
                .match_mask = (void *)&matcher->mask,
        };

        /* Lookup from cache. */
        LIST_FOREACH(cache_matcher, &priv->matchers, next) {
                if (matcher->crc == cache_matcher->crc &&
                    matcher->priority == cache_matcher->priority &&
                    matcher->egress == cache_matcher->egress &&
                    !memcmp((const void *)matcher->mask.buf,
                            (const void *)cache_matcher->mask.buf,
                            cache_matcher->mask.size)) {
                        DRV_LOG(DEBUG,
                                "priority %hd use %s matcher %p: refcnt %d++",
                                cache_matcher->priority,
                                cache_matcher->egress ? "tx" : "rx",
                                (void *)cache_matcher,
                                rte_atomic32_read(&cache_matcher->refcnt));
                        rte_atomic32_inc(&cache_matcher->refcnt);
                        dev_flow->dv.matcher = cache_matcher;
                        return 0;
                }
        }
        /* Register new matcher. */
        cache_matcher = rte_calloc(__func__, 1, sizeof(*cache_matcher), 0);
        if (!cache_matcher)
                return rte_flow_error_set(error, ENOMEM,
                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                          "cannot allocate matcher memory");
        *cache_matcher = *matcher;
        dv_attr.match_criteria_enable =
                flow_dv_matcher_enable(cache_matcher->mask.buf);
        dv_attr.priority = matcher->priority;
        if (matcher->egress)
                dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
        cache_matcher->matcher_object =
                mlx5_glue->dv_create_flow_matcher(priv->ctx, &dv_attr);
        if (!cache_matcher->matcher_object) {
                rte_free(cache_matcher);
                return rte_flow_error_set(error, ENOMEM,
                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                          NULL, "cannot create matcher");
        }
        rte_atomic32_inc(&cache_matcher->refcnt);
        LIST_INSERT_HEAD(&priv->matchers, cache_matcher, next);
        dev_flow->dv.matcher = cache_matcher;
        DRV_LOG(DEBUG, "priority %hd new %s matcher %p: refcnt %d",
                cache_matcher->priority,
                cache_matcher->egress ? "tx" : "rx", (void *)cache_matcher,
                rte_atomic32_read(&cache_matcher->refcnt));
        return 0;
}

/**
 * Fill the flow with DV spec.
 *
 * @param[in] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in, out] dev_flow
 *   Pointer to the sub flow.
 * @param[in] attr
 *   Pointer to the flow attributes.
 * @param[in] items
 *   Pointer to the list of items.
 * @param[in] actions
 *   Pointer to the list of actions.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_translate(struct rte_eth_dev *dev,
                  struct mlx5_flow *dev_flow,
                  const struct rte_flow_attr *attr,
                  const struct rte_flow_item items[],
                  const struct rte_flow_action actions[],
                  struct rte_flow_error *error)
{
        struct priv *priv = dev->data->dev_private;
        uint64_t priority = attr->priority;
        struct mlx5_flow_dv_matcher matcher = {
                .mask = {
                        .size = sizeof(matcher.mask.buf),
                },
        };
        void *match_value = dev_flow->dv.value.buf;
        uint8_t inner = 0;

        if (priority == MLX5_FLOW_PRIO_RSVD)
                priority = priv->config.flow_prio - 1;
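        /* Tunnel inner headers are not translated here: every item is
         * matched as an outer header (inner stays 0).
         */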
        for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++)
                flow_dv_create_item(&matcher, match_value, items, dev_flow,
                                    inner);
        matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
                                    matcher.mask.size);
        matcher.priority = mlx5_flow_adjust_priority(dev, priority,
                                                     matcher.priority);
        matcher.egress = attr->egress;
        if (flow_dv_matcher_register(dev, &matcher, dev_flow, error))
                return -rte_errno;
        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++)
                flow_dv_create_action(actions, dev_flow);
        return 0;
}

/**
 * Apply the flow to the NIC.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in, out] flow
 *   Pointer to flow structure.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
              struct rte_flow_error *error)
{
        struct mlx5_flow_dv *dv;
        struct mlx5_flow *dev_flow;
        int n;
        int err;

        LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
                dv = &dev_flow->dv;
                n = dv->actions_n;
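                /* Fate actions are appended here rather than in translate
                 * because they need the hash Rx queue objects to exist.
                 */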
                if (flow->actions & MLX5_FLOW_ACTION_DROP) {
                        dv->hrxq = mlx5_hrxq_drop_new(dev);
                        if (!dv->hrxq) {
                                rte_flow_error_set
                                        (error, errno,
                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                         "cannot get drop hash queue");
                                goto error;
                        }
                        dv->actions[n].type = MLX5DV_FLOW_ACTION_DEST_IBV_QP;
                        dv->actions[n].qp = dv->hrxq->qp;
                        n++;
                } else if (flow->actions &
                           (MLX5_FLOW_ACTION_QUEUE | MLX5_FLOW_ACTION_RSS)) {
                        struct mlx5_hrxq *hrxq;

                        hrxq = mlx5_hrxq_get(dev, flow->key,
                                             MLX5_RSS_HASH_KEY_LEN,
                                             dv->hash_fields,
                                             (*flow->queue),
                                             flow->rss.queue_num);
                        if (!hrxq)
                                hrxq = mlx5_hrxq_new
                                        (dev, flow->key, MLX5_RSS_HASH_KEY_LEN,
                                         dv->hash_fields, (*flow->queue),
                                         flow->rss.queue_num,
                                         !!(flow->layers &
                                            MLX5_FLOW_LAYER_TUNNEL));
                        if (!hrxq) {
                                rte_flow_error_set
                                        (error, rte_errno,
                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                         "cannot get hash queue");
                                goto error;
                        }
                        dv->hrxq = hrxq;
                        dv->actions[n].type = MLX5DV_FLOW_ACTION_DEST_IBV_QP;
                        dv->actions[n].qp = hrxq->qp;
                        n++;
                }
                dv->flow =
                        mlx5_glue->dv_create_flow(dv->matcher->matcher_object,
                                                  (void *)&dv->value, n,
                                                  dv->actions);
                if (!dv->flow) {
                        rte_flow_error_set(error, errno,
                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                           NULL,
                                           "hardware refuses to create flow");
                        goto error;
                }
        }
        return 0;
error:
        err = rte_errno; /* Save rte_errno before cleanup. */
        LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
                struct mlx5_flow_dv *dv = &dev_flow->dv;

                if (dv->hrxq) {
                        if (flow->actions & MLX5_FLOW_ACTION_DROP)
                                mlx5_hrxq_drop_release(dev);
                        else
                                mlx5_hrxq_release(dev, dv->hrxq);
                        dv->hrxq = NULL;
                }
        }
        rte_errno = err; /* Restore rte_errno. */
        return -rte_errno;
}

/**
 * Release the flow matcher.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param flow
 *   Pointer to mlx5_flow.
 *
 * @return
 *   1 while a reference on it exists, 0 when freed.
 */
static int
flow_dv_matcher_release(struct rte_eth_dev *dev,
                        struct mlx5_flow *flow)
{
        struct mlx5_flow_dv_matcher *matcher = flow->dv.matcher;

        assert(matcher->matcher_object);
        DRV_LOG(DEBUG, "port %u matcher %p: refcnt %d--",
                dev->data->port_id, (void *)matcher,
                rte_atomic32_read(&matcher->refcnt));
        if (rte_atomic32_dec_and_test(&matcher->refcnt)) {
                claim_zero(mlx5_glue->dv_destroy_flow_matcher
                           (matcher->matcher_object));
                LIST_REMOVE(matcher, next);
                rte_free(matcher);
                DRV_LOG(DEBUG, "port %u matcher %p: removed",
                        dev->data->port_id, (void *)matcher);
                return 0;
        }
        return 1;
}

/**
 * Remove the flow from the NIC but keep it in memory.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in, out] flow
 *   Pointer to flow structure.
 */
static void
flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
{
        struct mlx5_flow_dv *dv;
        struct mlx5_flow *dev_flow;

        if (!flow)
                return;
        LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
                dv = &dev_flow->dv;
                if (dv->flow) {
                        claim_zero(mlx5_glue->destroy_flow(dv->flow));
                        dv->flow = NULL;
                }
                if (dv->hrxq) {
                        if (flow->actions & MLX5_FLOW_ACTION_DROP)
                                mlx5_hrxq_drop_release(dev);
                        else
                                mlx5_hrxq_release(dev, dv->hrxq);
                        dv->hrxq = NULL;
                }
        }
        flow->counter = NULL;
}

/**
 * Remove the flow from the NIC and the memory.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in, out] flow
 *   Pointer to flow structure.
 */
static void
flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
{
        struct mlx5_flow *dev_flow;

        if (!flow)
                return;
        flow_dv_remove(dev, flow);
        while (!LIST_EMPTY(&flow->dev_flows)) {
                dev_flow = LIST_FIRST(&flow->dev_flows);
                LIST_REMOVE(dev_flow, next);
                if (dev_flow->dv.matcher)
                        flow_dv_matcher_release(dev, dev_flow);
                rte_free(dev_flow);
        }
}

const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
        .validate = flow_dv_validate,
        .prepare = flow_dv_prepare,
        .translate = flow_dv_translate,
        .apply = flow_dv_apply,
        .remove = flow_dv_remove,
        .destroy = flow_dv_destroy,
};

#endif /* HAVE_IBV_FLOW_DV_SUPPORT */