net/mlx5: add abstraction for multiple flow drivers
drivers/net/mlx5/mlx5_flow_dv.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018 Mellanox Technologies, Ltd
 */

#include <sys/queue.h>
#include <stdalign.h>
#include <stdint.h>
#include <string.h>

/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_common.h>
#include <rte_ether.h>
#include <rte_eth_ctrl.h>
#include <rte_ethdev_driver.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>
#include <rte_ip.h>

#include "mlx5.h"
#include "mlx5_defs.h"
#include "mlx5_prm.h"
#include "mlx5_glue.h"
#include "mlx5_flow.h"

#ifdef HAVE_IBV_FLOW_DV_SUPPORT

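/*
 * Direct Verbs (DV) implementation of the generic flow driver interface.
 * The entry points below are exported through the mlx5_flow_driver_ops
 * table at the end of this file.
 */
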
/**
 * Verify the @p attributes are valid and will be correctly understood by
 * the NIC.
 *
 * @param[in] dev
 *   Pointer to dev struct.
 * @param[in] attributes
 *   Pointer to flow attributes.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_attributes(struct rte_eth_dev *dev,
                            const struct rte_flow_attr *attributes,
                            struct rte_flow_error *error)
{
        struct priv *priv = dev->data->dev_private;
        uint32_t priority_max = priv->config.flow_prio - 1;

        if (attributes->group)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
                                          NULL,
                                          "groups are not supported");
        if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
            attributes->priority >= priority_max)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                                          NULL,
                                          "priority out of range");
        if (attributes->egress)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                                          NULL,
                                          "egress is not supported");
        if (attributes->transfer)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
                                          NULL,
                                          "transfer is not supported");
        if (!attributes->ingress)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                                          NULL,
                                          "ingress attribute is mandatory");
        return 0;
}

/**
 * Internal validation function for validating both actions and items.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] attr
 *   Pointer to the flow attributes.
 * @param[in] items
 *   Pointer to the list of items.
 * @param[in] actions
 *   Pointer to the list of actions.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
                 const struct rte_flow_item items[],
                 const struct rte_flow_action actions[],
                 struct rte_flow_error *error)
{
        int ret;
        uint32_t action_flags = 0;
        uint32_t item_flags = 0;
        int tunnel = 0;
        uint8_t next_protocol = 0xff;
        int actions_n = 0;

        if (items == NULL)
                return -1;
        ret = flow_dv_validate_attributes(dev, attr, error);
        if (ret < 0)
                return ret;
        for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
                tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
                switch (items->type) {
                case RTE_FLOW_ITEM_TYPE_VOID:
                        break;
                case RTE_FLOW_ITEM_TYPE_ETH:
                        ret = mlx5_flow_validate_item_eth(items, item_flags,
                                                          error);
                        if (ret < 0)
                                return ret;
                        item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
                                               MLX5_FLOW_LAYER_OUTER_L2;
                        break;
                case RTE_FLOW_ITEM_TYPE_VLAN:
                        ret = mlx5_flow_validate_item_vlan(items, item_flags,
                                                           error);
                        if (ret < 0)
                                return ret;
                        item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
                                               MLX5_FLOW_LAYER_OUTER_VLAN;
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV4:
                        ret = mlx5_flow_validate_item_ipv4(items, item_flags,
                                                           error);
                        if (ret < 0)
                                return ret;
                        item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
                                               MLX5_FLOW_LAYER_OUTER_L3_IPV4;
                        if (items->mask != NULL &&
                            ((const struct rte_flow_item_ipv4 *)
                             items->mask)->hdr.next_proto_id)
                                next_protocol =
                                        ((const struct rte_flow_item_ipv4 *)
                                         (items->spec))->hdr.next_proto_id;
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV6:
                        ret = mlx5_flow_validate_item_ipv6(items, item_flags,
                                                           error);
                        if (ret < 0)
                                return ret;
                        item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
                                               MLX5_FLOW_LAYER_OUTER_L3_IPV6;
                        if (items->mask != NULL &&
                            ((const struct rte_flow_item_ipv6 *)
                             items->mask)->hdr.proto)
                                next_protocol =
                                        ((const struct rte_flow_item_ipv6 *)
                                         items->spec)->hdr.proto;
                        break;
                case RTE_FLOW_ITEM_TYPE_UDP:
                        ret = mlx5_flow_validate_item_udp(items, item_flags,
                                                          next_protocol,
                                                          error);
                        if (ret < 0)
                                return ret;
                        item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
                                               MLX5_FLOW_LAYER_OUTER_L4_UDP;
                        break;
                case RTE_FLOW_ITEM_TYPE_TCP:
                        ret = mlx5_flow_validate_item_tcp(items, item_flags,
                                                          next_protocol, error);
                        if (ret < 0)
                                return ret;
                        item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
                                               MLX5_FLOW_LAYER_OUTER_L4_TCP;
                        break;
                case RTE_FLOW_ITEM_TYPE_VXLAN:
                        ret = mlx5_flow_validate_item_vxlan(items, item_flags,
                                                            error);
                        if (ret < 0)
                                return ret;
                        item_flags |= MLX5_FLOW_LAYER_VXLAN;
                        break;
                case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
                        ret = mlx5_flow_validate_item_vxlan_gpe(items,
                                                                item_flags, dev,
                                                                error);
                        if (ret < 0)
                                return ret;
                        item_flags |= MLX5_FLOW_LAYER_VXLAN_GPE;
                        break;
                case RTE_FLOW_ITEM_TYPE_GRE:
                        ret = mlx5_flow_validate_item_gre(items, item_flags,
                                                          next_protocol, error);
                        if (ret < 0)
                                return ret;
                        item_flags |= MLX5_FLOW_LAYER_GRE;
                        break;
                case RTE_FLOW_ITEM_TYPE_MPLS:
                        ret = mlx5_flow_validate_item_mpls(items, item_flags,
                                                           next_protocol,
                                                           error);
                        if (ret < 0)
                                return ret;
                        item_flags |= MLX5_FLOW_LAYER_MPLS;
                        break;
                default:
                        return rte_flow_error_set(error, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ITEM,
                                                  NULL, "item not supported");
                }
        }
        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
                if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
                        return rte_flow_error_set(error, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                                  actions, "too many actions");
                switch (actions->type) {
                case RTE_FLOW_ACTION_TYPE_VOID:
                        break;
                case RTE_FLOW_ACTION_TYPE_FLAG:
                        ret = mlx5_flow_validate_action_flag(action_flags,
                                                             error);
                        if (ret < 0)
                                return ret;
                        action_flags |= MLX5_FLOW_ACTION_FLAG;
                        ++actions_n;
                        break;
                case RTE_FLOW_ACTION_TYPE_MARK:
                        ret = mlx5_flow_validate_action_mark(actions,
                                                             action_flags,
                                                             error);
                        if (ret < 0)
                                return ret;
                        action_flags |= MLX5_FLOW_ACTION_MARK;
                        ++actions_n;
                        break;
                case RTE_FLOW_ACTION_TYPE_DROP:
                        ret = mlx5_flow_validate_action_drop(action_flags,
                                                             error);
                        if (ret < 0)
                                return ret;
                        action_flags |= MLX5_FLOW_ACTION_DROP;
                        ++actions_n;
                        break;
                case RTE_FLOW_ACTION_TYPE_QUEUE:
                        ret = mlx5_flow_validate_action_queue(actions,
                                                              action_flags, dev,
                                                              error);
                        if (ret < 0)
                                return ret;
                        action_flags |= MLX5_FLOW_ACTION_QUEUE;
                        ++actions_n;
                        break;
                case RTE_FLOW_ACTION_TYPE_RSS:
                        ret = mlx5_flow_validate_action_rss(actions,
                                                            action_flags, dev,
                                                            error);
                        if (ret < 0)
                                return ret;
                        action_flags |= MLX5_FLOW_ACTION_RSS;
                        ++actions_n;
                        break;
                case RTE_FLOW_ACTION_TYPE_COUNT:
                        ret = mlx5_flow_validate_action_count(dev, error);
                        if (ret < 0)
                                return ret;
                        action_flags |= MLX5_FLOW_ACTION_COUNT;
                        ++actions_n;
                        break;
                default:
                        return rte_flow_error_set(error, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                                  actions,
                                                  "action not supported");
                }
        }
        if (!(action_flags & MLX5_FLOW_FATE_ACTIONS))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, actions,
                                          "no fate action is found");
        return 0;
}
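
/*
 * Illustrative example (not part of the driver): a pattern such as
 * eth / ipv4 / udp / vxlan with a queue or RSS action passes the checks
 * above, while an egress attribute or an unsupported item or action is
 * rejected with ENOTSUP, and a missing fate action with EINVAL.
 */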

/**
 * Internal preparation function. Allocates the DV flow structure; its
 * size is constant.
 *
 * @param[in] attr
 *   Pointer to the flow attributes.
 * @param[in] items
 *   Pointer to the list of items.
 * @param[in] actions
 *   Pointer to the list of actions.
 * @param[out] item_flags
 *   Pointer to bit mask of all items detected.
 * @param[out] action_flags
 *   Pointer to bit mask of all actions detected.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   Pointer to mlx5_flow object on success,
 *   otherwise NULL and rte_errno is set.
 */
static struct mlx5_flow *
flow_dv_prepare(const struct rte_flow_attr *attr __rte_unused,
                const struct rte_flow_item items[] __rte_unused,
                const struct rte_flow_action actions[] __rte_unused,
                uint64_t *item_flags __rte_unused,
                uint64_t *action_flags __rte_unused,
                struct rte_flow_error *error)
{
        uint32_t size = sizeof(struct mlx5_flow);
        struct mlx5_flow *flow;

        flow = rte_calloc(__func__, 1, size, 0);
        if (!flow) {
                rte_flow_error_set(error, ENOMEM,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                   "not enough memory to create flow");
                return NULL;
        }
        flow->dv.value.size = MLX5_ST_SZ_DB(fte_match_param);
        return flow;
}

/**
 * Add Ethernet item to matcher and to the value.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 */
static void
flow_dv_translate_item_eth(void *matcher, void *key,
                           const struct rte_flow_item *item, int inner)
{
        const struct rte_flow_item_eth *eth_m = item->mask;
        const struct rte_flow_item_eth *eth_v = item->spec;
        const struct rte_flow_item_eth nic_mask = {
                .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
                .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
                .type = RTE_BE16(0xffff),
        };
        void *headers_m;
        void *headers_v;
        char *l24_v;
        unsigned int i;

        if (!eth_v)
                return;
        if (!eth_m)
                eth_m = &nic_mask;
        if (inner) {
                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
                                         inner_headers);
                headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
        } else {
                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
                                         outer_headers);
                headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
        }
        memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, dmac_47_16),
               &eth_m->dst, sizeof(eth_m->dst));
        /* The value must be in the range of the mask. */
        l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, dmac_47_16);
        for (i = 0; i < sizeof(eth_m->dst); ++i)
                l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i];
        memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, smac_47_16),
               &eth_m->src, sizeof(eth_m->src));
        l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, smac_47_16);
        /* The value must be in the range of the mask. */
        for (i = 0; i < sizeof(eth_m->src); ++i)
                l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
        MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
                 rte_be_to_cpu_16(eth_m->type));
        l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, ethertype);
        *(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
}

/**
 * Add VLAN item to matcher and to the value.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 */
static void
flow_dv_translate_item_vlan(void *matcher, void *key,
                            const struct rte_flow_item *item,
                            int inner)
{
        const struct rte_flow_item_vlan *vlan_m = item->mask;
        const struct rte_flow_item_vlan *vlan_v = item->spec;
        const struct rte_flow_item_vlan nic_mask = {
                .tci = RTE_BE16(0x0fff),
                .inner_type = RTE_BE16(0xffff),
        };
        void *headers_m;
        void *headers_v;
        uint16_t tci_m;
        uint16_t tci_v;

        if (!vlan_v)
                return;
        if (!vlan_m)
                vlan_m = &nic_mask;
        if (inner) {
                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
                                         inner_headers);
                headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
        } else {
                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
                                         outer_headers);
                headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
        }
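        /*
         * TCI layout: PCP (3 bits) | DEI/CFI (1 bit) | VID (12 bits).
         * MLX5_SET() truncates the value to the destination field width,
         * so the shifts below extract the VID, CFI and priority parts.
         */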
        tci_m = rte_be_to_cpu_16(vlan_m->tci);
        tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
        MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1);
        MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_vid, tci_m);
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, tci_v);
        MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_cfi, tci_m >> 12);
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_cfi, tci_v >> 12);
        MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_prio, tci_m >> 13);
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, tci_v >> 13);
}

/**
 * Add IPV4 item to matcher and to the value.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 */
static void
flow_dv_translate_item_ipv4(void *matcher, void *key,
                            const struct rte_flow_item *item,
                            int inner)
{
        const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
        const struct rte_flow_item_ipv4 *ipv4_v = item->spec;
        const struct rte_flow_item_ipv4 nic_mask = {
                .hdr = {
                        .src_addr = RTE_BE32(0xffffffff),
                        .dst_addr = RTE_BE32(0xffffffff),
                        .type_of_service = 0xff,
                        .next_proto_id = 0xff,
                },
        };
        void *headers_m;
        void *headers_v;
        char *l24_m;
        char *l24_v;
        uint8_t tos;

        if (!ipv4_v)
                return;
        if (!ipv4_m)
                ipv4_m = &nic_mask;
        if (inner) {
                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
                                         inner_headers);
                headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
        } else {
                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
                                         outer_headers);
                headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
        }
        MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 4);
        l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
                             dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
        l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                             dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
        *(uint32_t *)l24_m = ipv4_m->hdr.dst_addr;
        *(uint32_t *)l24_v = ipv4_m->hdr.dst_addr & ipv4_v->hdr.dst_addr;
        l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
                             src_ipv4_src_ipv6.ipv4_layout.ipv4);
        l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                             src_ipv4_src_ipv6.ipv4_layout.ipv4);
        *(uint32_t *)l24_m = ipv4_m->hdr.src_addr;
        *(uint32_t *)l24_v = ipv4_m->hdr.src_addr & ipv4_v->hdr.src_addr;
        tos = ipv4_m->hdr.type_of_service & ipv4_v->hdr.type_of_service;
        MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn,
                 ipv4_m->hdr.type_of_service);
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, tos);
        MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp,
                 ipv4_m->hdr.type_of_service >> 2);
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, tos >> 2);
        MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
                 ipv4_m->hdr.next_proto_id);
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
                 ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id);
}

/**
 * Add IPV6 item to matcher and to the value.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 */
static void
flow_dv_translate_item_ipv6(void *matcher, void *key,
                            const struct rte_flow_item *item,
                            int inner)
{
        const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
        const struct rte_flow_item_ipv6 *ipv6_v = item->spec;
        const struct rte_flow_item_ipv6 nic_mask = {
                .hdr = {
                        .src_addr =
                                "\xff\xff\xff\xff\xff\xff\xff\xff"
                                "\xff\xff\xff\xff\xff\xff\xff\xff",
                        .dst_addr =
                                "\xff\xff\xff\xff\xff\xff\xff\xff"
                                "\xff\xff\xff\xff\xff\xff\xff\xff",
                        .vtc_flow = RTE_BE32(0xffffffff),
                        .proto = 0xff,
                        .hop_limits = 0xff,
                },
        };
        void *headers_m;
        void *headers_v;
        void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
        void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
        char *l24_m;
        char *l24_v;
        uint32_t vtc_m;
        uint32_t vtc_v;
        int i;
        int size;

        if (!ipv6_v)
                return;
        if (!ipv6_m)
                ipv6_m = &nic_mask;
        if (inner) {
                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
                                         inner_headers);
                headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
        } else {
                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
                                         outer_headers);
                headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
        }
        size = sizeof(ipv6_m->hdr.dst_addr);
        l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
                             dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
        l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                             dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
        memcpy(l24_m, ipv6_m->hdr.dst_addr, size);
        for (i = 0; i < size; ++i)
                l24_v[i] = l24_m[i] & ipv6_v->hdr.dst_addr[i];
        l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
                             src_ipv4_src_ipv6.ipv6_layout.ipv6);
        l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                             src_ipv4_src_ipv6.ipv6_layout.ipv6);
        memcpy(l24_m, ipv6_m->hdr.src_addr, size);
        for (i = 0; i < size; ++i)
                l24_v[i] = l24_m[i] & ipv6_v->hdr.src_addr[i];
        MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 6);
        /* TOS. */
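        /*
         * vtc_flow layout: version (4 bits) | traffic class (8 bits) |
         * flow label (20 bits). ECN is the low two TC bits (>> 20), DSCP
         * the high six (>> 22); MLX5_SET() masks off the upper bits.
         */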
        vtc_m = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow);
        vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow);
        MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, vtc_m >> 20);
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20);
        MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, vtc_m >> 22);
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, vtc_v >> 22);
        /* Label. */
        if (inner) {
                MLX5_SET(fte_match_set_misc, misc_m, inner_ipv6_flow_label,
                         vtc_m);
                MLX5_SET(fte_match_set_misc, misc_v, inner_ipv6_flow_label,
                         vtc_v);
        } else {
                MLX5_SET(fte_match_set_misc, misc_m, outer_ipv6_flow_label,
                         vtc_m);
                MLX5_SET(fte_match_set_misc, misc_v, outer_ipv6_flow_label,
                         vtc_v);
        }
        /* Protocol. */
        MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
                 ipv6_m->hdr.proto);
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
                 ipv6_v->hdr.proto & ipv6_m->hdr.proto);
}

/**
 * Add TCP item to matcher and to the value.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 */
static void
flow_dv_translate_item_tcp(void *matcher, void *key,
                           const struct rte_flow_item *item,
                           int inner)
{
        const struct rte_flow_item_tcp *tcp_m = item->mask;
        const struct rte_flow_item_tcp *tcp_v = item->spec;
        void *headers_m;
        void *headers_v;

        if (!tcp_v)
                return;
        if (!tcp_m)
                tcp_m = &rte_flow_item_tcp_mask;
        if (inner) {
                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
                                         inner_headers);
                headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
        } else {
                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
                                         outer_headers);
                headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
        }
        MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP);
        MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_sport,
                 rte_be_to_cpu_16(tcp_m->hdr.src_port));
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
                 rte_be_to_cpu_16(tcp_v->hdr.src_port & tcp_m->hdr.src_port));
        MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_dport,
                 rte_be_to_cpu_16(tcp_m->hdr.dst_port));
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
                 rte_be_to_cpu_16(tcp_v->hdr.dst_port & tcp_m->hdr.dst_port));
}

/**
 * Add UDP item to matcher and to the value.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 */
static void
flow_dv_translate_item_udp(void *matcher, void *key,
                           const struct rte_flow_item *item,
                           int inner)
{
        const struct rte_flow_item_udp *udp_m = item->mask;
        const struct rte_flow_item_udp *udp_v = item->spec;
        void *headers_m;
        void *headers_v;

        if (!udp_v)
                return;
        if (!udp_m)
                udp_m = &rte_flow_item_udp_mask;
        if (inner) {
                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
                                         inner_headers);
                headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
        } else {
                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
                                         outer_headers);
                headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
        }
        MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
        MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_sport,
                 rte_be_to_cpu_16(udp_m->hdr.src_port));
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
                 rte_be_to_cpu_16(udp_v->hdr.src_port & udp_m->hdr.src_port));
        MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
                 rte_be_to_cpu_16(udp_m->hdr.dst_port));
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
                 rte_be_to_cpu_16(udp_v->hdr.dst_port & udp_m->hdr.dst_port));
}

/**
 * Add GRE item to matcher and to the value.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 */
static void
flow_dv_translate_item_gre(void *matcher, void *key,
                           const struct rte_flow_item *item,
                           int inner)
{
        const struct rte_flow_item_gre *gre_m = item->mask;
        const struct rte_flow_item_gre *gre_v = item->spec;
        void *headers_m;
        void *headers_v;
        void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
        void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);

        if (!gre_v)
                return;
        if (!gre_m)
                gre_m = &rte_flow_item_gre_mask;
        if (inner) {
                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
                                         inner_headers);
                headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
        } else {
                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
                                         outer_headers);
                headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
        }
        MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
        MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
                 rte_be_to_cpu_16(gre_m->protocol));
        MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
                 rte_be_to_cpu_16(gre_v->protocol & gre_m->protocol));
}

/**
 * Add NVGRE item to matcher and to the value.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 */
static void
flow_dv_translate_item_nvgre(void *matcher, void *key,
                             const struct rte_flow_item *item,
                             int inner)
{
        const struct rte_flow_item_nvgre *nvgre_m = item->mask;
        const struct rte_flow_item_nvgre *nvgre_v = item->spec;
        void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
        void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
        char *gre_key_m;
        char *gre_key_v;
        int size;
        int i;

        if (!nvgre_v)
                return;
        if (!nvgre_m)
                nvgre_m = &rte_flow_item_nvgre_mask;
        size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id);
        gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h);
        gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h);
        memcpy(gre_key_m, nvgre_m->tni, size);
        for (i = 0; i < size; ++i)
                gre_key_v[i] = gre_key_m[i] & ((const char *)(nvgre_v->tni))[i];
        flow_dv_translate_item_gre(matcher, key, item, inner);
}

/**
 * Add VXLAN item to matcher and to the value.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 */
static void
flow_dv_translate_item_vxlan(void *matcher, void *key,
                             const struct rte_flow_item *item,
                             int inner)
{
        const struct rte_flow_item_vxlan *vxlan_m = item->mask;
        const struct rte_flow_item_vxlan *vxlan_v = item->spec;
        void *headers_m;
        void *headers_v;
        void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
        void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
        char *vni_m;
        char *vni_v;
        uint16_t dport;
        int size;
        int i;

        if (!vxlan_v)
                return;
        if (!vxlan_m)
                vxlan_m = &rte_flow_item_vxlan_mask;
        if (inner) {
                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
                                         inner_headers);
                headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
        } else {
                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
                                         outer_headers);
                headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
        }
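        /*
         * If the pattern did not already match on the UDP destination
         * port, pin it to the well-known VXLAN(-GPE) port.
         */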
        dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
                MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
        if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
                MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
                MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
        }
        size = sizeof(vxlan_m->vni);
        vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni);
        vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni);
        memcpy(vni_m, vxlan_m->vni, size);
        for (i = 0; i < size; ++i)
                vni_v[i] = vni_m[i] & vxlan_v->vni[i];
}

/**
 * Update the matcher and the value based on the selected item.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in, out] dev_flow
 *   Pointer to the mlx5_flow.
 * @param[in] inner
 *   Item is inner pattern.
 */
static void
flow_dv_create_item(void *matcher, void *key,
                    const struct rte_flow_item *item,
                    struct mlx5_flow *dev_flow,
                    int inner)
{
        struct mlx5_flow_dv_matcher *tmatcher = matcher;

        switch (item->type) {
        case RTE_FLOW_ITEM_TYPE_VOID:
        case RTE_FLOW_ITEM_TYPE_END:
                break;
        case RTE_FLOW_ITEM_TYPE_ETH:
                flow_dv_translate_item_eth(tmatcher->mask.buf, key, item,
                                           inner);
                tmatcher->priority = MLX5_PRIORITY_MAP_L2;
                break;
        case RTE_FLOW_ITEM_TYPE_VLAN:
                flow_dv_translate_item_vlan(tmatcher->mask.buf, key, item,
                                            inner);
                break;
        case RTE_FLOW_ITEM_TYPE_IPV4:
                flow_dv_translate_item_ipv4(tmatcher->mask.buf, key, item,
                                            inner);
                tmatcher->priority = MLX5_PRIORITY_MAP_L3;
                dev_flow->dv.hash_fields |=
                        mlx5_flow_hashfields_adjust(dev_flow, inner,
                                                    MLX5_IPV4_LAYER_TYPES,
                                                    MLX5_IPV4_IBV_RX_HASH);
                break;
        case RTE_FLOW_ITEM_TYPE_IPV6:
                flow_dv_translate_item_ipv6(tmatcher->mask.buf, key, item,
                                            inner);
                tmatcher->priority = MLX5_PRIORITY_MAP_L3;
                dev_flow->dv.hash_fields |=
                        mlx5_flow_hashfields_adjust(dev_flow, inner,
                                                    MLX5_IPV6_LAYER_TYPES,
                                                    MLX5_IPV6_IBV_RX_HASH);
                break;
        case RTE_FLOW_ITEM_TYPE_TCP:
                flow_dv_translate_item_tcp(tmatcher->mask.buf, key, item,
                                           inner);
                tmatcher->priority = MLX5_PRIORITY_MAP_L4;
                dev_flow->dv.hash_fields |=
                        mlx5_flow_hashfields_adjust(dev_flow, inner,
                                                    ETH_RSS_TCP,
                                                    (IBV_RX_HASH_SRC_PORT_TCP |
                                                     IBV_RX_HASH_DST_PORT_TCP));
                break;
        case RTE_FLOW_ITEM_TYPE_UDP:
                flow_dv_translate_item_udp(tmatcher->mask.buf, key, item,
                                           inner);
                tmatcher->priority = MLX5_PRIORITY_MAP_L4;
                dev_flow->dv.hash_fields |=
                        mlx5_flow_hashfields_adjust(dev_flow, inner,
                                                    ETH_RSS_UDP,
                                                    (IBV_RX_HASH_SRC_PORT_UDP |
                                                     IBV_RX_HASH_DST_PORT_UDP));
                break;
        case RTE_FLOW_ITEM_TYPE_NVGRE:
                flow_dv_translate_item_nvgre(tmatcher->mask.buf, key, item,
                                             inner);
                break;
        case RTE_FLOW_ITEM_TYPE_GRE:
                flow_dv_translate_item_gre(tmatcher->mask.buf, key, item,
                                           inner);
                break;
        case RTE_FLOW_ITEM_TYPE_VXLAN:
        case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
                flow_dv_translate_item_vxlan(tmatcher->mask.buf, key, item,
                                             inner);
                break;
        default:
                break;
        }
}

/**
 * Store the requested actions in an array.
 *
 * @param[in] action
 *   Flow action to translate.
 * @param[in, out] dev_flow
 *   Pointer to the mlx5_flow.
 */
static void
flow_dv_create_action(const struct rte_flow_action *action,
                      struct mlx5_flow *dev_flow)
{
        const struct rte_flow_action_queue *queue;
        const struct rte_flow_action_rss *rss;
        int actions_n = dev_flow->dv.actions_n;
        struct rte_flow *flow = dev_flow->flow;

        switch (action->type) {
        case RTE_FLOW_ACTION_TYPE_VOID:
                break;
        case RTE_FLOW_ACTION_TYPE_FLAG:
                dev_flow->dv.actions[actions_n].type = MLX5DV_FLOW_ACTION_TAG;
                dev_flow->dv.actions[actions_n].tag_value =
                        MLX5_FLOW_MARK_DEFAULT;
                actions_n++;
                break;
        case RTE_FLOW_ACTION_TYPE_MARK:
                dev_flow->dv.actions[actions_n].type = MLX5DV_FLOW_ACTION_TAG;
                dev_flow->dv.actions[actions_n].tag_value =
                        ((const struct rte_flow_action_mark *)
                         (action->conf))->id;
                actions_n++;
                break;
        case RTE_FLOW_ACTION_TYPE_DROP:
                dev_flow->dv.actions[actions_n].type = MLX5DV_FLOW_ACTION_DROP;
                flow->actions |= MLX5_FLOW_ACTION_DROP;
                break;
        case RTE_FLOW_ACTION_TYPE_QUEUE:
                queue = action->conf;
                flow->rss.queue_num = 1;
                (*flow->queue)[0] = queue->index;
                break;
        case RTE_FLOW_ACTION_TYPE_RSS:
                rss = action->conf;
                if (flow->queue)
                        memcpy((*flow->queue), rss->queue,
                               rss->queue_num * sizeof(uint16_t));
                flow->rss.queue_num = rss->queue_num;
                memcpy(flow->key, rss->key, MLX5_RSS_HASH_KEY_LEN);
                flow->rss.types = rss->types;
                flow->rss.level = rss->level;
                /* Added to the array only in apply since we need the QP. */
                break;
        default:
                break;
        }
        dev_flow->dv.actions_n = actions_n;
}

static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };

#define HEADER_IS_ZERO(match_criteria, headers)                              \
        !(memcmp(MLX5_ADDR_OF(fte_match_param, match_criteria, headers),     \
                 matcher_zero, MLX5_FLD_SZ_BYTES(fte_match_param, headers)))

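/*
 * Example: a matcher mask that only sets fields under outer_headers
 * makes flow_dv_matcher_enable() below return a bitmap with just
 * MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT set.
 */
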
/**
 * Calculate flow matcher enable bitmap.
 *
 * @param match_criteria
 *   Pointer to flow matcher criteria.
 *
 * @return
 *   Bitmap of enabled fields.
 */
static uint8_t
flow_dv_matcher_enable(uint32_t *match_criteria)
{
        uint8_t match_criteria_enable;

        match_criteria_enable =
                (!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
                MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT;
        match_criteria_enable |=
                (!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
                MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT;
        match_criteria_enable |=
                (!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
                MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT;
        match_criteria_enable |=
                (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
                MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;

        return match_criteria_enable;
}

/**
 * Register the flow matcher.
 *
 * @param[in, out] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in, out] matcher
 *   Pointer to flow matcher.
 * @param[in, out] dev_flow
 *   Pointer to the dev_flow.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_matcher_register(struct rte_eth_dev *dev,
                         struct mlx5_flow_dv_matcher *matcher,
                         struct mlx5_flow *dev_flow,
                         struct rte_flow_error *error)
{
        struct priv *priv = dev->data->dev_private;
        struct mlx5_flow_dv_matcher *cache_matcher;
        struct mlx5dv_flow_matcher_attr dv_attr = {
                .type = IBV_FLOW_ATTR_NORMAL,
                .match_mask = (void *)&matcher->mask,
        };

        /* Lookup from cache. */
        LIST_FOREACH(cache_matcher, &priv->matchers, next) {
                if (matcher->crc == cache_matcher->crc &&
                    matcher->priority == cache_matcher->priority &&
                    matcher->egress == cache_matcher->egress &&
                    !memcmp((const void *)matcher->mask.buf,
                            (const void *)cache_matcher->mask.buf,
                            cache_matcher->mask.size)) {
                        DRV_LOG(DEBUG,
                                "priority %hd use %s matcher %p: refcnt %d++",
                                cache_matcher->priority,
                                cache_matcher->egress ? "tx" : "rx",
                                (void *)cache_matcher,
                                rte_atomic32_read(&cache_matcher->refcnt));
                        rte_atomic32_inc(&cache_matcher->refcnt);
                        dev_flow->dv.matcher = cache_matcher;
                        return 0;
                }
        }
        /* Register new matcher. */
        cache_matcher = rte_calloc(__func__, 1, sizeof(*cache_matcher), 0);
        if (!cache_matcher)
                return rte_flow_error_set(error, ENOMEM,
                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                          "cannot allocate matcher memory");
        *cache_matcher = *matcher;
        dv_attr.match_criteria_enable =
                flow_dv_matcher_enable(cache_matcher->mask.buf);
        dv_attr.priority = matcher->priority;
        if (matcher->egress)
                dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
        cache_matcher->matcher_object =
                mlx5_glue->dv_create_flow_matcher(priv->ctx, &dv_attr);
        if (!cache_matcher->matcher_object) {
                rte_free(cache_matcher);
                return rte_flow_error_set(error, ENOMEM,
                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                          NULL, "cannot create matcher");
        }
        rte_atomic32_inc(&cache_matcher->refcnt);
        LIST_INSERT_HEAD(&priv->matchers, cache_matcher, next);
        dev_flow->dv.matcher = cache_matcher;
        DRV_LOG(DEBUG, "priority %hd new %s matcher %p: refcnt %d",
                cache_matcher->priority,
                cache_matcher->egress ? "tx" : "rx", (void *)cache_matcher,
                rte_atomic32_read(&cache_matcher->refcnt));
        return 0;
}

/**
 * Fill the flow with DV spec.
 *
 * @param[in] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in, out] dev_flow
 *   Pointer to the sub flow.
 * @param[in] attr
 *   Pointer to the flow attributes.
 * @param[in] items
 *   Pointer to the list of items.
 * @param[in] actions
 *   Pointer to the list of actions.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_translate(struct rte_eth_dev *dev,
                  struct mlx5_flow *dev_flow,
                  const struct rte_flow_attr *attr,
                  const struct rte_flow_item items[],
                  const struct rte_flow_action actions[],
                  struct rte_flow_error *error)
{
        struct priv *priv = dev->data->dev_private;
        uint64_t priority = attr->priority;
        struct mlx5_flow_dv_matcher matcher = {
                .mask = {
                        .size = sizeof(matcher.mask.buf),
                },
        };
        void *match_value = dev_flow->dv.value.buf;
        uint8_t inner = 0;

        if (priority == MLX5_FLOW_PRIO_RSVD)
                priority = priv->config.flow_prio - 1;
        for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++)
                flow_dv_create_item(&matcher, match_value, items, dev_flow,
                                    inner);
        matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
                                    matcher.mask.size);
        matcher.priority = mlx5_flow_adjust_priority(dev, priority,
                                                     matcher.priority);
        matcher.egress = attr->egress;
        if (flow_dv_matcher_register(dev, &matcher, dev_flow, error))
                return -rte_errno;
        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++)
                flow_dv_create_action(actions, dev_flow);
        return 0;
}

/**
 * Apply the flow to the NIC.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in, out] flow
 *   Pointer to flow structure.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
              struct rte_flow_error *error)
{
        struct mlx5_flow_dv *dv;
        struct mlx5_flow *dev_flow;
        int n;
        int err;

        LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
                dv = &dev_flow->dv;
                n = dv->actions_n;
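                /*
                 * The fate action (drop queue or hash Rx queue) is appended
                 * here rather than in flow_dv_create_action() because the
                 * QP only becomes available at apply time; in the drop case
                 * it takes over the slot left at index n.
                 */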
                if (flow->actions & MLX5_FLOW_ACTION_DROP) {
                        dv->hrxq = mlx5_hrxq_drop_new(dev);
                        if (!dv->hrxq) {
                                rte_flow_error_set
                                        (error, errno,
                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                         "cannot get drop hash queue");
                                goto error;
                        }
                        dv->actions[n].type = MLX5DV_FLOW_ACTION_DEST_IBV_QP;
                        dv->actions[n].qp = dv->hrxq->qp;
                        n++;
                } else {
                        struct mlx5_hrxq *hrxq;

                        hrxq = mlx5_hrxq_get(dev, flow->key,
                                             MLX5_RSS_HASH_KEY_LEN,
                                             dv->hash_fields,
                                             (*flow->queue),
                                             flow->rss.queue_num);
                        if (!hrxq)
                                hrxq = mlx5_hrxq_new
                                        (dev, flow->key, MLX5_RSS_HASH_KEY_LEN,
                                         dv->hash_fields, (*flow->queue),
                                         flow->rss.queue_num,
                                         !!(flow->layers &
                                            MLX5_FLOW_LAYER_TUNNEL));
                        if (!hrxq) {
                                rte_flow_error_set
                                        (error, rte_errno,
                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                         "cannot get hash queue");
                                goto error;
                        }
                        dv->hrxq = hrxq;
                        dv->actions[n].type = MLX5DV_FLOW_ACTION_DEST_IBV_QP;
                        dv->actions[n].qp = hrxq->qp;
                        n++;
                }
                dv->flow =
                        mlx5_glue->dv_create_flow(dv->matcher->matcher_object,
                                                  (void *)&dv->value, n,
                                                  dv->actions);
                if (!dv->flow) {
                        rte_flow_error_set(error, errno,
                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                           NULL,
                                           "hardware refuses to create flow");
                        goto error;
                }
        }
        return 0;
error:
        err = rte_errno; /* Save rte_errno before cleanup. */
        LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
                struct mlx5_flow_dv *dv = &dev_flow->dv;

                if (dv->hrxq) {
                        if (flow->actions & MLX5_FLOW_ACTION_DROP)
                                mlx5_hrxq_drop_release(dev);
                        else
                                mlx5_hrxq_release(dev, dv->hrxq);
                        dv->hrxq = NULL;
                }
        }
        rte_errno = err; /* Restore rte_errno. */
        return -rte_errno;
}

/**
 * Release the flow matcher.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param flow
 *   Pointer to mlx5_flow.
 *
 * @return
 *   1 while a reference on it exists, 0 when freed.
 */
static int
flow_dv_matcher_release(struct rte_eth_dev *dev,
                        struct mlx5_flow *flow)
{
        struct mlx5_flow_dv_matcher *matcher = flow->dv.matcher;

        assert(matcher->matcher_object);
        DRV_LOG(DEBUG, "port %u matcher %p: refcnt %d--",
                dev->data->port_id, (void *)matcher,
                rte_atomic32_read(&matcher->refcnt));
        if (rte_atomic32_dec_and_test(&matcher->refcnt)) {
                claim_zero(mlx5_glue->dv_destroy_flow_matcher
                           (matcher->matcher_object));
                LIST_REMOVE(matcher, next);
                rte_free(matcher);
                DRV_LOG(DEBUG, "port %u matcher %p: removed",
                        dev->data->port_id, (void *)matcher);
                return 0;
        }
        return 1;
}

/**
 * Remove the flow from the NIC but keep it in memory.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in, out] flow
 *   Pointer to flow structure.
 */
static void
flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
{
        struct mlx5_flow_dv *dv;
        struct mlx5_flow *dev_flow;

        if (!flow)
                return;
        LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
                dv = &dev_flow->dv;
                if (dv->flow) {
                        claim_zero(mlx5_glue->destroy_flow(dv->flow));
                        dv->flow = NULL;
                }
                if (dv->hrxq) {
                        if (flow->actions & MLX5_FLOW_ACTION_DROP)
                                mlx5_hrxq_drop_release(dev);
                        else
                                mlx5_hrxq_release(dev, dv->hrxq);
                        dv->hrxq = NULL;
                }
        }
        if (flow->counter)
                flow->counter = NULL;
}

/**
 * Remove the flow from the NIC and the memory.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in, out] flow
 *   Pointer to flow structure.
 */
static void
flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
{
        struct mlx5_flow *dev_flow;

        if (!flow)
                return;
        flow_dv_remove(dev, flow);
        while (!LIST_EMPTY(&flow->dev_flows)) {
                dev_flow = LIST_FIRST(&flow->dev_flows);
                LIST_REMOVE(dev_flow, next);
                if (dev_flow->dv.matcher)
                        flow_dv_matcher_release(dev, dev_flow);
                rte_free(dev_flow);
        }
}

const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
        .validate = flow_dv_validate,
        .prepare = flow_dv_prepare,
        .translate = flow_dv_translate,
        .apply = flow_dv_apply,
        .remove = flow_dv_remove,
        .destroy = flow_dv_destroy,
};
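
/*
 * A minimal dispatch sketch (illustrative, not part of this file): the
 * generic layer is expected to select an ops table at runtime and drive
 * a flow through it, e.g.:
 *
 *     const struct mlx5_flow_driver_ops *fops = &mlx5_flow_dv_drv_ops;
 *
 *     if (fops->validate(dev, attr, items, actions, error))
 *             return -rte_errno;
 *     dev_flow = fops->prepare(attr, items, actions, &item_flags,
 *                              &action_flags, error);
 */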

#endif /* HAVE_IBV_FLOW_DV_SUPPORT */