/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018 Mellanox Technologies, Ltd
 */

#include <sys/queue.h>
#include <stdalign.h>
#include <stdint.h>
#include <string.h>

/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_common.h>
#include <rte_ether.h>
#include <rte_eth_ctrl.h>
#include <rte_ethdev_driver.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>
#include <rte_ip.h>

#include "mlx5.h"
#include "mlx5_defs.h"
#include "mlx5_prm.h"
#include "mlx5_glue.h"
#include "mlx5_flow.h"

#ifdef HAVE_IBV_FLOW_DV_SUPPORT

/**
 * Verify the @p attributes will be correctly understood by the NIC and store
 * them in the @p flow if everything is correct.
 *
 * @param[in] dev
 *   Pointer to dev struct.
 * @param[in] attributes
 *   Pointer to flow attributes.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_attributes(struct rte_eth_dev *dev,
                            const struct rte_flow_attr *attributes,
                            struct rte_flow_error *error)
{
        struct priv *priv = dev->data->dev_private;
        uint32_t priority_max = priv->config.flow_prio - 1;

        if (attributes->group)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
                                          NULL,
                                          "groups are not supported");
        if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
            attributes->priority >= priority_max)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                                          NULL,
                                          "priority out of range");
        if (attributes->transfer)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
                                          NULL,
                                          "transfer is not supported");
        if (!(attributes->egress ^ attributes->ingress))
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ATTR, NULL,
                                          "must specify exactly one of "
                                          "ingress or egress");
        return 0;
}
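
/*
 * Illustrative note (not part of the original driver): an attribute set
 * such as { .ingress = 1, .priority = MLX5_FLOW_PRIO_RSVD } passes the
 * checks above, while a non-zero group, a transfer flow, or setting both
 * ingress and egress is rejected with ENOTSUP.
 */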

/**
 * Internal validation function. Validates both actions and items.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] attr
 *   Pointer to the flow attributes.
 * @param[in] items
 *   Pointer to the list of items.
 * @param[in] actions
 *   Pointer to the list of actions.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
                 const struct rte_flow_item items[],
                 const struct rte_flow_action actions[],
                 struct rte_flow_error *error)
{
        int ret;
        uint32_t action_flags = 0;
        uint32_t item_flags = 0;
        int tunnel = 0;
        uint8_t next_protocol = 0xff;
        int actions_n = 0;

        if (items == NULL)
                return -1;
        ret = flow_dv_validate_attributes(dev, attr, error);
        if (ret < 0)
                return ret;
        for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
                switch (items->type) {
                case RTE_FLOW_ITEM_TYPE_VOID:
                        break;
                case RTE_FLOW_ITEM_TYPE_ETH:
                        ret = mlx5_flow_validate_item_eth(items, item_flags,
                                                          error);
                        if (ret < 0)
                                return ret;
                        item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
                                               MLX5_FLOW_LAYER_OUTER_L2;
                        break;
                case RTE_FLOW_ITEM_TYPE_VLAN:
                        ret = mlx5_flow_validate_item_vlan(items, item_flags,
                                                           error);
                        if (ret < 0)
                                return ret;
                        item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
                                               MLX5_FLOW_LAYER_OUTER_VLAN;
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV4:
                        ret = mlx5_flow_validate_item_ipv4(items, item_flags,
                                                           error);
                        if (ret < 0)
                                return ret;
                        item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
                                               MLX5_FLOW_LAYER_OUTER_L3_IPV4;
                        if (items->mask != NULL &&
                            ((const struct rte_flow_item_ipv4 *)
                             items->mask)->hdr.next_proto_id)
                                next_protocol =
                                        ((const struct rte_flow_item_ipv4 *)
                                         (items->spec))->hdr.next_proto_id;
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV6:
                        ret = mlx5_flow_validate_item_ipv6(items, item_flags,
                                                           error);
                        if (ret < 0)
                                return ret;
                        item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
                                               MLX5_FLOW_LAYER_OUTER_L3_IPV6;
                        if (items->mask != NULL &&
                            ((const struct rte_flow_item_ipv6 *)
                             items->mask)->hdr.proto)
                                next_protocol =
                                        ((const struct rte_flow_item_ipv6 *)
                                         items->spec)->hdr.proto;
                        break;
                case RTE_FLOW_ITEM_TYPE_UDP:
                        ret = mlx5_flow_validate_item_udp(items, item_flags,
                                                          next_protocol,
                                                          error);
                        if (ret < 0)
                                return ret;
                        item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
                                               MLX5_FLOW_LAYER_OUTER_L4_UDP;
                        break;
                case RTE_FLOW_ITEM_TYPE_TCP:
                        ret = mlx5_flow_validate_item_tcp
                                                (items, item_flags,
                                                 next_protocol,
                                                 &rte_flow_item_tcp_mask,
                                                 error);
                        if (ret < 0)
                                return ret;
                        item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
                                               MLX5_FLOW_LAYER_OUTER_L4_TCP;
                        break;
                case RTE_FLOW_ITEM_TYPE_VXLAN:
                        ret = mlx5_flow_validate_item_vxlan(items, item_flags,
                                                            error);
                        if (ret < 0)
                                return ret;
                        item_flags |= MLX5_FLOW_LAYER_VXLAN;
                        break;
                case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
                        ret = mlx5_flow_validate_item_vxlan_gpe(items,
                                                                item_flags, dev,
                                                                error);
                        if (ret < 0)
                                return ret;
                        item_flags |= MLX5_FLOW_LAYER_VXLAN_GPE;
                        break;
                case RTE_FLOW_ITEM_TYPE_GRE:
                        ret = mlx5_flow_validate_item_gre(items, item_flags,
                                                          next_protocol, error);
                        if (ret < 0)
                                return ret;
                        item_flags |= MLX5_FLOW_LAYER_GRE;
                        break;
                case RTE_FLOW_ITEM_TYPE_MPLS:
                        ret = mlx5_flow_validate_item_mpls(items, item_flags,
                                                           next_protocol,
                                                           error);
                        if (ret < 0)
                                return ret;
                        item_flags |= MLX5_FLOW_LAYER_MPLS;
                        break;
                default:
                        return rte_flow_error_set(error, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ITEM,
                                                  NULL, "item not supported");
                }
        }
        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
                if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
                        return rte_flow_error_set(error, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                                  actions, "too many actions");
                tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
                switch (actions->type) {
                case RTE_FLOW_ACTION_TYPE_VOID:
                        break;
                case RTE_FLOW_ACTION_TYPE_FLAG:
                        ret = mlx5_flow_validate_action_flag(action_flags,
                                                             attr, error);
                        if (ret < 0)
                                return ret;
                        action_flags |= MLX5_FLOW_ACTION_FLAG;
                        ++actions_n;
                        break;
                case RTE_FLOW_ACTION_TYPE_MARK:
                        ret = mlx5_flow_validate_action_mark(actions,
                                                             action_flags,
                                                             attr, error);
                        if (ret < 0)
                                return ret;
                        action_flags |= MLX5_FLOW_ACTION_MARK;
                        ++actions_n;
                        break;
                case RTE_FLOW_ACTION_TYPE_DROP:
                        ret = mlx5_flow_validate_action_drop(action_flags,
                                                             attr, error);
                        if (ret < 0)
                                return ret;
                        action_flags |= MLX5_FLOW_ACTION_DROP;
                        ++actions_n;
                        break;
                case RTE_FLOW_ACTION_TYPE_QUEUE:
                        ret = mlx5_flow_validate_action_queue(actions,
                                                              action_flags, dev,
                                                              attr, error);
                        if (ret < 0)
                                return ret;
                        action_flags |= MLX5_FLOW_ACTION_QUEUE;
                        ++actions_n;
                        break;
                case RTE_FLOW_ACTION_TYPE_RSS:
                        ret = mlx5_flow_validate_action_rss(actions,
                                                            action_flags, dev,
                                                            attr, error);
                        if (ret < 0)
                                return ret;
                        action_flags |= MLX5_FLOW_ACTION_RSS;
                        ++actions_n;
                        break;
                case RTE_FLOW_ACTION_TYPE_COUNT:
                        ret = mlx5_flow_validate_action_count(dev, attr, error);
                        if (ret < 0)
                                return ret;
                        action_flags |= MLX5_FLOW_ACTION_COUNT;
                        ++actions_n;
                        break;
                default:
                        return rte_flow_error_set(error, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                                  actions,
                                                  "action not supported");
                }
        }
        if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) && attr->ingress)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, actions,
                                          "no fate action is found");
        return 0;
}

/**
 * Internal preparation function. Allocates the DV flow structure; for the
 * DV path this size is constant.
 *
 * @param[in] attr
 *   Pointer to the flow attributes.
 * @param[in] items
 *   Pointer to the list of items.
 * @param[in] actions
 *   Pointer to the list of actions.
 * @param[out] item_flags
 *   Pointer to bit mask of all items detected.
 * @param[out] action_flags
 *   Pointer to bit mask of all actions detected.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   Pointer to mlx5_flow object on success,
 *   otherwise NULL and rte_errno is set.
 */
static struct mlx5_flow *
flow_dv_prepare(const struct rte_flow_attr *attr __rte_unused,
                const struct rte_flow_item items[] __rte_unused,
                const struct rte_flow_action actions[] __rte_unused,
                uint64_t *item_flags __rte_unused,
                uint64_t *action_flags __rte_unused,
                struct rte_flow_error *error)
{
        uint32_t size = sizeof(struct mlx5_flow);
        struct mlx5_flow *flow;

        flow = rte_calloc(__func__, 1, size, 0);
        if (!flow) {
                rte_flow_error_set(error, ENOMEM,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                   "not enough memory to create flow");
                return NULL;
        }
        flow->dv.value.size = MLX5_ST_SZ_DB(fte_match_param);
        return flow;
}

/**
 * Add Ethernet item to matcher and to the value.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 */
static void
flow_dv_translate_item_eth(void *matcher, void *key,
                           const struct rte_flow_item *item, int inner)
{
        const struct rte_flow_item_eth *eth_m = item->mask;
        const struct rte_flow_item_eth *eth_v = item->spec;
        const struct rte_flow_item_eth nic_mask = {
                .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
                .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
                .type = RTE_BE16(0xffff),
        };
        void *headers_m;
        void *headers_v;
        char *l24_v;
        unsigned int i;

        if (!eth_v)
                return;
        if (!eth_m)
                eth_m = &nic_mask;
        if (inner) {
                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
                                         inner_headers);
                headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
        } else {
                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
                                         outer_headers);
                headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
        }
        memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, dmac_47_16),
               &eth_m->dst, sizeof(eth_m->dst));
        /* The value must be in the range of the mask. */
        l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, dmac_47_16);
        for (i = 0; i < sizeof(eth_m->dst); ++i)
                l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i];
        memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, smac_47_16),
               &eth_m->src, sizeof(eth_m->src));
        l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, smac_47_16);
        /* The value must be in the range of the mask. */
        for (i = 0; i < sizeof(eth_m->src); ++i)
                l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
        MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
                 rte_be_to_cpu_16(eth_m->type));
        l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, ethertype);
        *(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
}
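
/*
 * Illustrative example (not from the original source): with spec
 * dst = 00:11:22:33:44:55 and mask = ff:ff:ff:00:00:00, the matcher gets
 * dmac ff:ff:ff:00:00:00 and the key gets 00:11:22:00:00:00, i.e. the
 * value is ANDed with the mask so it never exceeds it, as noted above.
 */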

/**
 * Add VLAN item to matcher and to the value.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 */
static void
flow_dv_translate_item_vlan(void *matcher, void *key,
                            const struct rte_flow_item *item,
                            int inner)
{
        const struct rte_flow_item_vlan *vlan_m = item->mask;
        const struct rte_flow_item_vlan *vlan_v = item->spec;
        const struct rte_flow_item_vlan nic_mask = {
                .tci = RTE_BE16(0x0fff),
                .inner_type = RTE_BE16(0xffff),
        };
        void *headers_m;
        void *headers_v;
        uint16_t tci_m;
        uint16_t tci_v;

        if (!vlan_v)
                return;
        if (!vlan_m)
                vlan_m = &nic_mask;
        if (inner) {
                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
                                         inner_headers);
                headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
        } else {
                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
                                         outer_headers);
                headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
        }
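        /*
         * VLAN TCI layout: PCP (3 bits) | CFI/DEI (1 bit) | VID (12 bits);
         * the >> 12 and >> 13 shifts below extract the CFI and priority
         * fields from the host-order TCI.
         */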
        tci_m = rte_be_to_cpu_16(vlan_m->tci);
        tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
        MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1);
        MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_vid, tci_m);
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, tci_v);
        MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_cfi, tci_m >> 12);
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_cfi, tci_v >> 12);
        MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_prio, tci_m >> 13);
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, tci_v >> 13);
}

/**
 * Add IPV4 item to matcher and to the value.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 */
static void
flow_dv_translate_item_ipv4(void *matcher, void *key,
                            const struct rte_flow_item *item,
                            int inner)
{
        const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
        const struct rte_flow_item_ipv4 *ipv4_v = item->spec;
        const struct rte_flow_item_ipv4 nic_mask = {
                .hdr = {
                        .src_addr = RTE_BE32(0xffffffff),
                        .dst_addr = RTE_BE32(0xffffffff),
                        .type_of_service = 0xff,
                        .next_proto_id = 0xff,
                },
        };
        void *headers_m;
        void *headers_v;
        char *l24_m;
        char *l24_v;
        uint8_t tos;

        if (!ipv4_v)
                return;
        if (!ipv4_m)
                ipv4_m = &nic_mask;
        if (inner) {
                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
                                         inner_headers);
                headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
        } else {
                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
                                         outer_headers);
                headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
        }
        MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 4);
        l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
                             dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
        l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                             dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
        *(uint32_t *)l24_m = ipv4_m->hdr.dst_addr;
        *(uint32_t *)l24_v = ipv4_m->hdr.dst_addr & ipv4_v->hdr.dst_addr;
        l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
                             src_ipv4_src_ipv6.ipv4_layout.ipv4);
        l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                             src_ipv4_src_ipv6.ipv4_layout.ipv4);
        *(uint32_t *)l24_m = ipv4_m->hdr.src_addr;
        *(uint32_t *)l24_v = ipv4_m->hdr.src_addr & ipv4_v->hdr.src_addr;
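        /*
         * The 8-bit ToS byte is DSCP (upper 6 bits) plus ECN (lower 2 bits),
         * so ip_dscp takes tos >> 2 while ip_ecn takes the low bits
         * (MLX5_SET() truncates the value to the field width).
         */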
        tos = ipv4_m->hdr.type_of_service & ipv4_v->hdr.type_of_service;
        MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn,
                 ipv4_m->hdr.type_of_service);
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, tos);
        MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp,
                 ipv4_m->hdr.type_of_service >> 2);
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, tos >> 2);
        MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
                 ipv4_m->hdr.next_proto_id);
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
                 ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id);
}

/**
 * Add IPV6 item to matcher and to the value.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 */
static void
flow_dv_translate_item_ipv6(void *matcher, void *key,
                            const struct rte_flow_item *item,
                            int inner)
{
        const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
        const struct rte_flow_item_ipv6 *ipv6_v = item->spec;
        const struct rte_flow_item_ipv6 nic_mask = {
                .hdr = {
                        .src_addr =
                                "\xff\xff\xff\xff\xff\xff\xff\xff"
                                "\xff\xff\xff\xff\xff\xff\xff\xff",
                        .dst_addr =
                                "\xff\xff\xff\xff\xff\xff\xff\xff"
                                "\xff\xff\xff\xff\xff\xff\xff\xff",
                        .vtc_flow = RTE_BE32(0xffffffff),
                        .proto = 0xff,
                        .hop_limits = 0xff,
                },
        };
        void *headers_m;
        void *headers_v;
        void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
        void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
        char *l24_m;
        char *l24_v;
        uint32_t vtc_m;
        uint32_t vtc_v;
        int i;
        int size;

        if (!ipv6_v)
                return;
        if (!ipv6_m)
                ipv6_m = &nic_mask;
        if (inner) {
                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
                                         inner_headers);
                headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
        } else {
                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
                                         outer_headers);
                headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
        }
        size = sizeof(ipv6_m->hdr.dst_addr);
        l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
                             dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
        l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                             dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
        memcpy(l24_m, ipv6_m->hdr.dst_addr, size);
        for (i = 0; i < size; ++i)
                l24_v[i] = l24_m[i] & ipv6_v->hdr.dst_addr[i];
        l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
                             src_ipv4_src_ipv6.ipv6_layout.ipv6);
        l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                             src_ipv4_src_ipv6.ipv6_layout.ipv6);
        memcpy(l24_m, ipv6_m->hdr.src_addr, size);
        for (i = 0; i < size; ++i)
                l24_v[i] = l24_m[i] & ipv6_v->hdr.src_addr[i];
        MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 6);
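        /*
         * vtc_flow, in host order, is version (4 bits) | traffic class
         * (8 bits) | flow label (20 bits); the shifts below extract the
         * ECN (bits 20-21) and DSCP (bits 22-27) from it.
         */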
        /* TOS. */
        vtc_m = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow);
        vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow);
        MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, vtc_m >> 20);
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20);
        MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, vtc_m >> 22);
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, vtc_v >> 22);
        /* Label. */
        if (inner) {
                MLX5_SET(fte_match_set_misc, misc_m, inner_ipv6_flow_label,
                         vtc_m);
                MLX5_SET(fte_match_set_misc, misc_v, inner_ipv6_flow_label,
                         vtc_v);
        } else {
                MLX5_SET(fte_match_set_misc, misc_m, outer_ipv6_flow_label,
                         vtc_m);
                MLX5_SET(fte_match_set_misc, misc_v, outer_ipv6_flow_label,
                         vtc_v);
        }
        /* Protocol. */
        MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
                 ipv6_m->hdr.proto);
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
                 ipv6_v->hdr.proto & ipv6_m->hdr.proto);
}

/**
 * Add TCP item to matcher and to the value.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 */
static void
flow_dv_translate_item_tcp(void *matcher, void *key,
                           const struct rte_flow_item *item,
                           int inner)
{
        const struct rte_flow_item_tcp *tcp_m = item->mask;
        const struct rte_flow_item_tcp *tcp_v = item->spec;
        void *headers_m;
        void *headers_v;

        if (!tcp_v)
                return;
        if (!tcp_m)
                tcp_m = &rte_flow_item_tcp_mask;
        if (inner) {
                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
                                         inner_headers);
                headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
        } else {
                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
                                         outer_headers);
                headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
        }
        MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP);
        MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_sport,
                 rte_be_to_cpu_16(tcp_m->hdr.src_port));
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
                 rte_be_to_cpu_16(tcp_v->hdr.src_port & tcp_m->hdr.src_port));
        MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_dport,
                 rte_be_to_cpu_16(tcp_m->hdr.dst_port));
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
                 rte_be_to_cpu_16(tcp_v->hdr.dst_port & tcp_m->hdr.dst_port));
}

/**
 * Add UDP item to matcher and to the value.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 */
static void
flow_dv_translate_item_udp(void *matcher, void *key,
                           const struct rte_flow_item *item,
                           int inner)
{
        const struct rte_flow_item_udp *udp_m = item->mask;
        const struct rte_flow_item_udp *udp_v = item->spec;
        void *headers_m;
        void *headers_v;

        if (!udp_v)
                return;
        if (!udp_m)
                udp_m = &rte_flow_item_udp_mask;
        if (inner) {
                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
                                         inner_headers);
                headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
        } else {
                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
                                         outer_headers);
                headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
        }
        MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
        MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_sport,
                 rte_be_to_cpu_16(udp_m->hdr.src_port));
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
                 rte_be_to_cpu_16(udp_v->hdr.src_port & udp_m->hdr.src_port));
        MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
                 rte_be_to_cpu_16(udp_m->hdr.dst_port));
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
                 rte_be_to_cpu_16(udp_v->hdr.dst_port & udp_m->hdr.dst_port));
}

/**
 * Add GRE item to matcher and to the value.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 */
static void
flow_dv_translate_item_gre(void *matcher, void *key,
                           const struct rte_flow_item *item,
                           int inner)
{
        const struct rte_flow_item_gre *gre_m = item->mask;
        const struct rte_flow_item_gre *gre_v = item->spec;
        void *headers_m;
        void *headers_v;
        void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
        void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);

        if (!gre_v)
                return;
        if (!gre_m)
                gre_m = &rte_flow_item_gre_mask;
        if (inner) {
                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
                                         inner_headers);
                headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
        } else {
                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
                                         outer_headers);
                headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
        }
        MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
        MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
                 rte_be_to_cpu_16(gre_m->protocol));
        MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
                 rte_be_to_cpu_16(gre_v->protocol & gre_m->protocol));
}

/**
 * Add NVGRE item to matcher and to the value.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 */
static void
flow_dv_translate_item_nvgre(void *matcher, void *key,
                             const struct rte_flow_item *item,
                             int inner)
{
        const struct rte_flow_item_nvgre *nvgre_m = item->mask;
        const struct rte_flow_item_nvgre *nvgre_v = item->spec;
        void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
        void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
        const char *tni_flow_id_m;
        const char *tni_flow_id_v;
        char *gre_key_m;
        char *gre_key_v;
        int size;
        int i;

        if (!nvgre_v)
                return;
        if (!nvgre_m)
                nvgre_m = &rte_flow_item_nvgre_mask;
        /* Dereference spec/mask only after the NULL checks above. */
        tni_flow_id_m = (const char *)nvgre_m->tni;
        tni_flow_id_v = (const char *)nvgre_v->tni;
        size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id);
        gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h);
        gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h);
        memcpy(gre_key_m, tni_flow_id_m, size);
        for (i = 0; i < size; ++i)
                gre_key_v[i] = gre_key_m[i] & tni_flow_id_v[i];
        flow_dv_translate_item_gre(matcher, key, item, inner);
}

/**
 * Add VXLAN item to matcher and to the value.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 */
static void
flow_dv_translate_item_vxlan(void *matcher, void *key,
                             const struct rte_flow_item *item,
                             int inner)
{
        const struct rte_flow_item_vxlan *vxlan_m = item->mask;
        const struct rte_flow_item_vxlan *vxlan_v = item->spec;
        void *headers_m;
        void *headers_v;
        void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
        void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
        char *vni_m;
        char *vni_v;
        uint16_t dport;
        int size;
        int i;

        if (!vxlan_v)
                return;
        if (!vxlan_m)
                vxlan_m = &rte_flow_item_vxlan_mask;
        if (inner) {
                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
                                         inner_headers);
                headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
        } else {
                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
                                         outer_headers);
                headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
        }
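        /*
         * If the pattern does not already constrain the UDP destination
         * port, pin it to the well-known VXLAN or VXLAN-GPE port so the
         * tunnel match is unambiguous.
         */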
        dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
                MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
        if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
                MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
                MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
        }
        size = sizeof(vxlan_m->vni);
        vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni);
        vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni);
        memcpy(vni_m, vxlan_m->vni, size);
        for (i = 0; i < size; ++i)
                vni_v[i] = vni_m[i] & vxlan_v->vni[i];
}

/**
 * Update the matcher and the value based on the selected item.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in, out] dev_flow
 *   Pointer to the mlx5_flow.
 * @param[in] inner
 *   Item is inner pattern.
 */
static void
flow_dv_create_item(void *matcher, void *key,
                    const struct rte_flow_item *item,
                    struct mlx5_flow *dev_flow,
                    int inner)
{
        struct mlx5_flow_dv_matcher *tmatcher = matcher;

        switch (item->type) {
        case RTE_FLOW_ITEM_TYPE_VOID:
        case RTE_FLOW_ITEM_TYPE_END:
                break;
        case RTE_FLOW_ITEM_TYPE_ETH:
                flow_dv_translate_item_eth(tmatcher->mask.buf, key, item,
                                           inner);
                tmatcher->priority = MLX5_PRIORITY_MAP_L2;
                break;
        case RTE_FLOW_ITEM_TYPE_VLAN:
                flow_dv_translate_item_vlan(tmatcher->mask.buf, key, item,
                                            inner);
                break;
        case RTE_FLOW_ITEM_TYPE_IPV4:
                flow_dv_translate_item_ipv4(tmatcher->mask.buf, key, item,
                                            inner);
                tmatcher->priority = MLX5_PRIORITY_MAP_L3;
                dev_flow->dv.hash_fields |=
                        mlx5_flow_hashfields_adjust(dev_flow, inner,
                                                    MLX5_IPV4_LAYER_TYPES,
                                                    MLX5_IPV4_IBV_RX_HASH);
                break;
        case RTE_FLOW_ITEM_TYPE_IPV6:
                flow_dv_translate_item_ipv6(tmatcher->mask.buf, key, item,
                                            inner);
                tmatcher->priority = MLX5_PRIORITY_MAP_L3;
                dev_flow->dv.hash_fields |=
                        mlx5_flow_hashfields_adjust(dev_flow, inner,
                                                    MLX5_IPV6_LAYER_TYPES,
                                                    MLX5_IPV6_IBV_RX_HASH);
                break;
        case RTE_FLOW_ITEM_TYPE_TCP:
                flow_dv_translate_item_tcp(tmatcher->mask.buf, key, item,
                                           inner);
                tmatcher->priority = MLX5_PRIORITY_MAP_L4;
                dev_flow->dv.hash_fields |=
                        mlx5_flow_hashfields_adjust(dev_flow, inner,
                                                    ETH_RSS_TCP,
                                                    (IBV_RX_HASH_SRC_PORT_TCP |
                                                     IBV_RX_HASH_DST_PORT_TCP));
                break;
        case RTE_FLOW_ITEM_TYPE_UDP:
                flow_dv_translate_item_udp(tmatcher->mask.buf, key, item,
                                           inner);
                tmatcher->priority = MLX5_PRIORITY_MAP_L4;
                dev_flow->dv.hash_fields |=
                        mlx5_flow_hashfields_adjust(dev_flow, inner,
                                                    ETH_RSS_UDP,
                                                    (IBV_RX_HASH_SRC_PORT_UDP |
                                                     IBV_RX_HASH_DST_PORT_UDP));
                break;
        case RTE_FLOW_ITEM_TYPE_NVGRE:
                flow_dv_translate_item_nvgre(tmatcher->mask.buf, key, item,
                                             inner);
                break;
        case RTE_FLOW_ITEM_TYPE_GRE:
                flow_dv_translate_item_gre(tmatcher->mask.buf, key, item,
                                           inner);
                break;
        case RTE_FLOW_ITEM_TYPE_VXLAN:
        case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
                flow_dv_translate_item_vxlan(tmatcher->mask.buf, key, item,
                                             inner);
                break;
        default:
                break;
        }
}

/**
 * Store the requested actions in an array.
 *
 * @param[in] action
 *   Flow action to translate.
 * @param[in, out] dev_flow
 *   Pointer to the mlx5_flow.
 */
static void
flow_dv_create_action(const struct rte_flow_action *action,
                      struct mlx5_flow *dev_flow)
{
        const struct rte_flow_action_queue *queue;
        const struct rte_flow_action_rss *rss;
        int actions_n = dev_flow->dv.actions_n;
        struct rte_flow *flow = dev_flow->flow;

        switch (action->type) {
        case RTE_FLOW_ACTION_TYPE_VOID:
                break;
        case RTE_FLOW_ACTION_TYPE_FLAG:
                dev_flow->dv.actions[actions_n].type = MLX5DV_FLOW_ACTION_TAG;
                dev_flow->dv.actions[actions_n].tag_value =
                        MLX5_FLOW_MARK_DEFAULT;
                actions_n++;
                flow->actions |= MLX5_FLOW_ACTION_FLAG;
                break;
        case RTE_FLOW_ACTION_TYPE_MARK:
                dev_flow->dv.actions[actions_n].type = MLX5DV_FLOW_ACTION_TAG;
                dev_flow->dv.actions[actions_n].tag_value =
                        ((const struct rte_flow_action_mark *)
                         (action->conf))->id;
                flow->actions |= MLX5_FLOW_ACTION_MARK;
                actions_n++;
                break;
        case RTE_FLOW_ACTION_TYPE_DROP:
                dev_flow->dv.actions[actions_n].type = MLX5DV_FLOW_ACTION_DROP;
                flow->actions |= MLX5_FLOW_ACTION_DROP;
                break;
        case RTE_FLOW_ACTION_TYPE_QUEUE:
                queue = action->conf;
                flow->rss.queue_num = 1;
                (*flow->queue)[0] = queue->index;
                flow->actions |= MLX5_FLOW_ACTION_QUEUE;
                break;
        case RTE_FLOW_ACTION_TYPE_RSS:
                rss = action->conf;
                if (flow->queue)
                        memcpy((*flow->queue), rss->queue,
                               rss->queue_num * sizeof(uint16_t));
                flow->rss.queue_num = rss->queue_num;
                memcpy(flow->key, rss->key, MLX5_RSS_HASH_KEY_LEN);
                flow->rss.types = rss->types;
                flow->rss.level = rss->level;
                /* Added to array only in apply since we need the QP. */
                flow->actions |= MLX5_FLOW_ACTION_RSS;
                break;
        default:
                break;
        }
        dev_flow->dv.actions_n = actions_n;
}

static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };

#define HEADER_IS_ZERO(match_criteria, headers)                              \
        !(memcmp(MLX5_ADDR_OF(fte_match_param, match_criteria, headers),     \
                 matcher_zero, MLX5_FLD_SZ_BYTES(fte_match_param, headers)))
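
/*
 * Usage note: HEADER_IS_ZERO(match_criteria, outer_headers) evaluates to 1
 * when that header block of the match criteria is entirely zero, which is
 * how flow_dv_matcher_enable() below decides which criteria bits to set.
 */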

/**
 * Calculate flow matcher enable bitmap.
 *
 * @param match_criteria
 *   Pointer to flow matcher criteria.
 *
 * @return
 *   Bitmap of enabled fields.
 */
static uint8_t
flow_dv_matcher_enable(uint32_t *match_criteria)
{
        uint8_t match_criteria_enable;

        match_criteria_enable =
                (!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
                MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT;
        match_criteria_enable |=
                (!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
                MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT;
        match_criteria_enable |=
                (!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
                MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT;
        match_criteria_enable |=
                (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
                MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;

        return match_criteria_enable;
}
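
/*
 * For example, a matcher that only populated outer_headers yields just
 * (1 << MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT); a VXLAN match that also
 * fills misc_parameters sets the MISC bit as well.
 */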

/**
 * Register the flow matcher.
 *
 * @param[in, out] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in, out] matcher
 *   Pointer to flow matcher.
 * @param[in, out] dev_flow
 *   Pointer to the dev_flow.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_matcher_register(struct rte_eth_dev *dev,
                         struct mlx5_flow_dv_matcher *matcher,
                         struct mlx5_flow *dev_flow,
                         struct rte_flow_error *error)
{
        struct priv *priv = dev->data->dev_private;
        struct mlx5_flow_dv_matcher *cache_matcher;
        struct mlx5dv_flow_matcher_attr dv_attr = {
                .type = IBV_FLOW_ATTR_NORMAL,
                .match_mask = (void *)&matcher->mask,
        };

        /* Lookup from cache. */
        LIST_FOREACH(cache_matcher, &priv->matchers, next) {
                if (matcher->crc == cache_matcher->crc &&
                    matcher->priority == cache_matcher->priority &&
                    matcher->egress == cache_matcher->egress &&
                    !memcmp((const void *)matcher->mask.buf,
                            (const void *)cache_matcher->mask.buf,
                            cache_matcher->mask.size)) {
                        DRV_LOG(DEBUG,
                                "priority %hd use %s matcher %p: refcnt %d++",
                                cache_matcher->priority,
                                cache_matcher->egress ? "tx" : "rx",
                                (void *)cache_matcher,
                                rte_atomic32_read(&cache_matcher->refcnt));
                        rte_atomic32_inc(&cache_matcher->refcnt);
                        dev_flow->dv.matcher = cache_matcher;
                        return 0;
                }
        }
        /* Register new matcher. */
        cache_matcher = rte_calloc(__func__, 1, sizeof(*cache_matcher), 0);
        if (!cache_matcher)
                return rte_flow_error_set(error, ENOMEM,
                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                          "cannot allocate matcher memory");
        *cache_matcher = *matcher;
        dv_attr.match_criteria_enable =
                flow_dv_matcher_enable(cache_matcher->mask.buf);
        dv_attr.priority = matcher->priority;
        if (matcher->egress)
                dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
        cache_matcher->matcher_object =
                mlx5_glue->dv_create_flow_matcher(priv->ctx, &dv_attr);
        if (!cache_matcher->matcher_object) {
                rte_free(cache_matcher);
                return rte_flow_error_set(error, ENOMEM,
                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                          NULL, "cannot create matcher");
        }
        rte_atomic32_inc(&cache_matcher->refcnt);
        LIST_INSERT_HEAD(&priv->matchers, cache_matcher, next);
        dev_flow->dv.matcher = cache_matcher;
        DRV_LOG(DEBUG, "priority %hd new %s matcher %p: refcnt %d",
                cache_matcher->priority,
                cache_matcher->egress ? "tx" : "rx", (void *)cache_matcher,
                rte_atomic32_read(&cache_matcher->refcnt));
        return 0;
}
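
/*
 * Note: matchers are shared by reference count; a cache hit above only
 * increments refcnt, and the hardware object is destroyed in
 * flow_dv_matcher_release() once the count drops to zero.
 */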

/**
 * Fill the flow with DV spec.
 *
 * @param[in] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in, out] dev_flow
 *   Pointer to the sub flow.
 * @param[in] attr
 *   Pointer to the flow attributes.
 * @param[in] items
 *   Pointer to the list of items.
 * @param[in] actions
 *   Pointer to the list of actions.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_translate(struct rte_eth_dev *dev,
                  struct mlx5_flow *dev_flow,
                  const struct rte_flow_attr *attr,
                  const struct rte_flow_item items[],
                  const struct rte_flow_action actions[],
                  struct rte_flow_error *error)
{
        struct priv *priv = dev->data->dev_private;
        uint64_t priority = attr->priority;
        struct mlx5_flow_dv_matcher matcher = {
                .mask = {
                        .size = sizeof(matcher.mask.buf),
                },
        };
        void *match_value = dev_flow->dv.value.buf;
        uint8_t inner = 0;

        if (priority == MLX5_FLOW_PRIO_RSVD)
                priority = priv->config.flow_prio - 1;
        for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++)
                flow_dv_create_item(&matcher, match_value, items, dev_flow,
                                    inner);
        matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
                                    matcher.mask.size);
        matcher.priority = mlx5_flow_adjust_priority(dev, priority,
                                                     matcher.priority);
        matcher.egress = attr->egress;
        if (flow_dv_matcher_register(dev, &matcher, dev_flow, error))
                return -rte_errno;
        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++)
                flow_dv_create_action(actions, dev_flow);
        return 0;
}

/**
 * Apply the flow to the NIC.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in, out] flow
 *   Pointer to flow structure.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
              struct rte_flow_error *error)
{
        struct mlx5_flow_dv *dv;
        struct mlx5_flow *dev_flow;
        int n;
        int err;

        LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
                dv = &dev_flow->dv;
                n = dv->actions_n;
                if (flow->actions & MLX5_FLOW_ACTION_DROP) {
                        dv->hrxq = mlx5_hrxq_drop_new(dev);
                        if (!dv->hrxq) {
                                rte_flow_error_set
                                        (error, errno,
                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                         "cannot get drop hash queue");
                                goto error;
                        }
                        dv->actions[n].type = MLX5DV_FLOW_ACTION_DEST_IBV_QP;
                        dv->actions[n].qp = dv->hrxq->qp;
                        n++;
                } else if (flow->actions &
                           (MLX5_FLOW_ACTION_QUEUE | MLX5_FLOW_ACTION_RSS)) {
                        struct mlx5_hrxq *hrxq;

                        hrxq = mlx5_hrxq_get(dev, flow->key,
                                             MLX5_RSS_HASH_KEY_LEN,
                                             dv->hash_fields,
                                             (*flow->queue),
                                             flow->rss.queue_num);
                        if (!hrxq)
                                hrxq = mlx5_hrxq_new
                                        (dev, flow->key, MLX5_RSS_HASH_KEY_LEN,
                                         dv->hash_fields, (*flow->queue),
                                         flow->rss.queue_num,
                                         !!(flow->layers &
                                            MLX5_FLOW_LAYER_TUNNEL));
                        if (!hrxq) {
                                rte_flow_error_set
                                        (error, rte_errno,
                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                         "cannot get hash queue");
                                goto error;
                        }
                        dv->hrxq = hrxq;
                        dv->actions[n].type = MLX5DV_FLOW_ACTION_DEST_IBV_QP;
                        dv->actions[n].qp = hrxq->qp;
                        n++;
                }
                dv->flow =
                        mlx5_glue->dv_create_flow(dv->matcher->matcher_object,
                                                  (void *)&dv->value, n,
                                                  dv->actions);
                if (!dv->flow) {
                        rte_flow_error_set(error, errno,
                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                           NULL,
                                           "hardware refuses to create flow");
                        goto error;
                }
        }
        return 0;
error:
        err = rte_errno; /* Save rte_errno before cleanup. */
        LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
                struct mlx5_flow_dv *dv = &dev_flow->dv;

                if (dv->hrxq) {
                        if (flow->actions & MLX5_FLOW_ACTION_DROP)
                                mlx5_hrxq_drop_release(dev);
                        else
                                mlx5_hrxq_release(dev, dv->hrxq);
                        dv->hrxq = NULL;
                }
        }
        rte_errno = err; /* Restore rte_errno. */
        return -rte_errno;
}

/**
 * Release the flow matcher.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param flow
 *   Pointer to mlx5_flow.
 *
 * @return
 *   1 while a reference on it exists, 0 when freed.
 */
static int
flow_dv_matcher_release(struct rte_eth_dev *dev,
                        struct mlx5_flow *flow)
{
        struct mlx5_flow_dv_matcher *matcher = flow->dv.matcher;

        assert(matcher->matcher_object);
        DRV_LOG(DEBUG, "port %u matcher %p: refcnt %d--",
                dev->data->port_id, (void *)matcher,
                rte_atomic32_read(&matcher->refcnt));
        if (rte_atomic32_dec_and_test(&matcher->refcnt)) {
                claim_zero(mlx5_glue->dv_destroy_flow_matcher
                           (matcher->matcher_object));
                LIST_REMOVE(matcher, next);
                rte_free(matcher);
                DRV_LOG(DEBUG, "port %u matcher %p: removed",
                        dev->data->port_id, (void *)matcher);
                return 0;
        }
        return 1;
}

/**
 * Remove the flow from the NIC but keep it in memory.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in, out] flow
 *   Pointer to flow structure.
 */
static void
flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
{
        struct mlx5_flow_dv *dv;
        struct mlx5_flow *dev_flow;

        if (!flow)
                return;
        LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
                dv = &dev_flow->dv;
                if (dv->flow) {
                        claim_zero(mlx5_glue->destroy_flow(dv->flow));
                        dv->flow = NULL;
                }
                if (dv->hrxq) {
                        if (flow->actions & MLX5_FLOW_ACTION_DROP)
                                mlx5_hrxq_drop_release(dev);
                        else
                                mlx5_hrxq_release(dev, dv->hrxq);
                        dv->hrxq = NULL;
                }
        }
        flow->counter = NULL;
}

/**
 * Remove the flow from the NIC and the memory.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in, out] flow
 *   Pointer to flow structure.
 */
static void
flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
{
        struct mlx5_flow *dev_flow;

        if (!flow)
                return;
        flow_dv_remove(dev, flow);
        while (!LIST_EMPTY(&flow->dev_flows)) {
                dev_flow = LIST_FIRST(&flow->dev_flows);
                LIST_REMOVE(dev_flow, next);
                if (dev_flow->dv.matcher)
                        flow_dv_matcher_release(dev, dev_flow);
                rte_free(dev_flow);
        }
}

/**
 * Query a flow.
 *
 * @see rte_flow_query()
 * @see rte_flow_ops
 */
static int
flow_dv_query(struct rte_eth_dev *dev __rte_unused,
              struct rte_flow *flow __rte_unused,
              const struct rte_flow_action *actions __rte_unused,
              void *data __rte_unused,
              struct rte_flow_error *error __rte_unused)
{
        rte_errno = ENOTSUP;
        return -rte_errno;
}

const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
        .validate = flow_dv_validate,
        .prepare = flow_dv_prepare,
        .translate = flow_dv_translate,
        .apply = flow_dv_apply,
        .remove = flow_dv_remove,
        .destroy = flow_dv_destroy,
        .query = flow_dv_query,
};

#endif /* HAVE_IBV_FLOW_DV_SUPPORT */