net/mlx5: support e-switch TCP-flags flow filter
drivers/net/mlx5/mlx5_flow_dv.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018 Mellanox Technologies, Ltd
 */

#include <sys/queue.h>
#include <stdalign.h>
#include <stdint.h>
#include <string.h>

/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_common.h>
#include <rte_ether.h>
#include <rte_eth_ctrl.h>
#include <rte_ethdev_driver.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>
#include <rte_ip.h>

#include "mlx5.h"
#include "mlx5_defs.h"
#include "mlx5_prm.h"
#include "mlx5_glue.h"
#include "mlx5_flow.h"

#ifdef HAVE_IBV_FLOW_DV_SUPPORT

/**
 * Verify the @p attributes will be correctly understood by the NIC and store
 * them in the @p flow if everything is correct.
 *
 * @param[in] dev
 *   Pointer to dev struct.
 * @param[in] attributes
 *   Pointer to flow attributes.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_attributes(struct rte_eth_dev *dev,
                            const struct rte_flow_attr *attributes,
                            struct rte_flow_error *error)
{
        struct priv *priv = dev->data->dev_private;
        uint32_t priority_max = priv->config.flow_prio - 1;

        if (attributes->group)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
                                          NULL,
                                          "groups are not supported");
        if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
            attributes->priority >= priority_max)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                                          NULL,
                                          "priority out of range");
        if (attributes->transfer)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
                                          NULL,
                                          "transfer is not supported");
        if (!(attributes->egress ^ attributes->ingress))
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ATTR, NULL,
                                          "must specify exactly one of "
                                          "ingress or egress");
        return 0;
}
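
/*
 * For reference, attributes that satisfy the checks above: group 0,
 * the reserved (default) priority and exactly one direction bit set.
 * A minimal sketch; the variable name is illustrative only.
 */
static const struct rte_flow_attr flow_dv_attr_example __rte_unused = {
        .group = 0,
        .priority = MLX5_FLOW_PRIO_RSVD,
        .ingress = 1,
};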

/**
 * Internal validation function. For validating both actions and items.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] attr
 *   Pointer to the flow attributes.
 * @param[in] items
 *   Pointer to the list of items.
 * @param[in] actions
 *   Pointer to the list of actions.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
                 const struct rte_flow_item items[],
                 const struct rte_flow_action actions[],
                 struct rte_flow_error *error)
{
        int ret;
        uint32_t action_flags = 0;
        uint32_t item_flags = 0;
        int tunnel = 0;
        uint8_t next_protocol = 0xff;
        int actions_n = 0;

        if (items == NULL)
                return -1;
        ret = flow_dv_validate_attributes(dev, attr, error);
        if (ret < 0)
                return ret;
        for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
                /*
                 * The tunnel state must be refreshed per item so inner
                 * layers seen after a tunnel item are flagged as inner.
                 */
                tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
                switch (items->type) {
                case RTE_FLOW_ITEM_TYPE_VOID:
                        break;
                case RTE_FLOW_ITEM_TYPE_ETH:
                        ret = mlx5_flow_validate_item_eth(items, item_flags,
                                                          error);
                        if (ret < 0)
                                return ret;
                        item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
                                               MLX5_FLOW_LAYER_OUTER_L2;
                        break;
                case RTE_FLOW_ITEM_TYPE_VLAN:
                        ret = mlx5_flow_validate_item_vlan(items, item_flags,
                                                           error);
                        if (ret < 0)
                                return ret;
                        item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
                                               MLX5_FLOW_LAYER_OUTER_VLAN;
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV4:
                        ret = mlx5_flow_validate_item_ipv4(items, item_flags,
                                                           error);
                        if (ret < 0)
                                return ret;
                        item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
                                               MLX5_FLOW_LAYER_OUTER_L3_IPV4;
                        if (items->mask != NULL &&
                            ((const struct rte_flow_item_ipv4 *)
                             items->mask)->hdr.next_proto_id)
                                next_protocol =
                                        ((const struct rte_flow_item_ipv4 *)
                                         (items->spec))->hdr.next_proto_id;
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV6:
                        ret = mlx5_flow_validate_item_ipv6(items, item_flags,
                                                           error);
                        if (ret < 0)
                                return ret;
                        item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
                                               MLX5_FLOW_LAYER_OUTER_L3_IPV6;
                        if (items->mask != NULL &&
                            ((const struct rte_flow_item_ipv6 *)
                             items->mask)->hdr.proto)
                                next_protocol =
                                        ((const struct rte_flow_item_ipv6 *)
                                         items->spec)->hdr.proto;
                        break;
                case RTE_FLOW_ITEM_TYPE_UDP:
                        ret = mlx5_flow_validate_item_udp(items, item_flags,
                                                          next_protocol,
                                                          error);
                        if (ret < 0)
                                return ret;
                        item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
                                               MLX5_FLOW_LAYER_OUTER_L4_UDP;
                        break;
                case RTE_FLOW_ITEM_TYPE_TCP:
                        ret = mlx5_flow_validate_item_tcp
                                                (items, item_flags,
                                                 next_protocol,
                                                 &rte_flow_item_tcp_mask,
                                                 error);
                        if (ret < 0)
                                return ret;
                        item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
                                               MLX5_FLOW_LAYER_OUTER_L4_TCP;
                        break;
                case RTE_FLOW_ITEM_TYPE_VXLAN:
                        ret = mlx5_flow_validate_item_vxlan(items, item_flags,
                                                            error);
                        if (ret < 0)
                                return ret;
                        item_flags |= MLX5_FLOW_LAYER_VXLAN;
                        break;
                case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
                        ret = mlx5_flow_validate_item_vxlan_gpe(items,
                                                                item_flags, dev,
                                                                error);
                        if (ret < 0)
                                return ret;
                        item_flags |= MLX5_FLOW_LAYER_VXLAN_GPE;
                        break;
                case RTE_FLOW_ITEM_TYPE_GRE:
                        ret = mlx5_flow_validate_item_gre(items, item_flags,
                                                          next_protocol, error);
                        if (ret < 0)
                                return ret;
                        item_flags |= MLX5_FLOW_LAYER_GRE;
                        break;
                case RTE_FLOW_ITEM_TYPE_MPLS:
                        ret = mlx5_flow_validate_item_mpls(items, item_flags,
                                                           next_protocol,
                                                           error);
                        if (ret < 0)
                                return ret;
                        item_flags |= MLX5_FLOW_LAYER_MPLS;
                        break;
                default:
                        return rte_flow_error_set(error, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ITEM,
                                                  NULL, "item not supported");
                }
        }
        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
                if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
                        return rte_flow_error_set(error, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                                  actions, "too many actions");
                switch (actions->type) {
                case RTE_FLOW_ACTION_TYPE_VOID:
                        break;
                case RTE_FLOW_ACTION_TYPE_FLAG:
                        ret = mlx5_flow_validate_action_flag(action_flags,
                                                             attr, error);
                        if (ret < 0)
                                return ret;
                        action_flags |= MLX5_FLOW_ACTION_FLAG;
                        ++actions_n;
                        break;
                case RTE_FLOW_ACTION_TYPE_MARK:
                        ret = mlx5_flow_validate_action_mark(actions,
                                                             action_flags,
                                                             attr, error);
                        if (ret < 0)
                                return ret;
                        action_flags |= MLX5_FLOW_ACTION_MARK;
                        ++actions_n;
                        break;
                case RTE_FLOW_ACTION_TYPE_DROP:
                        ret = mlx5_flow_validate_action_drop(action_flags,
                                                             attr, error);
                        if (ret < 0)
                                return ret;
                        action_flags |= MLX5_FLOW_ACTION_DROP;
                        ++actions_n;
                        break;
                case RTE_FLOW_ACTION_TYPE_QUEUE:
                        ret = mlx5_flow_validate_action_queue(actions,
                                                              action_flags, dev,
                                                              attr, error);
                        if (ret < 0)
                                return ret;
                        action_flags |= MLX5_FLOW_ACTION_QUEUE;
                        ++actions_n;
                        break;
                case RTE_FLOW_ACTION_TYPE_RSS:
                        ret = mlx5_flow_validate_action_rss(actions,
                                                            action_flags, dev,
                                                            attr, error);
                        if (ret < 0)
                                return ret;
                        action_flags |= MLX5_FLOW_ACTION_RSS;
                        ++actions_n;
                        break;
                case RTE_FLOW_ACTION_TYPE_COUNT:
                        ret = mlx5_flow_validate_action_count(dev, attr, error);
                        if (ret < 0)
                                return ret;
                        action_flags |= MLX5_FLOW_ACTION_COUNT;
                        ++actions_n;
                        break;
                default:
                        return rte_flow_error_set(error, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                                  actions,
                                                  "action not supported");
                }
        }
        if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) && attr->ingress)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, actions,
                                          "no fate action is found");
        return 0;
}
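
/*
 * Example of input the validator accepts: an eth / ipv4 / tcp pattern
 * with a single queue fate action. A sketch only; the names are
 * illustrative and queue 0 is assumed to exist on the port. Items
 * with NULL spec/mask match anything at that layer.
 */
static const struct rte_flow_item flow_dv_pattern_example[] __rte_unused = {
        { .type = RTE_FLOW_ITEM_TYPE_ETH },
        { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
        { .type = RTE_FLOW_ITEM_TYPE_TCP },
        { .type = RTE_FLOW_ITEM_TYPE_END },
};

static const struct rte_flow_action_queue flow_dv_queue_example = {
        .index = 0,
};

static const struct rte_flow_action flow_dv_actions_example[] __rte_unused = {
        {
                .type = RTE_FLOW_ACTION_TYPE_QUEUE,
                .conf = &flow_dv_queue_example,
        },
        { .type = RTE_FLOW_ACTION_TYPE_END },
};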

/**
 * Internal preparation function. Allocates the DV flow structure;
 * its size is constant.
 *
 * @param[in] attr
 *   Pointer to the flow attributes.
 * @param[in] items
 *   Pointer to the list of items.
 * @param[in] actions
 *   Pointer to the list of actions.
 * @param[out] item_flags
 *   Pointer to bit mask of all items detected.
 * @param[out] action_flags
 *   Pointer to bit mask of all actions detected.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   Pointer to mlx5_flow object on success,
 *   otherwise NULL and rte_errno is set.
 */
static struct mlx5_flow *
flow_dv_prepare(const struct rte_flow_attr *attr __rte_unused,
                const struct rte_flow_item items[] __rte_unused,
                const struct rte_flow_action actions[] __rte_unused,
                uint64_t *item_flags __rte_unused,
                uint64_t *action_flags __rte_unused,
                struct rte_flow_error *error)
{
        uint32_t size = sizeof(struct mlx5_flow);
        struct mlx5_flow *flow;

        flow = rte_calloc(__func__, 1, size, 0);
        if (!flow) {
                rte_flow_error_set(error, ENOMEM,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                   "not enough memory to create flow");
                return NULL;
        }
        flow->dv.value.size = MLX5_ST_SZ_DB(fte_match_param);
        return flow;
}

/**
 * Add Ethernet item to matcher and to the value.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 */
static void
flow_dv_translate_item_eth(void *matcher, void *key,
                           const struct rte_flow_item *item, int inner)
{
        const struct rte_flow_item_eth *eth_m = item->mask;
        const struct rte_flow_item_eth *eth_v = item->spec;
        const struct rte_flow_item_eth nic_mask = {
                .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
                .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
                .type = RTE_BE16(0xffff),
        };
        void *headers_m;
        void *headers_v;
        char *l24_v;
        unsigned int i;

        if (!eth_v)
                return;
        if (!eth_m)
                eth_m = &nic_mask;
        if (inner) {
                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
                                         inner_headers);
                headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
        } else {
                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
                                         outer_headers);
                headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
        }
        memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, dmac_47_16),
               &eth_m->dst, sizeof(eth_m->dst));
        /* The value must be in the range of the mask. */
        l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, dmac_47_16);
        for (i = 0; i < sizeof(eth_m->dst); ++i)
                l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i];
        memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, smac_47_16),
               &eth_m->src, sizeof(eth_m->src));
        l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, smac_47_16);
        /* The value must be in the range of the mask. */
        for (i = 0; i < sizeof(eth_m->src); ++i)
                l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
        MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
                 rte_be_to_cpu_16(eth_m->type));
        l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, ethertype);
        *(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
}
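
/*
 * Every translate helper follows the same convention: the matcher side
 * receives the mask verbatim while the key side receives spec & mask,
 * keeping the value within the range of the mask. A minimal sketch of
 * that rule for an arbitrary byte array; the helper name is
 * illustrative and the driver paths above do not call it.
 */
static inline void
flow_dv_copy_masked(uint8_t *value, const uint8_t *spec, const uint8_t *mask,
                    size_t size)
{
        size_t i;

        for (i = 0; i < size; ++i)
                value[i] = spec[i] & mask[i];
}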

/**
 * Add VLAN item to matcher and to the value.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 */
static void
flow_dv_translate_item_vlan(void *matcher, void *key,
                            const struct rte_flow_item *item,
                            int inner)
{
        const struct rte_flow_item_vlan *vlan_m = item->mask;
        const struct rte_flow_item_vlan *vlan_v = item->spec;
        const struct rte_flow_item_vlan nic_mask = {
                .tci = RTE_BE16(0x0fff),
                .inner_type = RTE_BE16(0xffff),
        };
        void *headers_m;
        void *headers_v;
        uint16_t tci_m;
        uint16_t tci_v;

        if (!vlan_v)
                return;
        if (!vlan_m)
                vlan_m = &nic_mask;
        if (inner) {
                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
                                         inner_headers);
                headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
        } else {
                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
                                         outer_headers);
                headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
        }
        tci_m = rte_be_to_cpu_16(vlan_m->tci);
        tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
        MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1);
        MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_vid, tci_m);
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, tci_v);
        MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_cfi, tci_m >> 12);
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_cfi, tci_v >> 12);
        MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_prio, tci_m >> 13);
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, tci_v >> 13);
}
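
/*
 * The TCI splits as VID (bits 0-11), CFI/DEI (bit 12) and PCP (bits
 * 13-15), hence the 12- and 13-bit shifts above (MLX5_SET() truncates
 * the value to the destination field width). A sketch of the same
 * decomposition on a host-order TCI; the helper is illustrative only.
 */
static inline void
flow_dv_tci_split(uint16_t tci, uint16_t *vid, uint16_t *cfi, uint16_t *prio)
{
        *vid = tci & 0x0fff;      /* 12-bit VLAN identifier. */
        *cfi = (tci >> 12) & 0x1; /* Single CFI/DEI bit. */
        *prio = tci >> 13;        /* 3-bit priority code point. */
}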

/**
 * Add IPV4 item to matcher and to the value.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 */
static void
flow_dv_translate_item_ipv4(void *matcher, void *key,
                            const struct rte_flow_item *item,
                            int inner)
{
        const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
        const struct rte_flow_item_ipv4 *ipv4_v = item->spec;
        const struct rte_flow_item_ipv4 nic_mask = {
                .hdr = {
                        .src_addr = RTE_BE32(0xffffffff),
                        .dst_addr = RTE_BE32(0xffffffff),
                        .type_of_service = 0xff,
                        .next_proto_id = 0xff,
                },
        };
        void *headers_m;
        void *headers_v;
        char *l24_m;
        char *l24_v;
        uint8_t tos;

        if (!ipv4_v)
                return;
        if (!ipv4_m)
                ipv4_m = &nic_mask;
        if (inner) {
                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
                                         inner_headers);
                headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
        } else {
                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
                                         outer_headers);
                headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
        }
        MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 4);
        l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
                             dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
        l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                             dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
        *(uint32_t *)l24_m = ipv4_m->hdr.dst_addr;
        *(uint32_t *)l24_v = ipv4_m->hdr.dst_addr & ipv4_v->hdr.dst_addr;
        l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
                             src_ipv4_src_ipv6.ipv4_layout.ipv4);
        l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                             src_ipv4_src_ipv6.ipv4_layout.ipv4);
        *(uint32_t *)l24_m = ipv4_m->hdr.src_addr;
        *(uint32_t *)l24_v = ipv4_m->hdr.src_addr & ipv4_v->hdr.src_addr;
        tos = ipv4_m->hdr.type_of_service & ipv4_v->hdr.type_of_service;
        MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn,
                 ipv4_m->hdr.type_of_service);
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, tos);
        MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp,
                 ipv4_m->hdr.type_of_service >> 2);
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, tos >> 2);
        MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
                 ipv4_m->hdr.next_proto_id);
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
                 ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id);
}

/**
 * Add IPV6 item to matcher and to the value.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 */
static void
flow_dv_translate_item_ipv6(void *matcher, void *key,
                            const struct rte_flow_item *item,
                            int inner)
{
        const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
        const struct rte_flow_item_ipv6 *ipv6_v = item->spec;
        const struct rte_flow_item_ipv6 nic_mask = {
                .hdr = {
                        .src_addr =
                                "\xff\xff\xff\xff\xff\xff\xff\xff"
                                "\xff\xff\xff\xff\xff\xff\xff\xff",
                        .dst_addr =
                                "\xff\xff\xff\xff\xff\xff\xff\xff"
                                "\xff\xff\xff\xff\xff\xff\xff\xff",
                        .vtc_flow = RTE_BE32(0xffffffff),
                        .proto = 0xff,
                        .hop_limits = 0xff,
                },
        };
        void *headers_m;
        void *headers_v;
        void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
        void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
        char *l24_m;
        char *l24_v;
        uint32_t vtc_m;
        uint32_t vtc_v;
        int i;
        int size;

        if (!ipv6_v)
                return;
        if (!ipv6_m)
                ipv6_m = &nic_mask;
        if (inner) {
                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
                                         inner_headers);
                headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
        } else {
                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
                                         outer_headers);
                headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
        }
        size = sizeof(ipv6_m->hdr.dst_addr);
        l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
                             dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
        l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                             dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
        memcpy(l24_m, ipv6_m->hdr.dst_addr, size);
        for (i = 0; i < size; ++i)
                l24_v[i] = l24_m[i] & ipv6_v->hdr.dst_addr[i];
        l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
                             src_ipv4_src_ipv6.ipv6_layout.ipv6);
        l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                             src_ipv4_src_ipv6.ipv6_layout.ipv6);
        memcpy(l24_m, ipv6_m->hdr.src_addr, size);
        for (i = 0; i < size; ++i)
                l24_v[i] = l24_m[i] & ipv6_v->hdr.src_addr[i];
        MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 6);
        /* TOS. */
        vtc_m = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow);
        vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow);
        MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, vtc_m >> 20);
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20);
        MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, vtc_m >> 22);
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, vtc_v >> 22);
        /* Label. */
        if (inner) {
                MLX5_SET(fte_match_set_misc, misc_m, inner_ipv6_flow_label,
                         vtc_m);
                MLX5_SET(fte_match_set_misc, misc_v, inner_ipv6_flow_label,
                         vtc_v);
        } else {
                MLX5_SET(fte_match_set_misc, misc_m, outer_ipv6_flow_label,
                         vtc_m);
                MLX5_SET(fte_match_set_misc, misc_v, outer_ipv6_flow_label,
                         vtc_v);
        }
        /* Protocol. */
        MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
                 ipv6_m->hdr.proto);
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
                 ipv6_v->hdr.proto & ipv6_m->hdr.proto);
}

/**
 * Add TCP item to matcher and to the value.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 */
static void
flow_dv_translate_item_tcp(void *matcher, void *key,
                           const struct rte_flow_item *item,
                           int inner)
{
        const struct rte_flow_item_tcp *tcp_m = item->mask;
        const struct rte_flow_item_tcp *tcp_v = item->spec;
        void *headers_m;
        void *headers_v;

        if (!tcp_v)
                return;
        if (!tcp_m)
                tcp_m = &rte_flow_item_tcp_mask;
        if (inner) {
                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
                                         inner_headers);
                headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
        } else {
                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
                                         outer_headers);
                headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
        }
        MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP);
        MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_sport,
                 rte_be_to_cpu_16(tcp_m->hdr.src_port));
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
                 rte_be_to_cpu_16(tcp_v->hdr.src_port & tcp_m->hdr.src_port));
        MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_dport,
                 rte_be_to_cpu_16(tcp_m->hdr.dst_port));
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
                 rte_be_to_cpu_16(tcp_v->hdr.dst_port & tcp_m->hdr.dst_port));
}
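
/*
 * rte_flow keeps L4 port fields big-endian while MLX5_SET() expects
 * host-order values, hence the rte_be_to_cpu_16() conversions above.
 * For instance, matching TCP destination port 80 exactly amounts to:
 *
 *   MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_dport, 0xffff);
 *   MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport, 80);
 */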

/**
 * Add UDP item to matcher and to the value.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 */
static void
flow_dv_translate_item_udp(void *matcher, void *key,
                           const struct rte_flow_item *item,
                           int inner)
{
        const struct rte_flow_item_udp *udp_m = item->mask;
        const struct rte_flow_item_udp *udp_v = item->spec;
        void *headers_m;
        void *headers_v;

        if (!udp_v)
                return;
        if (!udp_m)
                udp_m = &rte_flow_item_udp_mask;
        if (inner) {
                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
                                         inner_headers);
                headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
        } else {
                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
                                         outer_headers);
                headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
        }
        MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
        MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_sport,
                 rte_be_to_cpu_16(udp_m->hdr.src_port));
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
                 rte_be_to_cpu_16(udp_v->hdr.src_port & udp_m->hdr.src_port));
        MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
                 rte_be_to_cpu_16(udp_m->hdr.dst_port));
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
                 rte_be_to_cpu_16(udp_v->hdr.dst_port & udp_m->hdr.dst_port));
}

/**
 * Add GRE item to matcher and to the value.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 */
static void
flow_dv_translate_item_gre(void *matcher, void *key,
                           const struct rte_flow_item *item,
                           int inner)
{
        const struct rte_flow_item_gre *gre_m = item->mask;
        const struct rte_flow_item_gre *gre_v = item->spec;
        void *headers_m;
        void *headers_v;
        void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
        void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);

        if (!gre_v)
                return;
        if (!gre_m)
                gre_m = &rte_flow_item_gre_mask;
        if (inner) {
                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
                                         inner_headers);
                headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
        } else {
                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
                                         outer_headers);
                headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
        }
        MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
        MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
                 rte_be_to_cpu_16(gre_m->protocol));
        MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
                 rte_be_to_cpu_16(gre_v->protocol & gre_m->protocol));
}

/**
 * Add NVGRE item to matcher and to the value.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 */
static void
flow_dv_translate_item_nvgre(void *matcher, void *key,
                             const struct rte_flow_item *item,
                             int inner)
{
        const struct rte_flow_item_nvgre *nvgre_m = item->mask;
        const struct rte_flow_item_nvgre *nvgre_v = item->spec;
        void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
        void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
        char *gre_key_m;
        char *gre_key_v;
        int size;
        int i;

        if (!nvgre_v)
                return;
        if (!nvgre_m)
                nvgre_m = &rte_flow_item_nvgre_mask;
        size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id);
        gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h);
        gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h);
        memcpy(gre_key_m, nvgre_m->tni, size);
        for (i = 0; i < size; ++i)
                gre_key_v[i] = gre_key_m[i] & ((const char *)(nvgre_v->tni))[i];
        flow_dv_translate_item_gre(matcher, key, item, inner);
}
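
/*
 * NVGRE reuses the GRE translation above for the IP protocol and GRE
 * protocol fields; only the 24-bit TNI plus the 8-bit flow_id, which
 * are contiguous in the item, are matched separately by packing them
 * into the gre_key_h field.
 */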

/**
 * Add VXLAN item to matcher and to the value.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 */
static void
flow_dv_translate_item_vxlan(void *matcher, void *key,
                             const struct rte_flow_item *item,
                             int inner)
{
        const struct rte_flow_item_vxlan *vxlan_m = item->mask;
        const struct rte_flow_item_vxlan *vxlan_v = item->spec;
        void *headers_m;
        void *headers_v;
        void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
        void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
        char *vni_m;
        char *vni_v;
        uint16_t dport;
        int size;
        int i;

        if (!vxlan_v)
                return;
        if (!vxlan_m)
                vxlan_m = &rte_flow_item_vxlan_mask;
        if (inner) {
                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
                                         inner_headers);
                headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
        } else {
                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
                                         outer_headers);
                headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
        }
        dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
                MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
        if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
                MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
                MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
        }
        size = sizeof(vxlan_m->vni);
        vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni);
        vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni);
        memcpy(vni_m, vxlan_m->vni, size);
        for (i = 0; i < size; ++i)
                vni_v[i] = vni_m[i] & vxlan_v->vni[i];
}
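
/*
 * If the pattern did not already constrain the outer UDP destination
 * port, the translation above pins it to the well-known port for the
 * item type so the rule cannot alias plain UDP traffic:
 *
 *   eth / ipv4 / udp / vxlan              -> udp_dport forced to 4789
 *   eth / ipv4 / udp dst is 4790 / vxlan  -> udp_dport kept at 4790
 */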

/**
 * Update the matcher and the value based on the selected item.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in, out] dev_flow
 *   Pointer to the mlx5_flow.
 * @param[in] inner
 *   Item is inner pattern.
 */
static void
flow_dv_create_item(void *matcher, void *key,
                    const struct rte_flow_item *item,
                    struct mlx5_flow *dev_flow,
                    int inner)
{
        struct mlx5_flow_dv_matcher *tmatcher = matcher;

        switch (item->type) {
        case RTE_FLOW_ITEM_TYPE_VOID:
        case RTE_FLOW_ITEM_TYPE_END:
                break;
        case RTE_FLOW_ITEM_TYPE_ETH:
                flow_dv_translate_item_eth(tmatcher->mask.buf, key, item,
                                           inner);
                tmatcher->priority = MLX5_PRIORITY_MAP_L2;
                break;
        case RTE_FLOW_ITEM_TYPE_VLAN:
                flow_dv_translate_item_vlan(tmatcher->mask.buf, key, item,
                                            inner);
                break;
        case RTE_FLOW_ITEM_TYPE_IPV4:
                flow_dv_translate_item_ipv4(tmatcher->mask.buf, key, item,
                                            inner);
                tmatcher->priority = MLX5_PRIORITY_MAP_L3;
                dev_flow->dv.hash_fields |=
                        mlx5_flow_hashfields_adjust(dev_flow, inner,
                                                    MLX5_IPV4_LAYER_TYPES,
                                                    MLX5_IPV4_IBV_RX_HASH);
                break;
        case RTE_FLOW_ITEM_TYPE_IPV6:
                flow_dv_translate_item_ipv6(tmatcher->mask.buf, key, item,
                                            inner);
                tmatcher->priority = MLX5_PRIORITY_MAP_L3;
                dev_flow->dv.hash_fields |=
                        mlx5_flow_hashfields_adjust(dev_flow, inner,
                                                    MLX5_IPV6_LAYER_TYPES,
                                                    MLX5_IPV6_IBV_RX_HASH);
                break;
        case RTE_FLOW_ITEM_TYPE_TCP:
                flow_dv_translate_item_tcp(tmatcher->mask.buf, key, item,
                                           inner);
                tmatcher->priority = MLX5_PRIORITY_MAP_L4;
                dev_flow->dv.hash_fields |=
                        mlx5_flow_hashfields_adjust(dev_flow, inner,
                                                    ETH_RSS_TCP,
                                                    (IBV_RX_HASH_SRC_PORT_TCP |
                                                     IBV_RX_HASH_DST_PORT_TCP));
                break;
        case RTE_FLOW_ITEM_TYPE_UDP:
                flow_dv_translate_item_udp(tmatcher->mask.buf, key, item,
                                           inner);
                tmatcher->priority = MLX5_PRIORITY_MAP_L4;
                dev_flow->dv.hash_fields |=
                        mlx5_flow_hashfields_adjust(dev_flow, inner,
                                                    ETH_RSS_UDP,
                                                    (IBV_RX_HASH_SRC_PORT_UDP |
                                                     IBV_RX_HASH_DST_PORT_UDP));
                break;
        case RTE_FLOW_ITEM_TYPE_NVGRE:
                flow_dv_translate_item_nvgre(tmatcher->mask.buf, key, item,
                                             inner);
                break;
        case RTE_FLOW_ITEM_TYPE_GRE:
                flow_dv_translate_item_gre(tmatcher->mask.buf, key, item,
                                           inner);
                break;
        case RTE_FLOW_ITEM_TYPE_VXLAN:
        case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
                flow_dv_translate_item_vxlan(tmatcher->mask.buf, key, item,
                                             inner);
                break;
        default:
                break;
        }
}

/**
 * Store the requested actions in an array.
 *
 * @param[in] action
 *   Flow action to translate.
 * @param[in, out] dev_flow
 *   Pointer to the mlx5_flow.
 */
static void
flow_dv_create_action(const struct rte_flow_action *action,
                      struct mlx5_flow *dev_flow)
{
        const struct rte_flow_action_queue *queue;
        const struct rte_flow_action_rss *rss;
        int actions_n = dev_flow->dv.actions_n;
        struct rte_flow *flow = dev_flow->flow;

        switch (action->type) {
        case RTE_FLOW_ACTION_TYPE_VOID:
                break;
        case RTE_FLOW_ACTION_TYPE_FLAG:
                dev_flow->dv.actions[actions_n].type = MLX5DV_FLOW_ACTION_TAG;
                dev_flow->dv.actions[actions_n].tag_value =
                        MLX5_FLOW_MARK_DEFAULT;
                actions_n++;
                flow->actions |= MLX5_FLOW_ACTION_FLAG;
                break;
        case RTE_FLOW_ACTION_TYPE_MARK:
                dev_flow->dv.actions[actions_n].type = MLX5DV_FLOW_ACTION_TAG;
                dev_flow->dv.actions[actions_n].tag_value =
                        ((const struct rte_flow_action_mark *)
                         (action->conf))->id;
                flow->actions |= MLX5_FLOW_ACTION_MARK;
                actions_n++;
                break;
        case RTE_FLOW_ACTION_TYPE_DROP:
                dev_flow->dv.actions[actions_n].type = MLX5DV_FLOW_ACTION_DROP;
                flow->actions |= MLX5_FLOW_ACTION_DROP;
                break;
        case RTE_FLOW_ACTION_TYPE_QUEUE:
                queue = action->conf;
                flow->rss.queue_num = 1;
                (*flow->queue)[0] = queue->index;
                flow->actions |= MLX5_FLOW_ACTION_QUEUE;
                break;
        case RTE_FLOW_ACTION_TYPE_RSS:
                rss = action->conf;
                if (flow->queue)
                        memcpy((*flow->queue), rss->queue,
                               rss->queue_num * sizeof(uint16_t));
                flow->rss.queue_num = rss->queue_num;
                memcpy(flow->key, rss->key, MLX5_RSS_HASH_KEY_LEN);
                flow->rss.types = rss->types;
                flow->rss.level = rss->level;
                /* Added to the array only in apply since we need the QP. */
                flow->actions |= MLX5_FLOW_ACTION_RSS;
                break;
        default:
                break;
        }
        dev_flow->dv.actions_n = actions_n;
}
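
/*
 * For instance, a MARK action whose conf carries id 42 is translated
 * above into a MLX5DV_FLOW_ACTION_TAG entry with tag_value 42. A
 * sketch of the application-side conf; the name is illustrative only.
 */
static const struct rte_flow_action_mark flow_dv_mark_example __rte_unused = {
        .id = 42,
};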

static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };

#define HEADER_IS_ZERO(match_criteria, headers)                              \
        !(memcmp(MLX5_ADDR_OF(fte_match_param, match_criteria, headers),     \
                 matcher_zero, MLX5_FLD_SZ_BYTES(fte_match_param, headers)))

/**
 * Calculate flow matcher enable bitmap.
 *
 * @param match_criteria
 *   Pointer to flow matcher criteria.
 *
 * @return
 *   Bitmap of enabled fields.
 */
static uint8_t
flow_dv_matcher_enable(uint32_t *match_criteria)
{
        uint8_t match_criteria_enable;

        match_criteria_enable =
                (!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
                MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT;
        match_criteria_enable |=
                (!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
                MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT;
        match_criteria_enable |=
                (!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
                MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT;
        match_criteria_enable |=
                (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
                MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;

        return match_criteria_enable;
}
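
/*
 * The bitmap feeds match_criteria_enable in the matcher attributes.
 * For example, a matcher touching only outer L2-L4 headers enables
 * just the outer bit, while an outer + VXLAN VNI matcher also sets
 * the misc bit:
 *
 *   outer only   -> 1 << MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT
 *   outer + VNI  -> (1 << MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT) |
 *                   (1 << MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT)
 */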

/**
 * Register the flow matcher.
 *
 * @param[in, out] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in, out] matcher
 *   Pointer to flow matcher.
 * @param[in, out] dev_flow
 *   Pointer to the dev_flow.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_matcher_register(struct rte_eth_dev *dev,
                         struct mlx5_flow_dv_matcher *matcher,
                         struct mlx5_flow *dev_flow,
                         struct rte_flow_error *error)
{
        struct priv *priv = dev->data->dev_private;
        struct mlx5_flow_dv_matcher *cache_matcher;
        struct mlx5dv_flow_matcher_attr dv_attr = {
                .type = IBV_FLOW_ATTR_NORMAL,
                .match_mask = (void *)&matcher->mask,
        };

        /* Lookup from cache. */
        LIST_FOREACH(cache_matcher, &priv->matchers, next) {
                if (matcher->crc == cache_matcher->crc &&
                    matcher->priority == cache_matcher->priority &&
                    matcher->egress == cache_matcher->egress &&
                    !memcmp((const void *)matcher->mask.buf,
                            (const void *)cache_matcher->mask.buf,
                            cache_matcher->mask.size)) {
                        DRV_LOG(DEBUG,
                                "priority %hd use %s matcher %p: refcnt %d++",
                                cache_matcher->priority,
                                cache_matcher->egress ? "tx" : "rx",
                                (void *)cache_matcher,
                                rte_atomic32_read(&cache_matcher->refcnt));
                        rte_atomic32_inc(&cache_matcher->refcnt);
                        dev_flow->dv.matcher = cache_matcher;
                        return 0;
                }
        }
        /* Register new matcher. */
        cache_matcher = rte_calloc(__func__, 1, sizeof(*cache_matcher), 0);
        if (!cache_matcher)
                return rte_flow_error_set(error, ENOMEM,
                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                          "cannot allocate matcher memory");
        *cache_matcher = *matcher;
        dv_attr.match_criteria_enable =
                flow_dv_matcher_enable(cache_matcher->mask.buf);
        dv_attr.priority = matcher->priority;
        if (matcher->egress)
                dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
        cache_matcher->matcher_object =
                mlx5_glue->dv_create_flow_matcher(priv->ctx, &dv_attr);
        if (!cache_matcher->matcher_object) {
                /* Do not leak the cache entry on failure. */
                rte_free(cache_matcher);
                return rte_flow_error_set(error, ENOMEM,
                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                          NULL, "cannot create matcher");
        }
        rte_atomic32_inc(&cache_matcher->refcnt);
        LIST_INSERT_HEAD(&priv->matchers, cache_matcher, next);
        dev_flow->dv.matcher = cache_matcher;
        DRV_LOG(DEBUG, "priority %hd new %s matcher %p: refcnt %d",
                cache_matcher->priority,
                cache_matcher->egress ? "tx" : "rx", (void *)cache_matcher,
                rte_atomic32_read(&cache_matcher->refcnt));
        return 0;
}
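
/*
 * Matchers are reference-counted and shared: two flows with the same
 * mask, priority and direction reuse one mlx5dv matcher object, so a
 * cache hit above only bumps refcnt while a miss allocates and
 * inserts a new entry. The matching release path is
 * flow_dv_matcher_release() below.
 */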
/**
 * Fill the flow with DV spec.
 *
 * @param[in] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in, out] dev_flow
 *   Pointer to the sub flow.
 * @param[in] attr
 *   Pointer to the flow attributes.
 * @param[in] items
 *   Pointer to the list of items.
 * @param[in] actions
 *   Pointer to the list of actions.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_translate(struct rte_eth_dev *dev,
                  struct mlx5_flow *dev_flow,
                  const struct rte_flow_attr *attr,
                  const struct rte_flow_item items[],
                  const struct rte_flow_action actions[],
                  struct rte_flow_error *error)
{
        struct priv *priv = dev->data->dev_private;
        uint64_t priority = attr->priority;
        struct mlx5_flow_dv_matcher matcher = {
                .mask = {
                        .size = sizeof(matcher.mask.buf),
                },
        };
        void *match_value = dev_flow->dv.value.buf;
        uint8_t inner = 0;

        for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++)
                flow_dv_create_item(&matcher, match_value, items, dev_flow,
                                    inner);
        matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
                                    matcher.mask.size);
        if (priority == MLX5_FLOW_PRIO_RSVD)
                priority = priv->config.flow_prio - 1;
        matcher.priority = mlx5_flow_adjust_priority(dev, priority,
                                                     matcher.priority);
        matcher.egress = attr->egress;
        if (flow_dv_matcher_register(dev, &matcher, dev_flow, error))
                return -rte_errno;
        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++)
                flow_dv_create_action(actions, dev_flow);
        return 0;
}

/**
 * Apply the flow to the NIC.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in, out] flow
 *   Pointer to flow structure.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
              struct rte_flow_error *error)
{
        struct mlx5_flow_dv *dv;
        struct mlx5_flow *dev_flow;
        int n;
        int err;

        LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
                dv = &dev_flow->dv;
                n = dv->actions_n;
                if (flow->actions & MLX5_FLOW_ACTION_DROP) {
                        dv->hrxq = mlx5_hrxq_drop_new(dev);
                        if (!dv->hrxq) {
                                rte_flow_error_set
                                        (error, errno,
                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                         "cannot get drop hash queue");
                                goto error;
                        }
                        dv->actions[n].type = MLX5DV_FLOW_ACTION_DEST_IBV_QP;
                        dv->actions[n].qp = dv->hrxq->qp;
                        n++;
                } else if (flow->actions &
                           (MLX5_FLOW_ACTION_QUEUE | MLX5_FLOW_ACTION_RSS)) {
                        struct mlx5_hrxq *hrxq;

                        hrxq = mlx5_hrxq_get(dev, flow->key,
                                             MLX5_RSS_HASH_KEY_LEN,
                                             dv->hash_fields,
                                             (*flow->queue),
                                             flow->rss.queue_num);
                        if (!hrxq)
                                hrxq = mlx5_hrxq_new
                                        (dev, flow->key, MLX5_RSS_HASH_KEY_LEN,
                                         dv->hash_fields, (*flow->queue),
                                         flow->rss.queue_num,
                                         !!(flow->layers &
                                            MLX5_FLOW_LAYER_TUNNEL));
                        if (!hrxq) {
                                rte_flow_error_set
                                        (error, rte_errno,
                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                         "cannot get hash queue");
                                goto error;
                        }
                        dv->hrxq = hrxq;
                        dv->actions[n].type = MLX5DV_FLOW_ACTION_DEST_IBV_QP;
                        dv->actions[n].qp = hrxq->qp;
                        n++;
                }
                dv->flow =
                        mlx5_glue->dv_create_flow(dv->matcher->matcher_object,
                                                  (void *)&dv->value, n,
                                                  dv->actions);
                if (!dv->flow) {
                        rte_flow_error_set(error, errno,
                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                           NULL,
                                           "hardware refuses to create flow");
                        goto error;
                }
        }
        return 0;
error:
        err = rte_errno; /* Save rte_errno before cleanup. */
        LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
                struct mlx5_flow_dv *dv = &dev_flow->dv;

                if (dv->hrxq) {
                        if (flow->actions & MLX5_FLOW_ACTION_DROP)
                                mlx5_hrxq_drop_release(dev);
                        else
                                mlx5_hrxq_release(dev, dv->hrxq);
                        dv->hrxq = NULL;
                }
        }
        rte_errno = err; /* Restore rte_errno. */
        return -rte_errno;
}

/**
 * Release the flow matcher.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param flow
 *   Pointer to mlx5_flow.
 *
 * @return
 *   1 while a reference on it exists, 0 when freed.
 */
static int
flow_dv_matcher_release(struct rte_eth_dev *dev,
                        struct mlx5_flow *flow)
{
        struct mlx5_flow_dv_matcher *matcher = flow->dv.matcher;

        assert(matcher->matcher_object);
        DRV_LOG(DEBUG, "port %u matcher %p: refcnt %d--",
                dev->data->port_id, (void *)matcher,
                rte_atomic32_read(&matcher->refcnt));
        if (rte_atomic32_dec_and_test(&matcher->refcnt)) {
                claim_zero(mlx5_glue->dv_destroy_flow_matcher
                           (matcher->matcher_object));
                LIST_REMOVE(matcher, next);
                rte_free(matcher);
                DRV_LOG(DEBUG, "port %u matcher %p: removed",
                        dev->data->port_id, (void *)matcher);
                return 0;
        }
        return 1;
}

/**
 * Remove the flow from the NIC but keep it in memory.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in, out] flow
 *   Pointer to flow structure.
 */
static void
flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
{
        struct mlx5_flow_dv *dv;
        struct mlx5_flow *dev_flow;

        if (!flow)
                return;
        LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
                dv = &dev_flow->dv;
                if (dv->flow) {
                        claim_zero(mlx5_glue->destroy_flow(dv->flow));
                        dv->flow = NULL;
                }
                if (dv->hrxq) {
                        if (flow->actions & MLX5_FLOW_ACTION_DROP)
                                mlx5_hrxq_drop_release(dev);
                        else
                                mlx5_hrxq_release(dev, dv->hrxq);
                        dv->hrxq = NULL;
                }
        }
        flow->counter = NULL;
}

/**
 * Remove the flow from the NIC and the memory.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in, out] flow
 *   Pointer to flow structure.
 */
static void
flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
{
        struct mlx5_flow *dev_flow;

        if (!flow)
                return;
        flow_dv_remove(dev, flow);
        while (!LIST_EMPTY(&flow->dev_flows)) {
                dev_flow = LIST_FIRST(&flow->dev_flows);
                LIST_REMOVE(dev_flow, next);
                if (dev_flow->dv.matcher)
                        flow_dv_matcher_release(dev, dev_flow);
                rte_free(dev_flow);
        }
}

const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
        .validate = flow_dv_validate,
        .prepare = flow_dv_prepare,
        .translate = flow_dv_translate,
        .apply = flow_dv_apply,
        .remove = flow_dv_remove,
        .destroy = flow_dv_destroy,
};
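
/*
 * A sketch of how the generic mlx5 flow layer is expected to dispatch
 * through this table; the fops variable is illustrative, the actual
 * selection logic lives in mlx5_flow.c:
 *
 *   const struct mlx5_flow_driver_ops *fops = &mlx5_flow_dv_drv_ops;
 *
 *   if (fops->validate(dev, attr, items, actions, error))
 *           return -rte_errno;
 */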

#endif /* HAVE_IBV_FLOW_DV_SUPPORT */