net/mlx5: fix initialization of struct members
drivers/net/mlx5/mlx5_flow_verbs.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2018 Mellanox Technologies, Ltd
3  */
4
5 #include <netinet/in.h>
6 #include <sys/queue.h>
7 #include <stdalign.h>
8 #include <stdint.h>
9 #include <string.h>
10
11 /* Verbs header. */
12 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
13 #ifdef PEDANTIC
14 #pragma GCC diagnostic ignored "-Wpedantic"
15 #endif
16 #include <infiniband/verbs.h>
17 #ifdef PEDANTIC
18 #pragma GCC diagnostic error "-Wpedantic"
19 #endif
20
21 #include <rte_common.h>
22 #include <rte_ether.h>
23 #include <rte_eth_ctrl.h>
24 #include <rte_ethdev_driver.h>
25 #include <rte_flow.h>
26 #include <rte_flow_driver.h>
27 #include <rte_malloc.h>
28 #include <rte_ip.h>
29
30 #include "mlx5.h"
31 #include "mlx5_defs.h"
32 #include "mlx5_prm.h"
33 #include "mlx5_glue.h"
34 #include "mlx5_flow.h"
35
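/*
 * Evaluate to IBV_FLOW_SPEC_INNER when a tunnel layer has already been
 * matched (MLX5_FLOW_LAYER_TUNNEL set in item_flags), so that the following
 * spec applies to the inner headers; 0 otherwise.
 */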
36 #define VERBS_SPEC_INNER(item_flags) \
37         (!!((item_flags) & MLX5_FLOW_LAYER_TUNNEL) ? IBV_FLOW_SPEC_INNER : 0)
38
39 /**
40  * Create a flow counter with the Verbs library.
41  *
42  * @param[in] dev
43  *   Pointer to the Ethernet device structure.
44  * @param[in, out] counter
45  *   mlx5 flow counter object. It contains the counter id and, if
46  *   counters are supported, receives the handle of the created Verbs
47  *   counter in its cs field.
48  *
49  * @return
50  *   0 on success, otherwise a negative errno value is returned
51  *   and rte_errno is set.
52  */
53 static int
54 flow_verbs_counter_create(struct rte_eth_dev *dev,
55                           struct mlx5_flow_counter *counter)
56 {
57 #if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
58         struct priv *priv = dev->data->dev_private;
59         struct ibv_counter_set_init_attr init = {
60                          .counter_set_id = counter->id};
61
62         counter->cs = mlx5_glue->create_counter_set(priv->ctx, &init);
63         if (!counter->cs) {
64                 rte_errno = ENOTSUP;
65                 return -ENOTSUP;
66         }
67         return 0;
68 #elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
69         struct priv *priv = dev->data->dev_private;
70         struct ibv_counters_init_attr init = {0};
71         struct ibv_counter_attach_attr attach;
72         int ret;
73
74         memset(&attach, 0, sizeof(attach));
75         counter->cs = mlx5_glue->create_counters(priv->ctx, &init);
76         if (!counter->cs) {
77                 rte_errno = ENOTSUP;
78                 return -ENOTSUP;
79         }
80         attach.counter_desc = IBV_COUNTER_PACKETS;
81         attach.index = 0;
82         ret = mlx5_glue->attach_counters(counter->cs, &attach, NULL);
83         if (!ret) {
84                 attach.counter_desc = IBV_COUNTER_BYTES;
85                 attach.index = 1;
86                 ret = mlx5_glue->attach_counters
87                                         (counter->cs, &attach, NULL);
88         }
89         if (ret) {
90                 claim_zero(mlx5_glue->destroy_counters(counter->cs));
91                 counter->cs = NULL;
92                 rte_errno = ret;
93                 return -ret;
94         }
95         return 0;
96 #else
97         (void)dev;
98         (void)counter;
99         rte_errno = ENOTSUP;
100         return -ENOTSUP;
101 #endif
102 }
103
104 /**
105  * Get a flow counter.
106  *
107  * @param[in] dev
108  *   Pointer to the Ethernet device structure.
109  * @param[in] shared
110  *   Indicate if this counter is shared with other flows.
111  * @param[in] id
112  *   Counter identifier.
113  *
114  * @return
115  *   A pointer to the counter, NULL otherwise and rte_errno is set.
116  */
117 static struct mlx5_flow_counter *
118 flow_verbs_counter_new(struct rte_eth_dev *dev, uint32_t shared, uint32_t id)
119 {
120         struct priv *priv = dev->data->dev_private;
121         struct mlx5_flow_counter *cnt;
122         int ret;
123
124         LIST_FOREACH(cnt, &priv->flow_counters, next) {
125                 if (!cnt->shared || cnt->shared != shared)
126                         continue;
127                 if (cnt->id != id)
128                         continue;
129                 cnt->ref_cnt++;
130                 return cnt;
131         }
132         cnt = rte_calloc(__func__, 1, sizeof(*cnt), 0);
133         if (!cnt) {
134                 rte_errno = ENOMEM;
135                 return NULL;
136         }
137         cnt->id = id;
138         cnt->shared = shared;
139         cnt->ref_cnt = 1;
140         cnt->hits = 0;
141         cnt->bytes = 0;
142         /* Create counter with Verbs. */
143         ret = flow_verbs_counter_create(dev, cnt);
144         if (!ret) {
145                 LIST_INSERT_HEAD(&priv->flow_counters, cnt, next);
146                 return cnt;
147         }
148         /* Some error occurred in Verbs library. */
149         rte_free(cnt);
150         rte_errno = -ret;
151         return NULL;
152 }
153
154 /**
155  * Release a flow counter.
156  *
157  * @param[in] counter
158  *   Pointer to the counter handle.
159  */
160 static void
161 flow_verbs_counter_release(struct mlx5_flow_counter *counter)
162 {
163         if (--counter->ref_cnt == 0) {
164 #if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
165                 claim_zero(mlx5_glue->destroy_counter_set(counter->cs));
166 #elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
167                 claim_zero(mlx5_glue->destroy_counters(counter->cs));
168 #endif
169                 LIST_REMOVE(counter, next);
170                 rte_free(counter);
171         }
172 }
173
174 /**
175  * Query a flow counter via Verbs library call.
176  *
177  * @see rte_flow_query()
178  * @see rte_flow_ops
179  */
180 static int
181 flow_verbs_counter_query(struct rte_eth_dev *dev __rte_unused,
182                          struct rte_flow *flow, void *data,
183                          struct rte_flow_error *error)
184 {
185 #if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) || \
186         defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
187         if (flow->actions & MLX5_FLOW_ACTION_COUNT) {
188                 struct rte_flow_query_count *qc = data;
189                 uint64_t counters[2] = {0, 0};
190 #if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
191                 struct ibv_query_counter_set_attr query_cs_attr = {
192                         .cs = flow->counter->cs,
193                         .query_flags = IBV_COUNTER_SET_FORCE_UPDATE,
194                 };
195                 struct ibv_counter_set_data query_out = {
196                         .out = counters,
197                         .outlen = 2 * sizeof(uint64_t),
198                 };
199                 int err = mlx5_glue->query_counter_set(&query_cs_attr,
200                                                        &query_out);
201 #elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
202                 int err = mlx5_glue->query_counters
203                                (flow->counter->cs, counters,
204                                 RTE_DIM(counters),
205                                 IBV_READ_COUNTERS_ATTR_PREFER_CACHED);
206 #endif
207                 if (err)
208                         return rte_flow_error_set
209                                 (error, err,
210                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
211                                  NULL,
212                                  "cannot read counter");
213                 qc->hits_set = 1;
214                 qc->bytes_set = 1;
215                 qc->hits = counters[0] - flow->counter->hits;
216                 qc->bytes = counters[1] - flow->counter->bytes;
217                 if (qc->reset) {
218                         flow->counter->hits = counters[0];
219                         flow->counter->bytes = counters[1];
220                 }
221                 return 0;
222         }
223         return rte_flow_error_set(error, EINVAL,
224                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
225                                   NULL,
226                                   "flow does not have counter");
227 #else
228         (void)flow;
229         (void)data;
230         return rte_flow_error_set(error, ENOTSUP,
231                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
232                                   NULL,
233                                   "counters are not available");
234 #endif
235 }
236
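/*
 * Usage sketch (not part of the driver): an application reads the counter
 * attached to a flow through the generic rte_flow API. The port_id and flow
 * handle below are hypothetical placeholders.
 *
 *   struct rte_flow_query_count qc = { .reset = 1 };
 *   struct rte_flow_action count_action = {
 *           .type = RTE_FLOW_ACTION_TYPE_COUNT,
 *   };
 *   struct rte_flow_error err;
 *
 *   if (rte_flow_query(port_id, flow, &count_action, &qc, &err) == 0)
 *           printf("hits=%" PRIu64 " bytes=%" PRIu64 "\n", qc.hits, qc.bytes);
 */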
237 /**
238  * Add a Verbs item specification into @p verbs.
239  *
240  * @param[out] verbs
241  *   Pointer to verbs structure.
242  * @param[in] src
243  *   Pointer to the specification to copy.
244  * @param[in] size
245  *   Size in bytes of the specification to copy.
246  */
247 static void
248 flow_verbs_spec_add(struct mlx5_flow_verbs *verbs, void *src, unsigned int size)
249 {
250         void *dst;
251
252         if (!verbs)
253                 return;
254         assert(verbs->specs);
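        /* Specs are laid out back to back right after the ibv_flow_attr. */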
255         dst = (void *)(verbs->specs + verbs->size);
256         memcpy(dst, src, size);
257         ++verbs->attr->num_of_specs;
258         verbs->size += size;
259 }
260
261 /**
262  * Convert the @p item into a Verbs specification. This function assumes that
263  * the input is valid and that there is space to insert the requested item
264  * into the flow.
265  *
266  * @param[in, out] dev_flow
267  *   Pointer to dev_flow structure.
268  * @param[in] item
269  *   Item specification.
270  * @param[in] item_flags
271  *   Parsed item flags.
272  */
273 static void
274 flow_verbs_translate_item_eth(struct mlx5_flow *dev_flow,
275                               const struct rte_flow_item *item,
276                               uint64_t item_flags)
277 {
278         const struct rte_flow_item_eth *spec = item->spec;
279         const struct rte_flow_item_eth *mask = item->mask;
280         const unsigned int size = sizeof(struct ibv_flow_spec_eth);
281         struct ibv_flow_spec_eth eth = {
282                 .type = IBV_FLOW_SPEC_ETH | VERBS_SPEC_INNER(item_flags),
283                 .size = size,
284         };
285
286         if (!mask)
287                 mask = &rte_flow_item_eth_mask;
288         if (spec) {
289                 unsigned int i;
290
291                 memcpy(&eth.val.dst_mac, spec->dst.addr_bytes, ETHER_ADDR_LEN);
292                 memcpy(&eth.val.src_mac, spec->src.addr_bytes, ETHER_ADDR_LEN);
293                 eth.val.ether_type = spec->type;
294                 memcpy(&eth.mask.dst_mac, mask->dst.addr_bytes, ETHER_ADDR_LEN);
295                 memcpy(&eth.mask.src_mac, mask->src.addr_bytes, ETHER_ADDR_LEN);
296                 eth.mask.ether_type = mask->type;
297                 /* Remove unwanted bits from values. */
298                 for (i = 0; i < ETHER_ADDR_LEN; ++i) {
299                         eth.val.dst_mac[i] &= eth.mask.dst_mac[i];
300                         eth.val.src_mac[i] &= eth.mask.src_mac[i];
301                 }
302                 eth.val.ether_type &= eth.mask.ether_type;
303         }
304         flow_verbs_spec_add(&dev_flow->verbs, &eth, size);
305 }
306
307 /**
308  * Update the VLAN tag in the Verbs Ethernet specification.
309  * This function assumes that the input is valid and there is space to add
310  * the requested item.
311  *
312  * @param[in, out] attr
313  *   Pointer to Verbs attributes structure.
314  * @param[in] eth
315  *   Verbs structure containing the VLAN information to copy.
316  */
317 static void
318 flow_verbs_item_vlan_update(struct ibv_flow_attr *attr,
319                             struct ibv_flow_spec_eth *eth)
320 {
321         unsigned int i;
322         const enum ibv_flow_spec_type search = eth->type;
323         struct ibv_spec_header *hdr = (struct ibv_spec_header *)
324                 ((uint8_t *)attr + sizeof(struct ibv_flow_attr));
325
326         for (i = 0; i != attr->num_of_specs; ++i) {
327                 if (hdr->type == search) {
328                         struct ibv_flow_spec_eth *e =
329                                 (struct ibv_flow_spec_eth *)hdr;
330
331                         e->val.vlan_tag = eth->val.vlan_tag;
332                         e->mask.vlan_tag = eth->mask.vlan_tag;
333                         e->val.ether_type = eth->val.ether_type;
334                         e->mask.ether_type = eth->mask.ether_type;
335                         break;
336                 }
337                 hdr = (struct ibv_spec_header *)((uint8_t *)hdr + hdr->size);
338         }
339 }
340
341 /**
342  * Convert the @p item into a Verbs specification. This function assumes that
343  * the input is valid and that there is space to insert the requested item
344  * into the flow.
345  *
346  * @param[in, out] dev_flow
347  *   Pointer to dev_flow structure.
348  * @param[in] item
349  *   Item specification.
350  * @param[in] item_flags
351  *   Parsed item flags.
352  */
353 static void
354 flow_verbs_translate_item_vlan(struct mlx5_flow *dev_flow,
355                                const struct rte_flow_item *item,
356                                uint64_t item_flags)
357 {
358         const struct rte_flow_item_vlan *spec = item->spec;
359         const struct rte_flow_item_vlan *mask = item->mask;
360         unsigned int size = sizeof(struct ibv_flow_spec_eth);
361         const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
362         struct ibv_flow_spec_eth eth = {
363                 .type = IBV_FLOW_SPEC_ETH | VERBS_SPEC_INNER(item_flags),
364                 .size = size,
365         };
366         const uint32_t l2m = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
367                                       MLX5_FLOW_LAYER_OUTER_L2;
368
369         if (!mask)
370                 mask = &rte_flow_item_vlan_mask;
371         if (spec) {
372                 eth.val.vlan_tag = spec->tci;
373                 eth.mask.vlan_tag = mask->tci;
374                 eth.val.vlan_tag &= eth.mask.vlan_tag;
375                 eth.val.ether_type = spec->inner_type;
376                 eth.mask.ether_type = mask->inner_type;
377                 eth.val.ether_type &= eth.mask.ether_type;
378         }
379         if (!(item_flags & l2m))
380                 flow_verbs_spec_add(&dev_flow->verbs, &eth, size);
381         else
382                 flow_verbs_item_vlan_update(dev_flow->verbs.attr, &eth);
383 }
384
385 /**
386  * Convert the @p item into a Verbs specification. This function assumes that
387  * the input is valid and that there is space to insert the requested item
388  * into the flow.
389  *
390  * @param[in, out] dev_flow
391  *   Pointer to dev_flow structure.
392  * @param[in] item
393  *   Item specification.
394  * @param[in] item_flags
395  *   Parsed item flags.
396  */
397 static void
398 flow_verbs_translate_item_ipv4(struct mlx5_flow *dev_flow,
399                                const struct rte_flow_item *item,
400                                uint64_t item_flags)
401 {
402         const struct rte_flow_item_ipv4 *spec = item->spec;
403         const struct rte_flow_item_ipv4 *mask = item->mask;
404         unsigned int size = sizeof(struct ibv_flow_spec_ipv4_ext);
405         struct ibv_flow_spec_ipv4_ext ipv4 = {
406                 .type = IBV_FLOW_SPEC_IPV4_EXT | VERBS_SPEC_INNER(item_flags),
407                 .size = size,
408         };
409
410         if (!mask)
411                 mask = &rte_flow_item_ipv4_mask;
412         if (spec) {
413                 ipv4.val = (struct ibv_flow_ipv4_ext_filter){
414                         .src_ip = spec->hdr.src_addr,
415                         .dst_ip = spec->hdr.dst_addr,
416                         .proto = spec->hdr.next_proto_id,
417                         .tos = spec->hdr.type_of_service,
418                 };
419                 ipv4.mask = (struct ibv_flow_ipv4_ext_filter){
420                         .src_ip = mask->hdr.src_addr,
421                         .dst_ip = mask->hdr.dst_addr,
422                         .proto = mask->hdr.next_proto_id,
423                         .tos = mask->hdr.type_of_service,
424                 };
425                 /* Remove unwanted bits from values. */
426                 ipv4.val.src_ip &= ipv4.mask.src_ip;
427                 ipv4.val.dst_ip &= ipv4.mask.dst_ip;
428                 ipv4.val.proto &= ipv4.mask.proto;
429                 ipv4.val.tos &= ipv4.mask.tos;
430         }
431         flow_verbs_spec_add(&dev_flow->verbs, &ipv4, size);
432 }
433
434 /**
435  * Convert the @p item into a Verbs specification. This function assumes that
436  * the input is valid and that there is space to insert the requested item
437  * into the flow.
438  *
439  * @param[in, out] dev_flow
440  *   Pointer to dev_flow structure.
441  * @param[in] item
442  *   Item specification.
443  * @param[in] item_flags
444  *   Parsed item flags.
445  */
446 static void
447 flow_verbs_translate_item_ipv6(struct mlx5_flow *dev_flow,
448                                const struct rte_flow_item *item,
449                                uint64_t item_flags)
450 {
451         const struct rte_flow_item_ipv6 *spec = item->spec;
452         const struct rte_flow_item_ipv6 *mask = item->mask;
453         unsigned int size = sizeof(struct ibv_flow_spec_ipv6);
454         struct ibv_flow_spec_ipv6 ipv6 = {
455                 .type = IBV_FLOW_SPEC_IPV6 | VERBS_SPEC_INNER(item_flags),
456                 .size = size,
457         };
458
459         if (!mask)
460                 mask = &rte_flow_item_ipv6_mask;
461         if (spec) {
462                 unsigned int i;
463                 uint32_t vtc_flow_val;
464                 uint32_t vtc_flow_mask;
465
466                 memcpy(&ipv6.val.src_ip, spec->hdr.src_addr,
467                        RTE_DIM(ipv6.val.src_ip));
468                 memcpy(&ipv6.val.dst_ip, spec->hdr.dst_addr,
469                        RTE_DIM(ipv6.val.dst_ip));
470                 memcpy(&ipv6.mask.src_ip, mask->hdr.src_addr,
471                        RTE_DIM(ipv6.mask.src_ip));
472                 memcpy(&ipv6.mask.dst_ip, mask->hdr.dst_addr,
473                        RTE_DIM(ipv6.mask.dst_ip));
474                 vtc_flow_val = rte_be_to_cpu_32(spec->hdr.vtc_flow);
475                 vtc_flow_mask = rte_be_to_cpu_32(mask->hdr.vtc_flow);
476                 ipv6.val.flow_label =
477                         rte_cpu_to_be_32((vtc_flow_val & IPV6_HDR_FL_MASK) >>
478                                          IPV6_HDR_FL_SHIFT);
479                 ipv6.val.traffic_class = (vtc_flow_val & IPV6_HDR_TC_MASK) >>
480                                          IPV6_HDR_TC_SHIFT;
481                 ipv6.val.next_hdr = spec->hdr.proto;
482                 ipv6.val.hop_limit = spec->hdr.hop_limits;
483                 ipv6.mask.flow_label =
484                         rte_cpu_to_be_32((vtc_flow_mask & IPV6_HDR_FL_MASK) >>
485                                          IPV6_HDR_FL_SHIFT);
486                 ipv6.mask.traffic_class = (vtc_flow_mask & IPV6_HDR_TC_MASK) >>
487                                           IPV6_HDR_TC_SHIFT;
488                 ipv6.mask.next_hdr = mask->hdr.proto;
489                 ipv6.mask.hop_limit = mask->hdr.hop_limits;
490                 /* Remove unwanted bits from values. */
491                 for (i = 0; i < RTE_DIM(ipv6.val.src_ip); ++i) {
492                         ipv6.val.src_ip[i] &= ipv6.mask.src_ip[i];
493                         ipv6.val.dst_ip[i] &= ipv6.mask.dst_ip[i];
494                 }
495                 ipv6.val.flow_label &= ipv6.mask.flow_label;
496                 ipv6.val.traffic_class &= ipv6.mask.traffic_class;
497                 ipv6.val.next_hdr &= ipv6.mask.next_hdr;
498                 ipv6.val.hop_limit &= ipv6.mask.hop_limit;
499         }
500         flow_verbs_spec_add(&dev_flow->verbs, &ipv6, size);
501 }
502
503 /**
504  * Convert the @p item into a Verbs specification. This function assumes that
505  * the input is valid and that there is space to insert the requested item
506  * into the flow.
507  *
508  * @param[in, out] dev_flow
509  *   Pointer to dev_flow structure.
510  * @param[in] item
511  *   Item specification.
512  * @param[in] item_flags
513  *   Parsed item flags.
514  */
515 static void
516 flow_verbs_translate_item_tcp(struct mlx5_flow *dev_flow,
517                               const struct rte_flow_item *item,
518                               uint64_t item_flags __rte_unused)
519 {
520         const struct rte_flow_item_tcp *spec = item->spec;
521         const struct rte_flow_item_tcp *mask = item->mask;
522         unsigned int size = sizeof(struct ibv_flow_spec_tcp_udp);
523         struct ibv_flow_spec_tcp_udp tcp = {
524                 .type = IBV_FLOW_SPEC_TCP | VERBS_SPEC_INNER(item_flags),
525                 .size = size,
526         };
527
528         if (!mask)
529                 mask = &rte_flow_item_tcp_mask;
530         if (spec) {
531                 tcp.val.dst_port = spec->hdr.dst_port;
532                 tcp.val.src_port = spec->hdr.src_port;
533                 tcp.mask.dst_port = mask->hdr.dst_port;
534                 tcp.mask.src_port = mask->hdr.src_port;
535                 /* Remove unwanted bits from values. */
536                 tcp.val.src_port &= tcp.mask.src_port;
537                 tcp.val.dst_port &= tcp.mask.dst_port;
538         }
539         flow_verbs_spec_add(&dev_flow->verbs, &tcp, size);
540 }
541
542 /**
543  * Convert the @p item into a Verbs specification. This function assumes that
544  * the input is valid and that there is space to insert the requested item
545  * into the flow.
546  *
547  * @param[in, out] dev_flow
548  *   Pointer to dev_flow structure.
549  * @param[in] item
550  *   Item specification.
551  * @param[in] item_flags
552  *   Parsed item flags.
553  */
554 static void
555 flow_verbs_translate_item_udp(struct mlx5_flow *dev_flow,
556                               const struct rte_flow_item *item,
557                               uint64_t item_flags __rte_unused)
558 {
559         const struct rte_flow_item_udp *spec = item->spec;
560         const struct rte_flow_item_udp *mask = item->mask;
561         unsigned int size = sizeof(struct ibv_flow_spec_tcp_udp);
562         struct ibv_flow_spec_tcp_udp udp = {
563                 .type = IBV_FLOW_SPEC_UDP | VERBS_SPEC_INNER(item_flags),
564                 .size = size,
565         };
566
567         if (!mask)
568                 mask = &rte_flow_item_udp_mask;
569         if (spec) {
570                 udp.val.dst_port = spec->hdr.dst_port;
571                 udp.val.src_port = spec->hdr.src_port;
572                 udp.mask.dst_port = mask->hdr.dst_port;
573                 udp.mask.src_port = mask->hdr.src_port;
574                 /* Remove unwanted bits from values. */
575                 udp.val.src_port &= udp.mask.src_port;
576                 udp.val.dst_port &= udp.mask.dst_port;
577         }
578         flow_verbs_spec_add(&dev_flow->verbs, &udp, size);
579 }
580
581 /**
582  * Convert the @p item into a Verbs specification. This function assumes that
583  * the input is valid and that there is space to insert the requested item
584  * into the flow.
585  *
586  * @param[in, out] dev_flow
587  *   Pointer to dev_flow structure.
588  * @param[in] item
589  *   Item specification.
590  * @param[in] item_flags
591  *   Parsed item flags.
592  */
593 static void
594 flow_verbs_translate_item_vxlan(struct mlx5_flow *dev_flow,
595                                 const struct rte_flow_item *item,
596                                 uint64_t item_flags __rte_unused)
597 {
598         const struct rte_flow_item_vxlan *spec = item->spec;
599         const struct rte_flow_item_vxlan *mask = item->mask;
600         unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
601         struct ibv_flow_spec_tunnel vxlan = {
602                 .type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
603                 .size = size,
604         };
605         union vni {
606                 uint32_t vlan_id;
607                 uint8_t vni[4];
608         } id = { .vlan_id = 0, };
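        /*
         * The 24-bit VNI from the item is copied into bytes 1..3 of the
         * union, leaving byte 0 zero; the resulting 32-bit value is
         * programmed as the Verbs tunnel_id.
         */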
609
610         if (!mask)
611                 mask = &rte_flow_item_vxlan_mask;
612         if (spec) {
613                 memcpy(&id.vni[1], spec->vni, 3);
614                 vxlan.val.tunnel_id = id.vlan_id;
615                 memcpy(&id.vni[1], mask->vni, 3);
616                 vxlan.mask.tunnel_id = id.vlan_id;
617                 /* Remove unwanted bits from values. */
618                 vxlan.val.tunnel_id &= vxlan.mask.tunnel_id;
619         }
620         flow_verbs_spec_add(&dev_flow->verbs, &vxlan, size);
621 }
622
623 /**
624  * Convert the @p item into a Verbs specification. This function assumes that
625  * the input is valid and that there is space to insert the requested item
626  * into the flow.
627  *
628  * @param[in, out] dev_flow
629  *   Pointer to dev_flow structure.
630  * @param[in] item
631  *   Item specification.
632  * @param[in] item_flags
633  *   Parsed item flags.
634  */
635 static void
636 flow_verbs_translate_item_vxlan_gpe(struct mlx5_flow *dev_flow,
637                                     const struct rte_flow_item *item,
638                                     uint64_t item_flags __rte_unused)
639 {
640         const struct rte_flow_item_vxlan_gpe *spec = item->spec;
641         const struct rte_flow_item_vxlan_gpe *mask = item->mask;
642         unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
643         struct ibv_flow_spec_tunnel vxlan_gpe = {
644                 .type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
645                 .size = size,
646         };
647         union vni {
648                 uint32_t vlan_id;
649                 uint8_t vni[4];
650         } id = { .vlan_id = 0, };
651
652         if (!mask)
653                 mask = &rte_flow_item_vxlan_gpe_mask;
654         if (spec) {
655                 memcpy(&id.vni[1], spec->vni, 3);
656                 vxlan_gpe.val.tunnel_id = id.vlan_id;
657                 memcpy(&id.vni[1], mask->vni, 3);
658                 vxlan_gpe.mask.tunnel_id = id.vlan_id;
659                 /* Remove unwanted bits from values. */
660                 vxlan_gpe.val.tunnel_id &= vxlan_gpe.mask.tunnel_id;
661         }
662         flow_verbs_spec_add(&dev_flow->verbs, &vxlan_gpe, size);
663 }
664
665 /**
666  * Update the protocol in Verbs IPv4/IPv6 spec.
667  *
668  * @param[in, out] attr
669  *   Pointer to Verbs attributes structure.
670  * @param[in] search
671  *   Specification type to search in order to update the IP protocol.
672  * @param[in] protocol
673  *   Protocol value to set if none is present in the specification.
674  */
675 static void
676 flow_verbs_item_gre_ip_protocol_update(struct ibv_flow_attr *attr,
677                                        enum ibv_flow_spec_type search,
678                                        uint8_t protocol)
679 {
680         unsigned int i;
681         struct ibv_spec_header *hdr = (struct ibv_spec_header *)
682                 ((uint8_t *)attr + sizeof(struct ibv_flow_attr));
683
684         if (!attr)
685                 return;
686         for (i = 0; i != attr->num_of_specs; ++i) {
687                 if (hdr->type == search) {
688                         union {
689                                 struct ibv_flow_spec_ipv4_ext *ipv4;
690                                 struct ibv_flow_spec_ipv6 *ipv6;
691                         } ip;
692
693                         switch (search) {
694                         case IBV_FLOW_SPEC_IPV4_EXT:
695                                 ip.ipv4 = (struct ibv_flow_spec_ipv4_ext *)hdr;
696                                 if (!ip.ipv4->val.proto) {
697                                         ip.ipv4->val.proto = protocol;
698                                         ip.ipv4->mask.proto = 0xff;
699                                 }
700                                 break;
701                         case IBV_FLOW_SPEC_IPV6:
702                                 ip.ipv6 = (struct ibv_flow_spec_ipv6 *)hdr;
703                                 if (!ip.ipv6->val.next_hdr) {
704                                         ip.ipv6->val.next_hdr = protocol;
705                                         ip.ipv6->mask.next_hdr = 0xff;
706                                 }
707                                 break;
708                         default:
709                                 break;
710                         }
711                         break;
712                 }
713                 hdr = (struct ibv_spec_header *)((uint8_t *)hdr + hdr->size);
714         }
715 }
716
717 /**
718  * Convert the @p item into a Verbs specification. This function assumes that
719  * the input is valid and that there is space to insert the requested item
720  * into the flow.
721  *
722  * @param[in, out] dev_flow
723  *   Pointer to dev_flow structure.
724  * @param[in] item
725  *   Item specification.
726  * @param[in] item_flags
727  *   Parsed item flags.
728  */
729 static void
730 flow_verbs_translate_item_gre(struct mlx5_flow *dev_flow,
731                               const struct rte_flow_item *item __rte_unused,
732                               uint64_t item_flags)
733 {
734         struct mlx5_flow_verbs *verbs = &dev_flow->verbs;
735 #ifndef HAVE_IBV_DEVICE_MPLS_SUPPORT
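        /*
         * When the Verbs library lacks the MPLS/GRE specs
         * (HAVE_IBV_DEVICE_MPLS_SUPPORT unset), fall back to the generic
         * tunnel spec and only force the outer IP protocol to GRE below.
         */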
736         unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
737         struct ibv_flow_spec_tunnel tunnel = {
738                 .type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
739                 .size = size,
740         };
741 #else
742         const struct rte_flow_item_gre *spec = item->spec;
743         const struct rte_flow_item_gre *mask = item->mask;
744         unsigned int size = sizeof(struct ibv_flow_spec_gre);
745         struct ibv_flow_spec_gre tunnel = {
746                 .type = IBV_FLOW_SPEC_GRE,
747                 .size = size,
748         };
749
750         if (!mask)
751                 mask = &rte_flow_item_gre_mask;
752         if (spec) {
753                 tunnel.val.c_ks_res0_ver = spec->c_rsvd0_ver;
754                 tunnel.val.protocol = spec->protocol;
755                 tunnel.mask.c_ks_res0_ver = mask->c_rsvd0_ver;
756                 tunnel.mask.protocol = mask->protocol;
757                 /* Remove unwanted bits from values. */
758                 tunnel.val.c_ks_res0_ver &= tunnel.mask.c_ks_res0_ver;
759                 tunnel.val.protocol &= tunnel.mask.protocol;
760                 tunnel.val.key &= tunnel.mask.key;
761         }
762 #endif
763         if (item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
764                 flow_verbs_item_gre_ip_protocol_update(verbs->attr,
765                                                        IBV_FLOW_SPEC_IPV4_EXT,
766                                                        IPPROTO_GRE);
767         else
768                 flow_verbs_item_gre_ip_protocol_update(verbs->attr,
769                                                        IBV_FLOW_SPEC_IPV6,
770                                                        IPPROTO_GRE);
771         flow_verbs_spec_add(verbs, &tunnel, size);
772 }
773
774 /**
775  * Convert the @p item into a Verbs specification. This function assumes that
776  * the input is valid and that there is space to insert the requested item
777  * into the flow.
778  *
779  * @param[in, out] dev_flow
780  *   Pointer to dev_flow structure.
781  * @param[in] item
782  *   Item specification.
783  * @param[in] item_flags
784  *   Parsed item flags.
785  */
786 static void
787 flow_verbs_translate_item_mpls(struct mlx5_flow *dev_flow __rte_unused,
788                                const struct rte_flow_item *item __rte_unused,
789                                uint64_t item_flags __rte_unused)
790 {
791 #ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
792         const struct rte_flow_item_mpls *spec = item->spec;
793         const struct rte_flow_item_mpls *mask = item->mask;
794         unsigned int size = sizeof(struct ibv_flow_spec_mpls);
795         struct ibv_flow_spec_mpls mpls = {
796                 .type = IBV_FLOW_SPEC_MPLS,
797                 .size = size,
798         };
799
800         if (!mask)
801                 mask = &rte_flow_item_mpls_mask;
802         if (spec) {
803                 memcpy(&mpls.val.label, spec, sizeof(mpls.val.label));
804                 memcpy(&mpls.mask.label, mask, sizeof(mpls.mask.label));
805                 /* Remove unwanted bits from values.  */
806                 mpls.val.label &= mpls.mask.label;
807         }
808         flow_verbs_spec_add(&dev_flow->verbs, &mpls, size);
809 #endif
810 }
811
812 /**
813  * Convert the @p action into a Verbs specification. This function assumes that
814  * the input is valid and that there is space to insert the requested action
815  * into the flow.
816  *
817  * @param[in] dev_flow
818  *   Pointer to mlx5_flow.
819  * @param[in] action
820  *   Action configuration.
821  */
822 static void
823 flow_verbs_translate_action_drop
824         (struct mlx5_flow *dev_flow,
825          const struct rte_flow_action *action __rte_unused)
826 {
827         unsigned int size = sizeof(struct ibv_flow_spec_action_drop);
828         struct ibv_flow_spec_action_drop drop = {
829                         .type = IBV_FLOW_SPEC_ACTION_DROP,
830                         .size = size,
831         };
832
833         flow_verbs_spec_add(&dev_flow->verbs, &drop, size);
834 }
835
836 /**
837  * Convert the @p action into a Verbs specification. This function assumes that
838  * the input is valid and that there is space to insert the requested action
839  * into the flow.
840  *
841  * @param[in] dev_flow
842  *   Pointer to mlx5_flow.
843  * @param[in] action
844  *   Action configuration.
845  */
846 static void
847 flow_verbs_translate_action_queue(struct mlx5_flow *dev_flow,
848                                   const struct rte_flow_action *action)
849 {
850         const struct rte_flow_action_queue *queue = action->conf;
851         struct rte_flow *flow = dev_flow->flow;
852
853         if (flow->queue)
854                 (*flow->queue)[0] = queue->index;
855         flow->rss.queue_num = 1;
856 }
857
858 /**
859  * Convert the @p action into a Verbs specification. This function assumes that
860  * the input is valid and that there is space to insert the requested action
861  * into the flow.
862  *
863  * @param[in] dev_flow
864  *   Pointer to mlx5_flow; the RSS fields of the attached rte_flow
865  *   (queue list, key, types, level) are filled from the action.
866  * @param[in] action
867  *   Action configuration holding the RSS parameters
868  *   (struct rte_flow_action_rss) in action->conf.
869  */
870 static void
871 flow_verbs_translate_action_rss(struct mlx5_flow *dev_flow,
872                                 const struct rte_flow_action *action)
873 {
874         const struct rte_flow_action_rss *rss = action->conf;
875         const uint8_t *rss_key;
876         struct rte_flow *flow = dev_flow->flow;
877
878         if (flow->queue)
879                 memcpy((*flow->queue), rss->queue,
880                        rss->queue_num * sizeof(uint16_t));
881         flow->rss.queue_num = rss->queue_num;
882         /* NULL RSS key indicates default RSS key. */
883         rss_key = !rss->key ? rss_hash_default_key : rss->key;
884         memcpy(flow->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
885         /* RSS type 0 indicates default RSS type (ETH_RSS_IP). */
886         flow->rss.types = !rss->types ? ETH_RSS_IP : rss->types;
887         flow->rss.level = rss->level;
888 }
889
890 /**
891  * Convert the @p action into a Verbs specification. This function assumes that
892  * the input is valid and that there is space to insert the requested action
893  * into the flow.
894  *
895  * @param[in] dev_flow
896  *   Pointer to mlx5_flow.
897  * @param[in] action
898  *   Action configuration.
899  */
900 static void
901 flow_verbs_translate_action_flag
902         (struct mlx5_flow *dev_flow,
903          const struct rte_flow_action *action __rte_unused)
904 {
905         unsigned int size = sizeof(struct ibv_flow_spec_action_tag);
906         struct ibv_flow_spec_action_tag tag = {
907                 .type = IBV_FLOW_SPEC_ACTION_TAG,
908                 .size = size,
909                 .tag_id = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT),
910         };
911
912         flow_verbs_spec_add(&dev_flow->verbs, &tag, size);
913 }
914
915 /**
916  * Convert the @p action into a Verbs specification. This function assumes that
917  * the input is valid and that there is space to insert the requested action
918  * into the flow.
919  *
920  * @param[in] dev_flow
921  *   Pointer to mlx5_flow.
922  * @param[in] action
923  *   Action configuration.
924  */
925 static void
926 flow_verbs_translate_action_mark(struct mlx5_flow *dev_flow,
927                                  const struct rte_flow_action *action)
928 {
929         const struct rte_flow_action_mark *mark = action->conf;
930         unsigned int size = sizeof(struct ibv_flow_spec_action_tag);
931         struct ibv_flow_spec_action_tag tag = {
932                 .type = IBV_FLOW_SPEC_ACTION_TAG,
933                 .size = size,
934                 .tag_id = mlx5_flow_mark_set(mark->id),
935         };
936
937         flow_verbs_spec_add(&dev_flow->verbs, &tag, size);
938 }
939
940 /**
941  * Convert the @p action into a Verbs specification. This function assumes that
942  * the input is valid and that there is space to insert the requested action
943  * into the flow.
944  *
945  * @param[in] dev_flow
946  *   Pointer to mlx5_flow.
947  * @param[in] action
948  *   Action configuration.
949  * @param[in] dev
950  *   Pointer to the Ethernet device structure.
951  * @param[out] error
952  *   Pointer to error structure.
953  *
954  * @return
955  *   0 on success, otherwise a negative errno value is returned and rte_errno is set.
956  */
957 static int
958 flow_verbs_translate_action_count(struct mlx5_flow *dev_flow,
959                                   const struct rte_flow_action *action,
960                                   struct rte_eth_dev *dev,
961                                   struct rte_flow_error *error)
962 {
963         const struct rte_flow_action_count *count = action->conf;
964         struct rte_flow *flow = dev_flow->flow;
965 #if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) || \
966         defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
967         unsigned int size = sizeof(struct ibv_flow_spec_counter_action);
968         struct ibv_flow_spec_counter_action counter = {
969                 .type = IBV_FLOW_SPEC_ACTION_COUNT,
970                 .size = size,
971         };
972 #endif
973
974         if (!flow->counter) {
975                 flow->counter = flow_verbs_counter_new(dev, count->shared,
976                                                        count->id);
977                 if (!flow->counter)
978                         return rte_flow_error_set(error, rte_errno,
979                                                   RTE_FLOW_ERROR_TYPE_ACTION,
980                                                   action,
981                                                   "cannot get counter"
982                                                   " context.");
983         }
984 #if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
985         counter.counter_set_handle = flow->counter->cs->handle;
986         flow_verbs_spec_add(&dev_flow->verbs, &counter, size);
987 #elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
988         counter.counters = flow->counter->cs;
989         flow_verbs_spec_add(&dev_flow->verbs, &counter, size);
990 #endif
991         return 0;
992 }
993
994 /**
995  * Internal validation function, validating both actions and items.
996  *
997  * @param[in] dev
998  *   Pointer to the Ethernet device structure.
999  * @param[in] attr
1000  *   Pointer to the flow attributes.
1001  * @param[in] items
1002  *   Pointer to the list of items.
1003  * @param[in] actions
1004  *   Pointer to the list of actions.
1005  * @param[out] error
1006  *   Pointer to the error structure.
1007  *
1008  * @return
1009  *   0 on success, a negative errno value otherwise and rte_errno is set.
1010  */
1011 static int
1012 flow_verbs_validate(struct rte_eth_dev *dev,
1013                     const struct rte_flow_attr *attr,
1014                     const struct rte_flow_item items[],
1015                     const struct rte_flow_action actions[],
1016                     struct rte_flow_error *error)
1017 {
1018         int ret;
1019         uint64_t action_flags = 0;
1020         uint64_t item_flags = 0;
1021         uint8_t next_protocol = 0xff;
1022
1023         if (items == NULL)
1024                 return -1;
1025         ret = mlx5_flow_validate_attributes(dev, attr, error);
1026         if (ret < 0)
1027                 return ret;
1028         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
1029                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
1030                 int ret = 0;
1031
1032                 switch (items->type) {
1033                 case RTE_FLOW_ITEM_TYPE_VOID:
1034                         break;
1035                 case RTE_FLOW_ITEM_TYPE_ETH:
1036                         ret = mlx5_flow_validate_item_eth(items, item_flags,
1037                                                           error);
1038                         if (ret < 0)
1039                                 return ret;
1040                         item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
1041                                                MLX5_FLOW_LAYER_OUTER_L2;
1042                         break;
1043                 case RTE_FLOW_ITEM_TYPE_VLAN:
1044                         ret = mlx5_flow_validate_item_vlan(items, item_flags,
1045                                                            error);
1046                         if (ret < 0)
1047                                 return ret;
1048                         item_flags |= tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
1049                                                 MLX5_FLOW_LAYER_INNER_VLAN) :
1050                                                (MLX5_FLOW_LAYER_OUTER_L2 |
1051                                                 MLX5_FLOW_LAYER_OUTER_VLAN);
1052                         break;
1053                 case RTE_FLOW_ITEM_TYPE_IPV4:
1054                         ret = mlx5_flow_validate_item_ipv4(items, item_flags,
1055                                                            error);
1056                         if (ret < 0)
1057                                 return ret;
1058                         item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
1059                                                MLX5_FLOW_LAYER_OUTER_L3_IPV4;
1060                         if (items->mask != NULL &&
1061                             ((const struct rte_flow_item_ipv4 *)
1062                              items->mask)->hdr.next_proto_id) {
1063                                 next_protocol =
1064                                         ((const struct rte_flow_item_ipv4 *)
1065                                          (items->spec))->hdr.next_proto_id;
1066                                 next_protocol &=
1067                                         ((const struct rte_flow_item_ipv4 *)
1068                                          (items->mask))->hdr.next_proto_id;
1069                         } else {
1070                                 /* Reset for inner layer. */
1071                                 next_protocol = 0xff;
1072                         }
1073                         break;
1074                 case RTE_FLOW_ITEM_TYPE_IPV6:
1075                         ret = mlx5_flow_validate_item_ipv6(items, item_flags,
1076                                                            error);
1077                         if (ret < 0)
1078                                 return ret;
1079                         item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
1080                                                MLX5_FLOW_LAYER_OUTER_L3_IPV6;
1081                         if (items->mask != NULL &&
1082                             ((const struct rte_flow_item_ipv6 *)
1083                              items->mask)->hdr.proto) {
1084                                 next_protocol =
1085                                         ((const struct rte_flow_item_ipv6 *)
1086                                          items->spec)->hdr.proto;
1087                                 next_protocol &=
1088                                         ((const struct rte_flow_item_ipv6 *)
1089                                          items->mask)->hdr.proto;
1090                         } else {
1091                                 /* Reset for inner layer. */
1092                                 next_protocol = 0xff;
1093                         }
1094                         break;
1095                 case RTE_FLOW_ITEM_TYPE_UDP:
1096                         ret = mlx5_flow_validate_item_udp(items, item_flags,
1097                                                           next_protocol,
1098                                                           error);
1099                         if (ret < 0)
1100                                 return ret;
1101                         item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
1102                                                MLX5_FLOW_LAYER_OUTER_L4_UDP;
1103                         break;
1104                 case RTE_FLOW_ITEM_TYPE_TCP:
1105                         ret = mlx5_flow_validate_item_tcp
1106                                                 (items, item_flags,
1107                                                  next_protocol,
1108                                                  &rte_flow_item_tcp_mask,
1109                                                  error);
1110                         if (ret < 0)
1111                                 return ret;
1112                         item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
1113                                                MLX5_FLOW_LAYER_OUTER_L4_TCP;
1114                         break;
1115                 case RTE_FLOW_ITEM_TYPE_VXLAN:
1116                         ret = mlx5_flow_validate_item_vxlan(items, item_flags,
1117                                                             error);
1118                         if (ret < 0)
1119                                 return ret;
1120                         item_flags |= MLX5_FLOW_LAYER_VXLAN;
1121                         break;
1122                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
1123                         ret = mlx5_flow_validate_item_vxlan_gpe(items,
1124                                                                 item_flags,
1125                                                                 dev, error);
1126                         if (ret < 0)
1127                                 return ret;
1128                         item_flags |= MLX5_FLOW_LAYER_VXLAN_GPE;
1129                         break;
1130                 case RTE_FLOW_ITEM_TYPE_GRE:
1131                         ret = mlx5_flow_validate_item_gre(items, item_flags,
1132                                                           next_protocol, error);
1133                         if (ret < 0)
1134                                 return ret;
1135                         item_flags |= MLX5_FLOW_LAYER_GRE;
1136                         break;
1137                 case RTE_FLOW_ITEM_TYPE_MPLS:
1138                         ret = mlx5_flow_validate_item_mpls(items, item_flags,
1139                                                            next_protocol,
1140                                                            error);
1141                         if (ret < 0)
1142                                 return ret;
1143                         item_flags |= MLX5_FLOW_LAYER_MPLS;
1144                         break;
1145                 default:
1146                         return rte_flow_error_set(error, ENOTSUP,
1147                                                   RTE_FLOW_ERROR_TYPE_ITEM,
1148                                                   NULL, "item not supported");
1149                 }
1150         }
1151         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1152                 switch (actions->type) {
1153                 case RTE_FLOW_ACTION_TYPE_VOID:
1154                         break;
1155                 case RTE_FLOW_ACTION_TYPE_FLAG:
1156                         ret = mlx5_flow_validate_action_flag(action_flags,
1157                                                              attr,
1158                                                              error);
1159                         if (ret < 0)
1160                                 return ret;
1161                         action_flags |= MLX5_FLOW_ACTION_FLAG;
1162                         break;
1163                 case RTE_FLOW_ACTION_TYPE_MARK:
1164                         ret = mlx5_flow_validate_action_mark(actions,
1165                                                              action_flags,
1166                                                              attr,
1167                                                              error);
1168                         if (ret < 0)
1169                                 return ret;
1170                         action_flags |= MLX5_FLOW_ACTION_MARK;
1171                         break;
1172                 case RTE_FLOW_ACTION_TYPE_DROP:
1173                         ret = mlx5_flow_validate_action_drop(action_flags,
1174                                                              attr,
1175                                                              error);
1176                         if (ret < 0)
1177                                 return ret;
1178                         action_flags |= MLX5_FLOW_ACTION_DROP;
1179                         break;
1180                 case RTE_FLOW_ACTION_TYPE_QUEUE:
1181                         ret = mlx5_flow_validate_action_queue(actions,
1182                                                               action_flags, dev,
1183                                                               attr,
1184                                                               error);
1185                         if (ret < 0)
1186                                 return ret;
1187                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
1188                         break;
1189                 case RTE_FLOW_ACTION_TYPE_RSS:
1190                         ret = mlx5_flow_validate_action_rss(actions,
1191                                                             action_flags, dev,
1192                                                             attr,
1193                                                             error);
1194                         if (ret < 0)
1195                                 return ret;
1196                         action_flags |= MLX5_FLOW_ACTION_RSS;
1197                         break;
1198                 case RTE_FLOW_ACTION_TYPE_COUNT:
1199                         ret = mlx5_flow_validate_action_count(dev, attr, error);
1200                         if (ret < 0)
1201                                 return ret;
1202                         action_flags |= MLX5_FLOW_ACTION_COUNT;
1203                         break;
1204                 default:
1205                         return rte_flow_error_set(error, ENOTSUP,
1206                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1207                                                   actions,
1208                                                   "action not supported");
1209                 }
1210         }
1211         if (!(action_flags & MLX5_FLOW_FATE_ACTIONS))
1212                 return rte_flow_error_set(error, EINVAL,
1213                                           RTE_FLOW_ERROR_TYPE_ACTION, actions,
1214                                           "no fate action is found");
1215         return 0;
1216 }
1217
1218 /**
1219  * Calculate the number of bytes needed for the action part of the Verbs
1220  * flow.
1221  *
1222  * @param[in] actions
1223  *   Pointer to the list of actions.
1224  *
1225  * @return
1226  *   The size of the memory needed for all actions.
1227  */
1228 static int
1229 flow_verbs_get_actions_size(const struct rte_flow_action actions[])
1230 {
1231         int size = 0;
1232
1233         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1234                 switch (actions->type) {
1235                 case RTE_FLOW_ACTION_TYPE_VOID:
1236                         break;
1237                 case RTE_FLOW_ACTION_TYPE_FLAG:
1238                         size += sizeof(struct ibv_flow_spec_action_tag);
1239                         break;
1240                 case RTE_FLOW_ACTION_TYPE_MARK:
1241                         size += sizeof(struct ibv_flow_spec_action_tag);
1242                         break;
1243                 case RTE_FLOW_ACTION_TYPE_DROP:
1244                         size += sizeof(struct ibv_flow_spec_action_drop);
1245                         break;
1246                 case RTE_FLOW_ACTION_TYPE_QUEUE:
1247                         break;
1248                 case RTE_FLOW_ACTION_TYPE_RSS:
1249                         break;
1250                 case RTE_FLOW_ACTION_TYPE_COUNT:
1251 #if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) || \
1252         defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
1253                         size += sizeof(struct ibv_flow_spec_counter_action);
1254 #endif
1255                         break;
1256                 default:
1257                         break;
1258                 }
1259         }
1260         return size;
1261 }
1262
1263 /**
1264  * Calculate the number of bytes needed for the item part of the Verbs
1265  * flow.
1266  *
1267  * @param[in] items
1268  *   Pointer to the list of items.
1269  *
1270  * @return
1271  *   The size of the memory needed for all items.
1272  */
1273 static int
1274 flow_verbs_get_items_size(const struct rte_flow_item items[])
1275 {
1276         int size = 0;
1277
1278         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
1279                 switch (items->type) {
1280                 case RTE_FLOW_ITEM_TYPE_VOID:
1281                         break;
1282                 case RTE_FLOW_ITEM_TYPE_ETH:
1283                         size += sizeof(struct ibv_flow_spec_eth);
1284                         break;
1285                 case RTE_FLOW_ITEM_TYPE_VLAN:
1286                         size += sizeof(struct ibv_flow_spec_eth);
1287                         break;
1288                 case RTE_FLOW_ITEM_TYPE_IPV4:
1289                         size += sizeof(struct ibv_flow_spec_ipv4_ext);
1290                         break;
1291                 case RTE_FLOW_ITEM_TYPE_IPV6:
1292                         size += sizeof(struct ibv_flow_spec_ipv6);
1293                         break;
1294                 case RTE_FLOW_ITEM_TYPE_UDP:
1295                         size += sizeof(struct ibv_flow_spec_tcp_udp);
1296                         break;
1297                 case RTE_FLOW_ITEM_TYPE_TCP:
1298                         size += sizeof(struct ibv_flow_spec_tcp_udp);
1299                         break;
1300                 case RTE_FLOW_ITEM_TYPE_VXLAN:
1301                         size += sizeof(struct ibv_flow_spec_tunnel);
1302                         break;
1303                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
1304                         size += sizeof(struct ibv_flow_spec_tunnel);
1305                         break;
1306 #ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
1307                 case RTE_FLOW_ITEM_TYPE_GRE:
1308                         size += sizeof(struct ibv_flow_spec_gre);
1309                         break;
1310                 case RTE_FLOW_ITEM_TYPE_MPLS:
1311                         size += sizeof(struct ibv_flow_spec_mpls);
1312                         break;
1313 #else
1314                 case RTE_FLOW_ITEM_TYPE_GRE:
1315                         size += sizeof(struct ibv_flow_spec_tunnel);
1316                         break;
1317 #endif
1318                 default:
1319                         break;
1320                 }
1321         }
1322         return size;
1323 }
1324
1325 /**
1326  * Internal preparation function. Allocate mlx5_flow with the required size.
1327  * The required size is calculated from the given actions and items; the Verbs
1328  * attribute and spec pointers are set up inside the allocation.
1329  *
1330  * @param[in] attr
1331  *   Pointer to the flow attributes.
1332  * @param[in] items
1333  *   Pointer to the list of items.
1334  * @param[in] actions
1335  *   Pointer to the list of actions.
1336  * @param[out] error
1337  *   Pointer to the error structure.
1338  *
1339  * @return
1340  *   Pointer to mlx5_flow object on success, otherwise NULL and rte_errno
1341  *   is set.
1342  */
1343 static struct mlx5_flow *
1344 flow_verbs_prepare(const struct rte_flow_attr *attr __rte_unused,
1345                    const struct rte_flow_item items[],
1346                    const struct rte_flow_action actions[],
1347                    struct rte_flow_error *error)
1348 {
1349         uint32_t size = sizeof(struct mlx5_flow) + sizeof(struct ibv_flow_attr);
1350         struct mlx5_flow *flow;
1351
1352         size += flow_verbs_get_actions_size(actions);
1353         size += flow_verbs_get_items_size(items);
1354         flow = rte_calloc(__func__, 1, size, 0);
1355         if (!flow) {
1356                 rte_flow_error_set(error, ENOMEM,
1357                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1358                                    "not enough memory to create flow");
1359                 return NULL;
1360         }
1361         flow->verbs.attr = (void *)(flow + 1);
1362         flow->verbs.specs =
1363                 (uint8_t *)(flow + 1) + sizeof(struct ibv_flow_attr);
1364         return flow;
1365 }
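
/*
 * Layout sketch (editorial) of the single allocation made above:
 *
 *   +------------------+----------------------+---------------------------+
 *   | struct mlx5_flow | struct ibv_flow_attr | Verbs specs for items and |
 *   |                  | (verbs.attr)         | actions (verbs.specs)     |
 *   +------------------+----------------------+---------------------------+
 *
 * verbs.attr points right after the mlx5_flow header and verbs.specs right
 * after the ibv_flow_attr, so the flow_verbs_translate_item_*() and
 * flow_verbs_translate_action_*() helpers only append into pre-sized memory
 * and never reallocate.
 */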
1366
1367 /**
1368  * Fill the flow with Verbs specs.
1369  *
1370  * @param[in] dev
1371  *   Pointer to Ethernet device.
1372  * @param[in, out] dev_flow
1373  *   Pointer to the mlx5 flow.
1374  * @param[in] attr
1375  *   Pointer to the flow attributes.
1376  * @param[in] items
1377  *   Pointer to the list of items.
1378  * @param[in] actions
1379  *   Pointer to the list of actions.
1380  * @param[out] error
1381  *   Pointer to the error structure.
1382  *
1383  * @return
1384  *   0 on success, a negative errno value otherwise and rte_errno is set.
1385  */
1386 static int
1387 flow_verbs_translate(struct rte_eth_dev *dev,
1388                      struct mlx5_flow *dev_flow,
1389                      const struct rte_flow_attr *attr,
1390                      const struct rte_flow_item items[],
1391                      const struct rte_flow_action actions[],
1392                      struct rte_flow_error *error)
1393 {
1394         struct rte_flow *flow = dev_flow->flow;
1395         uint64_t item_flags = 0;
1396         uint64_t action_flags = 0;
1397         uint64_t priority = attr->priority;
1398         uint32_t subpriority = 0;
1399         struct priv *priv = dev->data->dev_private;
1400
1401         if (priority == MLX5_FLOW_PRIO_RSVD)
1402                 priority = priv->config.flow_prio - 1;
1403         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1404                 int ret;
1405
1406                 switch (actions->type) {
1407                 case RTE_FLOW_ACTION_TYPE_VOID:
1408                         break;
1409                 case RTE_FLOW_ACTION_TYPE_FLAG:
1410                         flow_verbs_translate_action_flag(dev_flow, actions);
1411                         action_flags |= MLX5_FLOW_ACTION_FLAG;
1412                         break;
1413                 case RTE_FLOW_ACTION_TYPE_MARK:
1414                         flow_verbs_translate_action_mark(dev_flow, actions);
1415                         action_flags |= MLX5_FLOW_ACTION_MARK;
1416                         break;
1417                 case RTE_FLOW_ACTION_TYPE_DROP:
1418                         flow_verbs_translate_action_drop(dev_flow, actions);
1419                         action_flags |= MLX5_FLOW_ACTION_DROP;
1420                         break;
1421                 case RTE_FLOW_ACTION_TYPE_QUEUE:
1422                         flow_verbs_translate_action_queue(dev_flow, actions);
1423                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
1424                         break;
1425                 case RTE_FLOW_ACTION_TYPE_RSS:
1426                         flow_verbs_translate_action_rss(dev_flow, actions);
1427                         action_flags |= MLX5_FLOW_ACTION_RSS;
1428                         break;
1429                 case RTE_FLOW_ACTION_TYPE_COUNT:
1430                         ret = flow_verbs_translate_action_count(dev_flow,
1431                                                                 actions,
1432                                                                 dev, error);
1433                         if (ret < 0)
1434                                 return ret;
1435                         action_flags |= MLX5_FLOW_ACTION_COUNT;
1436                         break;
1437                 default:
1438                         return rte_flow_error_set(error, ENOTSUP,
1439                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1440                                                   actions,
1441                                                   "action not supported");
1442                 }
1443         }
1444         flow->actions = action_flags;
1445         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
1446                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
1447
1448                 switch (items->type) {
1449                 case RTE_FLOW_ITEM_TYPE_VOID:
1450                         break;
1451                 case RTE_FLOW_ITEM_TYPE_ETH:
1452                         flow_verbs_translate_item_eth(dev_flow, items,
1453                                                       item_flags);
1454                         subpriority = MLX5_PRIORITY_MAP_L2;
1455                         item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
1456                                                MLX5_FLOW_LAYER_OUTER_L2;
1457                         break;
1458                 case RTE_FLOW_ITEM_TYPE_VLAN:
1459                         flow_verbs_translate_item_vlan(dev_flow, items,
1460                                                        item_flags);
1461                         subpriority = MLX5_PRIORITY_MAP_L2;
1462                         item_flags |= tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
1463                                                 MLX5_FLOW_LAYER_INNER_VLAN) :
1464                                                (MLX5_FLOW_LAYER_OUTER_L2 |
1465                                                 MLX5_FLOW_LAYER_OUTER_VLAN);
1466                         break;
1467                 case RTE_FLOW_ITEM_TYPE_IPV4:
1468                         flow_verbs_translate_item_ipv4(dev_flow, items,
1469                                                        item_flags);
1470                         subpriority = MLX5_PRIORITY_MAP_L3;
1471                         dev_flow->verbs.hash_fields |=
1472                                 mlx5_flow_hashfields_adjust
1473                                         (dev_flow, tunnel,
1474                                          MLX5_IPV4_LAYER_TYPES,
1475                                          MLX5_IPV4_IBV_RX_HASH);
1476                         item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
1477                                                MLX5_FLOW_LAYER_OUTER_L3_IPV4;
1478                         break;
1479                 case RTE_FLOW_ITEM_TYPE_IPV6:
1480                         flow_verbs_translate_item_ipv6(dev_flow, items,
1481                                                        item_flags);
1482                         subpriority = MLX5_PRIORITY_MAP_L3;
1483                         dev_flow->verbs.hash_fields |=
1484                                 mlx5_flow_hashfields_adjust
1485                                         (dev_flow, tunnel,
1486                                          MLX5_IPV6_LAYER_TYPES,
1487                                          MLX5_IPV6_IBV_RX_HASH);
1488                         item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
1489                                                MLX5_FLOW_LAYER_OUTER_L3_IPV6;
1490                         break;
1491                 case RTE_FLOW_ITEM_TYPE_TCP:
1492                         flow_verbs_translate_item_tcp(dev_flow, items,
1493                                                       item_flags);
1494                         subpriority = MLX5_PRIORITY_MAP_L4;
1495                         dev_flow->verbs.hash_fields |=
1496                                 mlx5_flow_hashfields_adjust
1497                                         (dev_flow, tunnel, ETH_RSS_TCP,
1498                                          (IBV_RX_HASH_SRC_PORT_TCP |
1499                                           IBV_RX_HASH_DST_PORT_TCP));
1500                         item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
1501                                                MLX5_FLOW_LAYER_OUTER_L4_TCP;
1502                         break;
1503                 case RTE_FLOW_ITEM_TYPE_UDP:
1504                         flow_verbs_translate_item_udp(dev_flow, items,
1505                                                       item_flags);
1506                         subpriority = MLX5_PRIORITY_MAP_L4;
1507                         dev_flow->verbs.hash_fields |=
1508                                 mlx5_flow_hashfields_adjust
1509                                         (dev_flow, tunnel, ETH_RSS_UDP,
1510                                          (IBV_RX_HASH_SRC_PORT_UDP |
1511                                           IBV_RX_HASH_DST_PORT_UDP));
1512                         item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
1513                                                MLX5_FLOW_LAYER_OUTER_L4_UDP;
1514                         break;
1515                 case RTE_FLOW_ITEM_TYPE_VXLAN:
1516                         flow_verbs_translate_item_vxlan(dev_flow, items,
1517                                                         item_flags);
1518                         subpriority = MLX5_PRIORITY_MAP_L2;
1519                         item_flags |= MLX5_FLOW_LAYER_VXLAN;
1520                         break;
1521                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
1522                         flow_verbs_translate_item_vxlan_gpe(dev_flow, items,
1523                                                             item_flags);
1524                         subpriority = MLX5_PRIORITY_MAP_L2;
1525                         item_flags |= MLX5_FLOW_LAYER_VXLAN_GPE;
1526                         break;
1527                 case RTE_FLOW_ITEM_TYPE_GRE:
1528                         flow_verbs_translate_item_gre(dev_flow, items,
1529                                                       item_flags);
1530                         subpriority = MLX5_PRIORITY_MAP_L2;
1531                         item_flags |= MLX5_FLOW_LAYER_GRE;
1532                         break;
1533                 case RTE_FLOW_ITEM_TYPE_MPLS:
1534                         flow_verbs_translate_item_mpls(dev_flow, items,
1535                                                        item_flags);
1536                         subpriority = MLX5_PRIORITY_MAP_L2;
1537                         item_flags |= MLX5_FLOW_LAYER_MPLS;
1538                         break;
1539                 default:
1540                         return rte_flow_error_set(error, ENOTSUP,
1541                                                   RTE_FLOW_ERROR_TYPE_ITEM,
1542                                                   NULL,
1543                                                   "item not supported");
1544                 }
1545         }
1546         dev_flow->layers = item_flags;
1547         dev_flow->verbs.attr->priority =
1548                 mlx5_flow_adjust_priority(dev, priority, subpriority);
1549         return 0;
1550 }
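
/*
 * Editorial example of what the two loops above produce for a hypothetical
 * "eth / ipv4 / udp" pattern with an RSS action (values are illustrative):
 *
 *   flow->actions    = MLX5_FLOW_ACTION_RSS;
 *   dev_flow->layers = MLX5_FLOW_LAYER_OUTER_L2 |
 *                      MLX5_FLOW_LAYER_OUTER_L3_IPV4 |
 *                      MLX5_FLOW_LAYER_OUTER_L4_UDP;
 *   subpriority      = MLX5_PRIORITY_MAP_L4;
 *
 * dev_flow->verbs.hash_fields accumulates the IPv4 and UDP port hash bits as
 * filtered by mlx5_flow_hashfields_adjust(), and the final attribute priority
 * is derived from the flow priority and the deepest matched layer through
 * mlx5_flow_adjust_priority().
 */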
1551
1552 /**
1553  * Remove the flow from the NIC but keep it in memory.
1554  *
1555  * @param[in] dev
1556  *   Pointer to the Ethernet device structure.
1557  * @param[in, out] flow
1558  *   Pointer to flow structure.
1559  */
1560 static void
1561 flow_verbs_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
1562 {
1563         struct mlx5_flow_verbs *verbs;
1564         struct mlx5_flow *dev_flow;
1565
1566         if (!flow)
1567                 return;
1568         LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
1569                 verbs = &dev_flow->verbs;
1570                 if (verbs->flow) {
1571                         claim_zero(mlx5_glue->destroy_flow(verbs->flow));
1572                         verbs->flow = NULL;
1573                 }
1574                 if (verbs->hrxq) {
1575                         if (flow->actions & MLX5_FLOW_ACTION_DROP)
1576                                 mlx5_hrxq_drop_release(dev);
1577                         else
1578                                 mlx5_hrxq_release(dev, verbs->hrxq);
1579                         verbs->hrxq = NULL;
1580                 }
1581         }
1582 }
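
/*
 * Editorial note: the function above only detaches the Verbs flow objects and
 * drops the hash Rx queue references; the mlx5_flow structures stay in the
 * flow->dev_flows list, so the same flow can presumably be re-applied later
 * with flow_verbs_apply() without re-running prepare/translate.
 */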
1583
1584 /**
1585  * Remove the flow from the NIC and from memory.
1586  *
1587  * @param[in] dev
1588  *   Pointer to the Ethernet device structure.
1589  * @param[in, out] flow
1590  *   Pointer to flow structure.
1591  */
1592 static void
1593 flow_verbs_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
1594 {
1595         struct mlx5_flow *dev_flow;
1596
1597         if (!flow)
1598                 return;
1599         flow_verbs_remove(dev, flow);
1600         while (!LIST_EMPTY(&flow->dev_flows)) {
1601                 dev_flow = LIST_FIRST(&flow->dev_flows);
1602                 LIST_REMOVE(dev_flow, next);
1603                 rte_free(dev_flow);
1604         }
1605         if (flow->counter) {
1606                 flow_verbs_counter_release(flow->counter);
1607                 flow->counter = NULL;
1608         }
1609 }
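
/*
 * Editorial note on the teardown order above: the Verbs objects are detached
 * first via flow_verbs_remove(), then every mlx5_flow allocated by
 * flow_verbs_prepare() is unlinked from flow->dev_flows and freed, and the
 * flow counter reference, if any, is released last.
 */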
1610
1611 /**
1612  * Apply the flow to the NIC.
1613  *
1614  * @param[in] dev
1615  *   Pointer to the Ethernet device structure.
1616  * @param[in, out] flow
1617  *   Pointer to flow structure.
1618  * @param[out] error
1619  *   Pointer to error structure.
1620  *
1621  * @return
1622  *   0 on success, a negative errno value otherwise and rte_errno is set.
1623  */
1624 static int
1625 flow_verbs_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
1626                  struct rte_flow_error *error)
1627 {
1628         struct mlx5_flow_verbs *verbs;
1629         struct mlx5_flow *dev_flow;
1630         int err;
1631
1632         LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
1633                 verbs = &dev_flow->verbs;
1634                 if (flow->actions & MLX5_FLOW_ACTION_DROP) {
1635                         verbs->hrxq = mlx5_hrxq_drop_new(dev);
1636                         if (!verbs->hrxq) {
1637                                 rte_flow_error_set
1638                                         (error, errno,
1639                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1640                                          "cannot get drop hash queue");
1641                                 goto error;
1642                         }
1643                 } else {
1644                         struct mlx5_hrxq *hrxq;
1645
1646                         hrxq = mlx5_hrxq_get(dev, flow->key,
1647                                              MLX5_RSS_HASH_KEY_LEN,
1648                                              verbs->hash_fields,
1649                                              (*flow->queue),
1650                                              flow->rss.queue_num);
1651                         if (!hrxq)
1652                                 hrxq = mlx5_hrxq_new(dev, flow->key,
1653                                                      MLX5_RSS_HASH_KEY_LEN,
1654                                                      verbs->hash_fields,
1655                                                      (*flow->queue),
1656                                                      flow->rss.queue_num,
1657                                                      !!(dev_flow->layers &
1658                                                       MLX5_FLOW_LAYER_TUNNEL));
1659                         if (!hrxq) {
1660                                 rte_flow_error_set
1661                                         (error, rte_errno,
1662                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1663                                          "cannot get hash queue");
1664                                 goto error;
1665                         }
1666                         verbs->hrxq = hrxq;
1667                 }
1668                 verbs->flow = mlx5_glue->create_flow(verbs->hrxq->qp,
1669                                                      verbs->attr);
1670                 if (!verbs->flow) {
1671                         rte_flow_error_set(error, errno,
1672                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1673                                            NULL,
1674                                            "hardware refuses to create flow");
1675                         goto error;
1676                 }
1677         }
1678         return 0;
1679 error:
1680         err = rte_errno; /* Save rte_errno before cleanup. */
1681         LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
1682                 verbs = &dev_flow->verbs;
1683                 if (verbs->hrxq) {
1684                         if (flow->actions & MLX5_FLOW_ACTION_DROP)
1685                                 mlx5_hrxq_drop_release(dev);
1686                         else
1687                                 mlx5_hrxq_release(dev, verbs->hrxq);
1688                         verbs->hrxq = NULL;
1689                 }
1690         }
1691         rte_errno = err; /* Restore rte_errno. */
1692         return -rte_errno;
1693 }
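
/*
 * Editorial sketch of the error-unwind convention used above, which the
 * other mlx5 flow paths appear to follow as well:
 *
 *   err = rte_errno;          // preserve the first failure cause
 *   ...release every hrxq reference taken so far...
 *   rte_errno = err;          // cleanup may clobber rte_errno, restore it
 *   return -rte_errno;        // negative errno, rte_errno set for callers
 */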
1694
1695 /**
1696  * Query a flow.
1697  *
1698  * @see rte_flow_query()
1699  * @see rte_flow_ops
1700  */
1701 static int
1702 flow_verbs_query(struct rte_eth_dev *dev,
1703                  struct rte_flow *flow,
1704                  const struct rte_flow_action *actions,
1705                  void *data,
1706                  struct rte_flow_error *error)
1707 {
1708         int ret = -EINVAL;
1709
1710         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1711                 switch (actions->type) {
1712                 case RTE_FLOW_ACTION_TYPE_VOID:
1713                         break;
1714                 case RTE_FLOW_ACTION_TYPE_COUNT:
1715                         ret = flow_verbs_counter_query(dev, flow, data, error);
1716                         break;
1717                 default:
1718                         return rte_flow_error_set(error, ENOTSUP,
1719                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1720                                                   actions,
1721                                                   "action not supported");
1722                 }
1723         }
1724         return ret;
1725 }
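
/*
 * Editorial usage sketch: only the COUNT action is queryable here, and the
 * loop above expects an END-terminated action array, so an application would
 * typically do something like the following (names are illustrative only):
 *
 *   struct rte_flow_error error;
 *   struct rte_flow_query_count count = { .reset = 1 };
 *   const struct rte_flow_action query[] = {
 *           { .type = RTE_FLOW_ACTION_TYPE_COUNT },
 *           { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 *
 *   if (rte_flow_query(port_id, flow, query, &count, &error) == 0)
 *           printf("hits: %" PRIu64 " bytes: %" PRIu64 "\n",
 *                  count.hits, count.bytes);
 */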
1726
1727 const struct mlx5_flow_driver_ops mlx5_flow_verbs_drv_ops = {
1728         .validate = flow_verbs_validate,
1729         .prepare = flow_verbs_prepare,
1730         .translate = flow_verbs_translate,
1731         .apply = flow_verbs_apply,
1732         .remove = flow_verbs_remove,
1733         .destroy = flow_verbs_destroy,
1734         .query = flow_verbs_query,
1735 };
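
/*
 * Editorial note: these callbacks are not meant to be called directly; the
 * generic mlx5 flow layer (mlx5_flow.c) is expected to dispatch to this table
 * when the Direct Verbs path is not selected, invoking validate, prepare,
 * translate and apply in that order when a rule is created, remove/destroy
 * when it is torn down, and query for counter reads.
 */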