net/mlx5: fix RSS flow rule with non-existing queues
[dpdk.git] drivers/net/mlx5/mlx5_flow.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright 2016 6WIND S.A.
5  *   Copyright 2016 Mellanox.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of 6WIND S.A. nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <sys/queue.h>
35 #include <string.h>
36
37 /* Verbs header. */
38 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
39 #ifdef PEDANTIC
40 #pragma GCC diagnostic ignored "-Wpedantic"
41 #endif
42 #include <infiniband/verbs.h>
43 #ifdef PEDANTIC
44 #pragma GCC diagnostic error "-Wpedantic"
45 #endif
46
47 #include <rte_ethdev.h>
48 #include <rte_flow.h>
49 #include <rte_flow_driver.h>
50 #include <rte_malloc.h>
51
52 #include "mlx5.h"
53 #include "mlx5_prm.h"
54
55 /* Number of Work Queues necessary for the drop queue. */
56 #define MLX5_DROP_WQ_N 4
57
58 static int
59 mlx5_flow_create_eth(const struct rte_flow_item *item,
60                      const void *default_mask,
61                      void *data);
62
63 static int
64 mlx5_flow_create_vlan(const struct rte_flow_item *item,
65                       const void *default_mask,
66                       void *data);
67
68 static int
69 mlx5_flow_create_ipv4(const struct rte_flow_item *item,
70                       const void *default_mask,
71                       void *data);
72
73 static int
74 mlx5_flow_create_ipv6(const struct rte_flow_item *item,
75                       const void *default_mask,
76                       void *data);
77
78 static int
79 mlx5_flow_create_udp(const struct rte_flow_item *item,
80                      const void *default_mask,
81                      void *data);
82
83 static int
84 mlx5_flow_create_tcp(const struct rte_flow_item *item,
85                      const void *default_mask,
86                      void *data);
87
88 static int
89 mlx5_flow_create_vxlan(const struct rte_flow_item *item,
90                        const void *default_mask,
91                        void *data);
92
93 struct rte_flow {
94         LIST_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */
95         struct ibv_exp_flow_attr *ibv_attr; /**< Pointer to Verbs attributes. */
96         struct ibv_exp_rwq_ind_table *ind_table; /**< Indirection table. */
97         struct ibv_qp *qp; /**< Verbs queue pair. */
98         struct ibv_exp_flow *ibv_flow; /**< Verbs flow. */
99         struct ibv_exp_wq *wq; /**< Verbs work queue. */
100         struct ibv_cq *cq; /**< Verbs completion queue. */
101         struct rxq *(*rxqs)[]; /**< Pointer to the queues array. */
102         uint16_t rxqs_n; /**< Number of queues in this flow, 0 if drop queue. */
103         uint32_t mark:1; /**< Set if the flow is marked. */
104         uint32_t drop:1; /**< Drop queue. */
105         uint64_t hash_fields; /**< Fields that participate in the hash. */
106 };
107
108 /** Static initializer for items. */
109 #define ITEMS(...) \
110         (const enum rte_flow_item_type []){ \
111                 __VA_ARGS__, RTE_FLOW_ITEM_TYPE_END, \
112         }
113
114 /** Structure to generate a simple graph of layers supported by the NIC. */
115 struct mlx5_flow_items {
116         /** List of possible actions for these items. */
117         const enum rte_flow_action_type *const actions;
118         /** Bit-masks corresponding to the possibilities for the item. */
119         const void *mask;
120         /**
121          * Default bit-masks to use when item->mask is not provided. When
122          * \default_mask is also NULL, the full supported bit-mask (\mask) is
123          * used instead.
124          */
125         const void *default_mask;
126         /** Bit-masks size in bytes. */
127         const unsigned int mask_sz;
128         /**
129          * Conversion function from rte_flow to NIC specific flow.
130          *
131          * @param item
132          *   rte_flow item to convert.
133          * @param default_mask
134          *   Default bit-masks to use when item->mask is not provided.
135          * @param data
136          *   Internal structure to store the conversion.
137          *
138          * @return
139          *   0 on success, negative value otherwise.
140          */
141         int (*convert)(const struct rte_flow_item *item,
142                        const void *default_mask,
143                        void *data);
144         /** Size in bytes of the destination structure. */
145         const unsigned int dst_sz;
146         /** List of possible following items.  */
147         const enum rte_flow_item_type *const items;
148 };
149
150 /** Valid actions for this PMD. */
151 static const enum rte_flow_action_type valid_actions[] = {
152         RTE_FLOW_ACTION_TYPE_DROP,
153         RTE_FLOW_ACTION_TYPE_QUEUE,
154         RTE_FLOW_ACTION_TYPE_MARK,
155         RTE_FLOW_ACTION_TYPE_FLAG,
156         RTE_FLOW_ACTION_TYPE_END,
157 };
158
159 /** Graph of supported items and associated actions. */
160 static const struct mlx5_flow_items mlx5_flow_items[] = {
161         [RTE_FLOW_ITEM_TYPE_END] = {
162                 .items = ITEMS(RTE_FLOW_ITEM_TYPE_ETH,
163                                RTE_FLOW_ITEM_TYPE_VXLAN),
164         },
165         [RTE_FLOW_ITEM_TYPE_ETH] = {
166                 .items = ITEMS(RTE_FLOW_ITEM_TYPE_VLAN,
167                                RTE_FLOW_ITEM_TYPE_IPV4,
168                                RTE_FLOW_ITEM_TYPE_IPV6),
169                 .actions = valid_actions,
170                 .mask = &(const struct rte_flow_item_eth){
171                         .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
172                         .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
173                         .type = -1,
174                 },
175                 .default_mask = &rte_flow_item_eth_mask,
176                 .mask_sz = sizeof(struct rte_flow_item_eth),
177                 .convert = mlx5_flow_create_eth,
178                 .dst_sz = sizeof(struct ibv_exp_flow_spec_eth),
179         },
180         [RTE_FLOW_ITEM_TYPE_VLAN] = {
181                 .items = ITEMS(RTE_FLOW_ITEM_TYPE_IPV4,
182                                RTE_FLOW_ITEM_TYPE_IPV6),
183                 .actions = valid_actions,
184                 .mask = &(const struct rte_flow_item_vlan){
185                         .tci = -1,
186                 },
187                 .default_mask = &rte_flow_item_vlan_mask,
188                 .mask_sz = sizeof(struct rte_flow_item_vlan),
189                 .convert = mlx5_flow_create_vlan,
190                 .dst_sz = 0,
191         },
192         [RTE_FLOW_ITEM_TYPE_IPV4] = {
193                 .items = ITEMS(RTE_FLOW_ITEM_TYPE_UDP,
194                                RTE_FLOW_ITEM_TYPE_TCP),
195                 .actions = valid_actions,
196                 .mask = &(const struct rte_flow_item_ipv4){
197                         .hdr = {
198                                 .src_addr = -1,
199                                 .dst_addr = -1,
200                                 .type_of_service = -1,
201                                 .next_proto_id = -1,
202                         },
203                 },
204                 .default_mask = &rte_flow_item_ipv4_mask,
205                 .mask_sz = sizeof(struct rte_flow_item_ipv4),
206                 .convert = mlx5_flow_create_ipv4,
207                 .dst_sz = sizeof(struct ibv_exp_flow_spec_ipv4_ext),
208         },
209         [RTE_FLOW_ITEM_TYPE_IPV6] = {
210                 .items = ITEMS(RTE_FLOW_ITEM_TYPE_UDP,
211                                RTE_FLOW_ITEM_TYPE_TCP),
212                 .actions = valid_actions,
213                 .mask = &(const struct rte_flow_item_ipv6){
214                         .hdr = {
215                                 .src_addr = {
216                                         0xff, 0xff, 0xff, 0xff,
217                                         0xff, 0xff, 0xff, 0xff,
218                                         0xff, 0xff, 0xff, 0xff,
219                                         0xff, 0xff, 0xff, 0xff,
220                                 },
221                                 .dst_addr = {
222                                         0xff, 0xff, 0xff, 0xff,
223                                         0xff, 0xff, 0xff, 0xff,
224                                         0xff, 0xff, 0xff, 0xff,
225                                         0xff, 0xff, 0xff, 0xff,
226                                 },
227                                 .vtc_flow = -1,
228                                 .proto = -1,
229                                 .hop_limits = -1,
230                         },
231                 },
232                 .default_mask = &rte_flow_item_ipv6_mask,
233                 .mask_sz = sizeof(struct rte_flow_item_ipv6),
234                 .convert = mlx5_flow_create_ipv6,
235                 .dst_sz = sizeof(struct ibv_exp_flow_spec_ipv6_ext),
236         },
237         [RTE_FLOW_ITEM_TYPE_UDP] = {
238                 .items = ITEMS(RTE_FLOW_ITEM_TYPE_VXLAN),
239                 .actions = valid_actions,
240                 .mask = &(const struct rte_flow_item_udp){
241                         .hdr = {
242                                 .src_port = -1,
243                                 .dst_port = -1,
244                         },
245                 },
246                 .default_mask = &rte_flow_item_udp_mask,
247                 .mask_sz = sizeof(struct rte_flow_item_udp),
248                 .convert = mlx5_flow_create_udp,
249                 .dst_sz = sizeof(struct ibv_exp_flow_spec_tcp_udp),
250         },
251         [RTE_FLOW_ITEM_TYPE_TCP] = {
252                 .actions = valid_actions,
253                 .mask = &(const struct rte_flow_item_tcp){
254                         .hdr = {
255                                 .src_port = -1,
256                                 .dst_port = -1,
257                         },
258                 },
259                 .default_mask = &rte_flow_item_tcp_mask,
260                 .mask_sz = sizeof(struct rte_flow_item_tcp),
261                 .convert = mlx5_flow_create_tcp,
262                 .dst_sz = sizeof(struct ibv_exp_flow_spec_tcp_udp),
263         },
264         [RTE_FLOW_ITEM_TYPE_VXLAN] = {
265                 .items = ITEMS(RTE_FLOW_ITEM_TYPE_ETH),
266                 .actions = valid_actions,
267                 .mask = &(const struct rte_flow_item_vxlan){
268                         .vni = "\xff\xff\xff",
269                 },
270                 .default_mask = &rte_flow_item_vxlan_mask,
271                 .mask_sz = sizeof(struct rte_flow_item_vxlan),
272                 .convert = mlx5_flow_create_vxlan,
273                 .dst_sz = sizeof(struct ibv_exp_flow_spec_tunnel),
274         },
275 };
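/*
 * Reading the graph above: a pattern is accepted only when every item follows
 * its predecessor along one of the .items edges, starting from the END entry.
 * Illustrative sketch (not part of this file): the application pattern below
 * walks ETH -> IPV4 -> UDP -> VXLAN -> ETH and is therefore accepted, whereas
 * a VLAN item directly followed by UDP would be rejected as
 * "item not supported".
 *
 *	const struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
 *		{ .type = RTE_FLOW_ITEM_TYPE_VXLAN },
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 */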
276
277 /** Structure to pass to the conversion function. */
278 struct mlx5_flow {
279         struct ibv_exp_flow_attr *ibv_attr; /**< Verbs attribute. */
280         unsigned int offset; /**< Offset in bytes in the ibv_attr buffer. */
281         uint32_t inner; /**< Set once VXLAN is encountered. */
282         uint64_t hash_fields; /**< Fields that participate in the hash. */
283 };
284
285 /** Structure for Drop queue. */
286 struct rte_flow_drop {
287         struct ibv_exp_rwq_ind_table *ind_table; /**< Indirection table. */
288         struct ibv_qp *qp; /**< Verbs queue pair. */
289         struct ibv_exp_wq *wqs[MLX5_DROP_WQ_N]; /**< Verbs work queues. */
290         struct ibv_cq *cq; /**< Verbs completion queue. */
291 };
292
293 struct mlx5_flow_action {
294         uint32_t queue:1; /**< Target is a receive queue. */
295         uint32_t drop:1; /**< Target is a drop queue. */
296         uint32_t mark:1; /**< Mark is present in the flow. */
297         uint32_t mark_id; /**< Mark identifier. */
298         uint16_t queues[RTE_MAX_QUEUES_PER_PORT]; /**< Queues indexes to use. */
299         uint16_t queues_n; /**< Number of entries in queues[]. */
300 };
301
302 /**
303  * Check support for a given item.
304  *
305  * @param item[in]
306  *   Item specification.
307  * @param mask[in]
308  *   Bit-masks covering supported fields to compare with spec, last and mask in
309  *   \item.
310  * @param size
311  *   Bit-mask size in bytes.
312  *
313  * @return
314  *   0 on success, non-zero otherwise.
315  */
316 static int
317 mlx5_flow_item_validate(const struct rte_flow_item *item,
318                         const uint8_t *mask, unsigned int size)
319 {
320         int ret = 0;
321
322         if (!item->spec && (item->mask || item->last))
323                 return -1;
324         if (item->spec && !item->mask) {
325                 unsigned int i;
326                 const uint8_t *spec = item->spec;
327
328                 for (i = 0; i < size; ++i)
329                         if ((spec[i] | mask[i]) != mask[i])
330                                 return -1;
331         }
332         if (item->last && !item->mask) {
333                 unsigned int i;
334                 const uint8_t *spec = item->last;
335
336                 for (i = 0; i < size; ++i)
337                         if ((spec[i] | mask[i]) != mask[i])
338                                 return -1;
339         }
340         if (item->mask) {
341                 unsigned int i;
342                 const uint8_t *spec = item->mask;
343
344                 for (i = 0; i < size; ++i)
345                         if ((spec[i] | mask[i]) != mask[i])
346                                 return -1;
347         }
348         if (item->spec && item->last) {
349                 uint8_t spec[size];
350                 uint8_t last[size];
351                 const uint8_t *apply = mask;
352                 unsigned int i;
353
354                 if (item->mask)
355                         apply = item->mask;
356                 for (i = 0; i < size; ++i) {
357                         spec[i] = ((const uint8_t *)item->spec)[i] & apply[i];
358                         last[i] = ((const uint8_t *)item->last)[i] & apply[i];
359                 }
360                 ret = memcmp(spec, last, size);
361         }
362         return ret;
363 }
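/*
 * Behaviour sketch for the helper above (illustrative, not part of this file):
 * a spec/last range is only accepted when both ends collapse to the same value
 * under the applied mask.  With the IPv4 item below, spec and last differ only
 * in bits cleared by the mask, so the final memcmp() returns 0 and the item
 * validates; widening the mask to 0xffffffff would make it fail instead.
 *
 *	struct rte_flow_item_ipv4 spec = {
 *		.hdr.src_addr = rte_cpu_to_be_32(0x0a000000),
 *	};
 *	struct rte_flow_item_ipv4 last = {
 *		.hdr.src_addr = rte_cpu_to_be_32(0x0a0000ff),
 *	};
 *	struct rte_flow_item_ipv4 mask = {
 *		.hdr.src_addr = rte_cpu_to_be_32(0xffffff00),
 *	};
 *	struct rte_flow_item item = {
 *		.type = RTE_FLOW_ITEM_TYPE_IPV4,
 *		.spec = &spec, .last = &last, .mask = &mask,
 *	};
 */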
364
365 /**
366  * Validate a flow supported by the NIC.
367  *
368  * @param priv
369  *   Pointer to private structure.
370  * @param[in] attr
371  *   Flow rule attributes.
372  * @param[in] pattern
373  *   Pattern specification (list terminated by the END pattern item).
374  * @param[in] actions
375  *   Associated actions (list terminated by the END action).
376  * @param[out] error
377  *   Perform verbose error reporting if not NULL.
378  * @param[in, out] flow
379  *   Flow structure to update.
380  * @param[in, out] action
381  *   Action structure to update.
382  *
383  * @return
384  *   0 on success, a negative errno value otherwise and rte_errno is set.
385  */
386 static int
387 priv_flow_validate(struct priv *priv,
388                    const struct rte_flow_attr *attr,
389                    const struct rte_flow_item items[],
390                    const struct rte_flow_action actions[],
391                    struct rte_flow_error *error,
392                    struct mlx5_flow *flow,
393                    struct mlx5_flow_action *action)
394 {
395         const struct mlx5_flow_items *cur_item = mlx5_flow_items;
396
397         (void)priv;
398         if (attr->group) {
399                 rte_flow_error_set(error, ENOTSUP,
400                                    RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
401                                    NULL,
402                                    "groups are not supported");
403                 return -rte_errno;
404         }
405         if (attr->priority) {
406                 rte_flow_error_set(error, ENOTSUP,
407                                    RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
408                                    NULL,
409                                    "priorities are not supported");
410                 return -rte_errno;
411         }
412         if (attr->egress) {
413                 rte_flow_error_set(error, ENOTSUP,
414                                    RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
415                                    NULL,
416                                    "egress is not supported");
417                 return -rte_errno;
418         }
419         if (!attr->ingress) {
420                 rte_flow_error_set(error, ENOTSUP,
421                                    RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
422                                    NULL,
423                                    "only ingress is supported");
424                 return -rte_errno;
425         }
426         for (; items->type != RTE_FLOW_ITEM_TYPE_END; ++items) {
427                 const struct mlx5_flow_items *token = NULL;
428                 unsigned int i;
429                 int err;
430
431                 if (items->type == RTE_FLOW_ITEM_TYPE_VOID)
432                         continue;
433                 for (i = 0;
434                      cur_item->items &&
435                      cur_item->items[i] != RTE_FLOW_ITEM_TYPE_END;
436                      ++i) {
437                         if (cur_item->items[i] == items->type) {
438                                 token = &mlx5_flow_items[items->type];
439                                 break;
440                         }
441                 }
442                 if (!token)
443                         goto exit_item_not_supported;
444                 cur_item = token;
445                 err = mlx5_flow_item_validate(items,
446                                               (const uint8_t *)cur_item->mask,
447                                               cur_item->mask_sz);
448                 if (err)
449                         goto exit_item_not_supported;
450                 if (flow->ibv_attr && cur_item->convert) {
451                         err = cur_item->convert(items,
452                                                 (cur_item->default_mask ?
453                                                  cur_item->default_mask :
454                                                  cur_item->mask),
455                                                 flow);
456                         if (err)
457                                 goto exit_item_not_supported;
458                 } else if (items->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
459                         if (flow->inner) {
460                                 rte_flow_error_set(error, ENOTSUP,
461                                                    RTE_FLOW_ERROR_TYPE_ITEM,
462                                                    items,
463                                                    "cannot recognize multiple"
464                                                    " VXLAN encapsulations");
465                                 return -rte_errno;
466                         }
467                         flow->inner = 1;
468                 }
469                 flow->offset += cur_item->dst_sz;
470         }
471         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; ++actions) {
472                 if (actions->type == RTE_FLOW_ACTION_TYPE_VOID) {
473                         continue;
474                 } else if (actions->type == RTE_FLOW_ACTION_TYPE_DROP) {
475                         action->drop = 1;
476                 } else if (actions->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
477                         const struct rte_flow_action_queue *queue =
478                                 (const struct rte_flow_action_queue *)
479                                 actions->conf;
480                         uint16_t n;
481                         uint16_t found = 0;
482
483                         if (!queue || (queue->index > (priv->rxqs_n - 1)))
484                                 goto exit_action_not_supported;
485                         for (n = 0; n < action->queues_n; ++n) {
486                                 if (action->queues[n] == queue->index) {
487                                         found = 1;
488                                         break;
489                                 }
490                         }
491                         if (action->queues_n > 1 && !found) {
492                                 rte_flow_error_set(error, ENOTSUP,
493                                            RTE_FLOW_ERROR_TYPE_ACTION,
494                                            actions,
495                                            "queue action not in RSS queues");
496                                 return -rte_errno;
497                         }
498                         if (!found) {
499                                 action->queue = 1;
500                                 action->queues_n = 1;
501                                 action->queues[0] = queue->index;
502                         }
503                 } else if (actions->type == RTE_FLOW_ACTION_TYPE_RSS) {
504                         const struct rte_flow_action_rss *rss =
505                                 (const struct rte_flow_action_rss *)
506                                 actions->conf;
507                         uint16_t n;
508
509                         if (action->queues_n == 1) {
510                                 uint16_t found = 0;
511
512                                 assert(action->queues_n);
513                                 for (n = 0; n < rss->num; ++n) {
514                                         if (action->queues[0] ==
515                                             rss->queue[n]) {
516                                                 found = 1;
517                                                 break;
518                                         }
519                                 }
520                                 if (!found) {
521                                         rte_flow_error_set(error, ENOTSUP,
522                                                    RTE_FLOW_ERROR_TYPE_ACTION,
523                                                    actions,
524                                                    "queue action not in RSS"
525                                                    " queues");
526                                         return -rte_errno;
527                                 }
528                         }
529                         for (n = 0; n < rss->num; ++n) {
530                                 if (rss->queue[n] >= priv->rxqs_n) {
531                                         rte_flow_error_set(error, EINVAL,
532                                                    RTE_FLOW_ERROR_TYPE_ACTION,
533                                                    actions,
534                                                    "queue id > number of"
535                                                    " queues");
536                                         return -rte_errno;
537                                 }
538                         }
539                         action->queue = 1;
540                         for (n = 0; n < rss->num; ++n)
541                                 action->queues[n] = rss->queue[n];
542                         action->queues_n = rss->num;
543                 } else if (actions->type == RTE_FLOW_ACTION_TYPE_MARK) {
544                         const struct rte_flow_action_mark *mark =
545                                 (const struct rte_flow_action_mark *)
546                                 actions->conf;
547
548                         if (!mark) {
549                                 rte_flow_error_set(error, EINVAL,
550                                                    RTE_FLOW_ERROR_TYPE_ACTION,
551                                                    actions,
552                                                    "mark must be defined");
553                                 return -rte_errno;
554                         } else if (mark->id >= MLX5_FLOW_MARK_MAX) {
555                                 rte_flow_error_set(error, ENOTSUP,
556                                                    RTE_FLOW_ERROR_TYPE_ACTION,
557                                                    actions,
558                                                    "mark must be between 0"
559                                                    " and 16777199");
560                                 return -rte_errno;
561                         }
562                         action->mark = 1;
563                         action->mark_id = mark->id;
564                 } else if (actions->type == RTE_FLOW_ACTION_TYPE_FLAG) {
565                         action->mark = 1;
566                 } else {
567                         goto exit_action_not_supported;
568                 }
569         }
570         if (action->mark && !flow->ibv_attr && !action->drop)
571                 flow->offset += sizeof(struct ibv_exp_flow_spec_action_tag);
572         if (!action->queue && !action->drop) {
573                 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
574                                    NULL, "no valid action");
575                 return -rte_errno;
576         }
577         return 0;
578 exit_item_not_supported:
579         rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
580                            items, "item not supported");
581         return -rte_errno;
582 exit_action_not_supported:
583         rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
584                            actions, "action not supported");
585         return -rte_errno;
586 }
587
588 /**
589  * Validate a flow supported by the NIC.
590  *
591  * @see rte_flow_validate()
592  * @see rte_flow_ops
593  */
594 int
595 mlx5_flow_validate(struct rte_eth_dev *dev,
596                    const struct rte_flow_attr *attr,
597                    const struct rte_flow_item items[],
598                    const struct rte_flow_action actions[],
599                    struct rte_flow_error *error)
600 {
601         struct priv *priv = dev->data->dev_private;
602         int ret;
603         struct mlx5_flow flow = { .offset = sizeof(struct ibv_exp_flow_attr) };
604         struct mlx5_flow_action action = {
605                 .queue = 0,
606                 .drop = 0,
607                 .mark = 0,
608                 .mark_id = MLX5_FLOW_MARK_DEFAULT,
609                 .queues_n = 0,
610         };
611
612         priv_lock(priv);
613         ret = priv_flow_validate(priv, attr, items, actions, error, &flow,
614                                  &action);
615         priv_unlock(priv);
616         return ret;
617 }
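/*
 * Usage sketch (illustrative, not part of this file), assuming a port
 * configured with 4 Rx queues and port_id standing for the application's port
 * identifier: the QUEUE action below targets index 4, which does not exist, so
 * validation fails through exit_action_not_supported and a negative errno is
 * returned.
 *
 *	const struct rte_flow_attr attr = { .ingress = 1 };
 *	const struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	const struct rte_flow_action_queue queue = { .index = 4 };
 *	const struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error err;
 *	int ret = rte_flow_validate(port_id, &attr, pattern, actions, &err);
 */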
618
619 /**
620  * Convert Ethernet item to Verbs specification.
621  *
622  * @param item[in]
623  *   Item specification.
624  * @param default_mask[in]
625  *   Default bit-masks to use when item->mask is not provided.
626  * @param data[in, out]
627  *   User structure.
628  */
629 static int
630 mlx5_flow_create_eth(const struct rte_flow_item *item,
631                      const void *default_mask,
632                      void *data)
633 {
634         const struct rte_flow_item_eth *spec = item->spec;
635         const struct rte_flow_item_eth *mask = item->mask;
636         struct mlx5_flow *flow = (struct mlx5_flow *)data;
637         struct ibv_exp_flow_spec_eth *eth;
638         const unsigned int eth_size = sizeof(struct ibv_exp_flow_spec_eth);
639         unsigned int i;
640
641         ++flow->ibv_attr->num_of_specs;
642         flow->ibv_attr->priority = 2;
643         flow->hash_fields = 0;
644         eth = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
645         *eth = (struct ibv_exp_flow_spec_eth) {
646                 .type = flow->inner | IBV_EXP_FLOW_SPEC_ETH,
647                 .size = eth_size,
648         };
649         if (!spec)
650                 return 0;
651         if (!mask)
652                 mask = default_mask;
653         memcpy(eth->val.dst_mac, spec->dst.addr_bytes, ETHER_ADDR_LEN);
654         memcpy(eth->val.src_mac, spec->src.addr_bytes, ETHER_ADDR_LEN);
655         eth->val.ether_type = spec->type;
656         memcpy(eth->mask.dst_mac, mask->dst.addr_bytes, ETHER_ADDR_LEN);
657         memcpy(eth->mask.src_mac, mask->src.addr_bytes, ETHER_ADDR_LEN);
658         eth->mask.ether_type = mask->type;
659         /* Remove unwanted bits from values. */
660         for (i = 0; i < ETHER_ADDR_LEN; ++i) {
661                 eth->val.dst_mac[i] &= eth->mask.dst_mac[i];
662                 eth->val.src_mac[i] &= eth->mask.src_mac[i];
663         }
664         eth->val.ether_type &= eth->mask.ether_type;
665         return 0;
666 }
667
668 /**
669  * Convert VLAN item to Verbs specification.
670  *
671  * @param item[in]
672  *   Item specification.
673  * @param default_mask[in]
674  *   Default bit-masks to use when item->mask is not provided.
675  * @param data[in, out]
676  *   User structure.
677  */
678 static int
679 mlx5_flow_create_vlan(const struct rte_flow_item *item,
680                       const void *default_mask,
681                       void *data)
682 {
683         const struct rte_flow_item_vlan *spec = item->spec;
684         const struct rte_flow_item_vlan *mask = item->mask;
685         struct mlx5_flow *flow = (struct mlx5_flow *)data;
686         struct ibv_exp_flow_spec_eth *eth;
687         const unsigned int eth_size = sizeof(struct ibv_exp_flow_spec_eth);
688
689         eth = (void *)((uintptr_t)flow->ibv_attr + flow->offset - eth_size);
690         if (!spec)
691                 return 0;
692         if (!mask)
693                 mask = default_mask;
694         eth->val.vlan_tag = spec->tci;
695         eth->mask.vlan_tag = mask->tci;
696         eth->val.vlan_tag &= eth->mask.vlan_tag;
697         return 0;
698 }
699
700 /**
701  * Convert IPv4 item to Verbs specification.
702  *
703  * @param item[in]
704  *   Item specification.
705  * @param default_mask[in]
706  *   Default bit-masks to use when item->mask is not provided.
707  * @param data[in, out]
708  *   User structure.
709  */
710 static int
711 mlx5_flow_create_ipv4(const struct rte_flow_item *item,
712                       const void *default_mask,
713                       void *data)
714 {
715         const struct rte_flow_item_ipv4 *spec = item->spec;
716         const struct rte_flow_item_ipv4 *mask = item->mask;
717         struct mlx5_flow *flow = (struct mlx5_flow *)data;
718         struct ibv_exp_flow_spec_ipv4_ext *ipv4;
719         unsigned int ipv4_size = sizeof(struct ibv_exp_flow_spec_ipv4_ext);
720
721         ++flow->ibv_attr->num_of_specs;
722         flow->ibv_attr->priority = 1;
723         flow->hash_fields = (IBV_EXP_RX_HASH_SRC_IPV4 |
724                              IBV_EXP_RX_HASH_DST_IPV4);
725         ipv4 = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
726         *ipv4 = (struct ibv_exp_flow_spec_ipv4_ext) {
727                 .type = flow->inner | IBV_EXP_FLOW_SPEC_IPV4_EXT,
728                 .size = ipv4_size,
729         };
730         if (!spec)
731                 return 0;
732         if (!mask)
733                 mask = default_mask;
734         ipv4->val = (struct ibv_exp_flow_ipv4_ext_filter){
735                 .src_ip = spec->hdr.src_addr,
736                 .dst_ip = spec->hdr.dst_addr,
737                 .proto = spec->hdr.next_proto_id,
738                 .tos = spec->hdr.type_of_service,
739         };
740         ipv4->mask = (struct ibv_exp_flow_ipv4_ext_filter){
741                 .src_ip = mask->hdr.src_addr,
742                 .dst_ip = mask->hdr.dst_addr,
743                 .proto = mask->hdr.next_proto_id,
744                 .tos = mask->hdr.type_of_service,
745         };
746         /* Remove unwanted bits from values. */
747         ipv4->val.src_ip &= ipv4->mask.src_ip;
748         ipv4->val.dst_ip &= ipv4->mask.dst_ip;
749         ipv4->val.proto &= ipv4->mask.proto;
750         ipv4->val.tos &= ipv4->mask.tos;
751         return 0;
752 }
753
754 /**
755  * Convert IPv6 item to Verbs specification.
756  *
757  * @param item[in]
758  *   Item specification.
759  * @param default_mask[in]
760  *   Default bit-masks to use when item->mask is not provided.
761  * @param data[in, out]
762  *   User structure.
763  */
764 static int
765 mlx5_flow_create_ipv6(const struct rte_flow_item *item,
766                       const void *default_mask,
767                       void *data)
768 {
769         const struct rte_flow_item_ipv6 *spec = item->spec;
770         const struct rte_flow_item_ipv6 *mask = item->mask;
771         struct mlx5_flow *flow = (struct mlx5_flow *)data;
772         struct ibv_exp_flow_spec_ipv6_ext *ipv6;
773         unsigned int ipv6_size = sizeof(struct ibv_exp_flow_spec_ipv6_ext);
774
775         ++flow->ibv_attr->num_of_specs;
776         flow->ibv_attr->priority = 1;
777         flow->hash_fields = (IBV_EXP_RX_HASH_SRC_IPV6 |
778                              IBV_EXP_RX_HASH_DST_IPV6);
779         ipv6 = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
780         *ipv6 = (struct ibv_exp_flow_spec_ipv6_ext) {
781                 .type = flow->inner | IBV_EXP_FLOW_SPEC_IPV6_EXT,
782                 .size = ipv6_size,
783         };
784         if (!spec)
785                 return 0;
786         if (!mask)
787                 mask = default_mask;
788         memcpy(ipv6->val.src_ip, spec->hdr.src_addr,
789                RTE_DIM(ipv6->val.src_ip));
790         memcpy(ipv6->val.dst_ip, spec->hdr.dst_addr,
791                RTE_DIM(ipv6->val.dst_ip));
792         memcpy(ipv6->mask.src_ip, mask->hdr.src_addr,
793                RTE_DIM(ipv6->mask.src_ip));
794         memcpy(ipv6->mask.dst_ip, mask->hdr.dst_addr,
795                RTE_DIM(ipv6->mask.dst_ip));
796         ipv6->mask.flow_label = mask->hdr.vtc_flow;
797         ipv6->mask.next_hdr = mask->hdr.proto;
798         ipv6->mask.hop_limit = mask->hdr.hop_limits;
799         ipv6->val.flow_label &= ipv6->mask.flow_label;
800         ipv6->val.next_hdr &= ipv6->mask.next_hdr;
801         ipv6->val.hop_limit &= ipv6->mask.hop_limit;
802         return 0;
803 }
804
805 /**
806  * Convert UDP item to Verbs specification.
807  *
808  * @param item[in]
809  *   Item specification.
810  * @param default_mask[in]
811  *   Default bit-masks to use when item->mask is not provided.
812  * @param data[in, out]
813  *   User structure.
814  */
815 static int
816 mlx5_flow_create_udp(const struct rte_flow_item *item,
817                      const void *default_mask,
818                      void *data)
819 {
820         const struct rte_flow_item_udp *spec = item->spec;
821         const struct rte_flow_item_udp *mask = item->mask;
822         struct mlx5_flow *flow = (struct mlx5_flow *)data;
823         struct ibv_exp_flow_spec_tcp_udp *udp;
824         unsigned int udp_size = sizeof(struct ibv_exp_flow_spec_tcp_udp);
825
826         ++flow->ibv_attr->num_of_specs;
827         flow->ibv_attr->priority = 0;
828         flow->hash_fields |= (IBV_EXP_RX_HASH_SRC_PORT_UDP |
829                               IBV_EXP_RX_HASH_DST_PORT_UDP);
830         udp = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
831         *udp = (struct ibv_exp_flow_spec_tcp_udp) {
832                 .type = flow->inner | IBV_EXP_FLOW_SPEC_UDP,
833                 .size = udp_size,
834         };
835         if (!spec)
836                 return 0;
837         if (!mask)
838                 mask = default_mask;
839         udp->val.dst_port = spec->hdr.dst_port;
840         udp->val.src_port = spec->hdr.src_port;
841         udp->mask.dst_port = mask->hdr.dst_port;
842         udp->mask.src_port = mask->hdr.src_port;
843         /* Remove unwanted bits from values. */
844         udp->val.src_port &= udp->mask.src_port;
845         udp->val.dst_port &= udp->mask.dst_port;
846         return 0;
847 }
848
849 /**
850  * Convert TCP item to Verbs specification.
851  *
852  * @param item[in]
853  *   Item specification.
854  * @param default_mask[in]
855  *   Default bit-masks to use when item->mask is not provided.
856  * @param data[in, out]
857  *   User structure.
858  */
859 static int
860 mlx5_flow_create_tcp(const struct rte_flow_item *item,
861                      const void *default_mask,
862                      void *data)
863 {
864         const struct rte_flow_item_tcp *spec = item->spec;
865         const struct rte_flow_item_tcp *mask = item->mask;
866         struct mlx5_flow *flow = (struct mlx5_flow *)data;
867         struct ibv_exp_flow_spec_tcp_udp *tcp;
868         unsigned int tcp_size = sizeof(struct ibv_exp_flow_spec_tcp_udp);
869
870         ++flow->ibv_attr->num_of_specs;
871         flow->ibv_attr->priority = 0;
872         flow->hash_fields |= (IBV_EXP_RX_HASH_SRC_PORT_TCP |
873                               IBV_EXP_RX_HASH_DST_PORT_TCP);
874         tcp = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
875         *tcp = (struct ibv_exp_flow_spec_tcp_udp) {
876                 .type = flow->inner | IBV_EXP_FLOW_SPEC_TCP,
877                 .size = tcp_size,
878         };
879         if (!spec)
880                 return 0;
881         if (!mask)
882                 mask = default_mask;
883         tcp->val.dst_port = spec->hdr.dst_port;
884         tcp->val.src_port = spec->hdr.src_port;
885         tcp->mask.dst_port = mask->hdr.dst_port;
886         tcp->mask.src_port = mask->hdr.src_port;
887         /* Remove unwanted bits from values. */
888         tcp->val.src_port &= tcp->mask.src_port;
889         tcp->val.dst_port &= tcp->mask.dst_port;
890         return 0;
891 }
892
893 /**
894  * Convert VXLAN item to Verbs specification.
895  *
896  * @param item[in]
897  *   Item specification.
898  * @param default_mask[in]
899  *   Default bit-masks to use when item->mask is not provided.
900  * @param data[in, out]
901  *   User structure.
902  */
903 static int
904 mlx5_flow_create_vxlan(const struct rte_flow_item *item,
905                        const void *default_mask,
906                        void *data)
907 {
908         const struct rte_flow_item_vxlan *spec = item->spec;
909         const struct rte_flow_item_vxlan *mask = item->mask;
910         struct mlx5_flow *flow = (struct mlx5_flow *)data;
911         struct ibv_exp_flow_spec_tunnel *vxlan;
912         unsigned int size = sizeof(struct ibv_exp_flow_spec_tunnel);
913         union vni {
914                 uint32_t vlan_id;
915                 uint8_t vni[4];
916         } id;
917
918         ++flow->ibv_attr->num_of_specs;
919         flow->ibv_attr->priority = 0;
920         id.vni[0] = 0;
921         vxlan = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
922         *vxlan = (struct ibv_exp_flow_spec_tunnel) {
923                 .type = flow->inner | IBV_EXP_FLOW_SPEC_VXLAN_TUNNEL,
924                 .size = size,
925         };
926         flow->inner = IBV_EXP_FLOW_SPEC_INNER;
927         if (!spec)
928                 return 0;
929         if (!mask)
930                 mask = default_mask;
931         memcpy(&id.vni[1], spec->vni, 3);
932         vxlan->val.tunnel_id = id.vlan_id;
933         memcpy(&id.vni[1], mask->vni, 3);
934         vxlan->mask.tunnel_id = id.vlan_id;
935         /* Remove unwanted bits from values. */
936         vxlan->val.tunnel_id &= vxlan->mask.tunnel_id;
937         return 0;
938 }
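/*
 * VNI packing sketch (illustrative, not part of this file): the item carries a
 * 24-bit VNI which the conversion above copies into bytes 1-3 of the 32-bit
 * tunnel_id, leaving byte 0 cleared.  Matching VNI 0x123456 with the default
 * mask therefore only requires a spec:
 *
 *	const struct rte_flow_item_vxlan vxlan_spec = {
 *		.vni = "\x12\x34\x56",
 *	};
 *	const struct rte_flow_item vxlan_item = {
 *		.type = RTE_FLOW_ITEM_TYPE_VXLAN,
 *		.spec = &vxlan_spec,
 *	};
 */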
939
940 /**
941  * Convert mark/flag action to Verbs specification.
942  *
943  * @param flow
944  *   Pointer to MLX5 flow structure.
945  * @param mark_id
946  *   Mark identifier.
947  */
948 static int
949 mlx5_flow_create_flag_mark(struct mlx5_flow *flow, uint32_t mark_id)
950 {
951         struct ibv_exp_flow_spec_action_tag *tag;
952         unsigned int size = sizeof(struct ibv_exp_flow_spec_action_tag);
953
954         tag = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
955         *tag = (struct ibv_exp_flow_spec_action_tag){
956                 .type = IBV_EXP_FLOW_SPEC_ACTION_TAG,
957                 .size = size,
958                 .tag_id = mlx5_flow_mark_set(mark_id),
959         };
960         ++flow->ibv_attr->num_of_specs;
961         return 0;
962 }
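/*
 * Receive-side sketch (illustrative, standard rte_flow semantics rather than
 * code from this file): packets matched by a flow carrying a MARK action are
 * delivered with the identifier attached to the mbuf, which the application
 * reads back as:
 *
 *	if (mbuf->ol_flags & PKT_RX_FDIR_ID)
 *		mark = mbuf->hash.fdir.hi;
 *
 * A FLAG action only sets PKT_RX_FDIR, without an identifier.
 */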
963
964 /**
965  * Complete flow rule creation with a drop queue.
966  *
967  * @param priv
968  *   Pointer to private structure.
969  * @param flow
970  *   MLX5 flow attributes (filled by mlx5_flow_validate()).
971  * @param[out] error
972  *   Perform verbose error reporting if not NULL.
973  *
974  * @return
975  *   A flow if the rule could be created, NULL otherwise.
976  */
977 static struct rte_flow *
978 priv_flow_create_action_queue_drop(struct priv *priv,
979                                    struct mlx5_flow *flow,
980                                    struct rte_flow_error *error)
981 {
982         struct rte_flow *rte_flow;
983
984         assert(priv->pd);
985         assert(priv->ctx);
986         rte_flow = rte_calloc(__func__, 1, sizeof(*rte_flow), 0);
987         if (!rte_flow) {
988                 rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
989                                    NULL, "cannot allocate flow memory");
990                 return NULL;
991         }
992         rte_flow->drop = 1;
993         rte_flow->ibv_attr = flow->ibv_attr;
994         rte_flow->qp = priv->flow_drop_queue->qp;
995         if (!priv->started)
996                 return rte_flow;
997         rte_flow->ibv_flow = ibv_exp_create_flow(rte_flow->qp,
998                                                  rte_flow->ibv_attr);
999         if (!rte_flow->ibv_flow) {
1000                 rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
1001                                    NULL, "flow rule creation failure");
1002                 goto error;
1003         }
1004         return rte_flow;
1005 error:
1006         assert(rte_flow);
1007         rte_free(rte_flow);
1008         return NULL;
1009 }
1010
1011 /**
1012  * Complete flow rule creation.
1013  *
1014  * @param priv
1015  *   Pointer to private structure.
1016  * @param flow
1017  *   MLX5 flow attributes (filled by mlx5_flow_validate()).
1018  * @param action
1019  *   Target action structure.
1020  * @param[out] error
1021  *   Perform verbose error reporting if not NULL.
1022  *
1023  * @return
1024  *   A flow if the rule could be created, NULL otherwise.
1025  */
1026 static struct rte_flow *
1027 priv_flow_create_action_queue(struct priv *priv,
1028                               struct mlx5_flow *flow,
1029                               struct mlx5_flow_action *action,
1030                               struct rte_flow_error *error)
1031 {
1032         struct rte_flow *rte_flow;
1033         unsigned int i;
1034         unsigned int j;
1035         const unsigned int wqs_n = 1 << log2above(action->queues_n);
1036         struct ibv_exp_wq *wqs[wqs_n];
1037
1038         assert(priv->pd);
1039         assert(priv->ctx);
1040         assert(!action->drop);
1041         rte_flow = rte_calloc(__func__, 1,
1042                               sizeof(*rte_flow) + sizeof(struct rxq *) *
1043                               action->queues_n, 0);
1044         if (!rte_flow) {
1045                 rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
1046                                    NULL, "cannot allocate flow memory");
1047                 return NULL;
1048         }
1049         rte_flow->rxqs = (struct rxq *(*)[])((uintptr_t)rte_flow +
1050                                              sizeof(struct rxq *) *
1051                                              action->queues_n);
1052         for (i = 0; i < action->queues_n; ++i) {
1053                 struct rxq_ctrl *rxq;
1054
1055                 rxq = container_of((*priv->rxqs)[action->queues[i]],
1056                                    struct rxq_ctrl, rxq);
1057                 wqs[i] = rxq->wq;
1058                 (*rte_flow->rxqs)[i] = &rxq->rxq;
1059                 ++rte_flow->rxqs_n;
1060                 rxq->rxq.mark |= action->mark;
1061         }
1062         /* Finalise the indirection table. */
1063         for (j = 0; i < wqs_n; ++i, ++j) {
1064                 wqs[i] = wqs[j];
1065                 if (j == action->queues_n)
1066                         j = 0;
1067         }
1068         rte_flow->mark = action->mark;
1069         rte_flow->ibv_attr = flow->ibv_attr;
1070         rte_flow->hash_fields = flow->hash_fields;
1071         rte_flow->ind_table = ibv_exp_create_rwq_ind_table(
1072                 priv->ctx,
1073                 &(struct ibv_exp_rwq_ind_table_init_attr){
1074                         .pd = priv->pd,
1075                         .log_ind_tbl_size = log2above(action->queues_n),
1076                         .ind_tbl = wqs,
1077                         .comp_mask = 0,
1078                 });
1079         if (!rte_flow->ind_table) {
1080                 rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
1081                                    NULL, "cannot allocate indirection table");
1082                 goto error;
1083         }
1084         rte_flow->qp = ibv_exp_create_qp(
1085                 priv->ctx,
1086                 &(struct ibv_exp_qp_init_attr){
1087                         .qp_type = IBV_QPT_RAW_PACKET,
1088                         .comp_mask =
1089                                 IBV_EXP_QP_INIT_ATTR_PD |
1090                                 IBV_EXP_QP_INIT_ATTR_PORT |
1091                                 IBV_EXP_QP_INIT_ATTR_RX_HASH,
1092                         .pd = priv->pd,
1093                         .rx_hash_conf = &(struct ibv_exp_rx_hash_conf){
1094                                 .rx_hash_function =
1095                                         IBV_EXP_RX_HASH_FUNC_TOEPLITZ,
1096                                 .rx_hash_key_len = rss_hash_default_key_len,
1097                                 .rx_hash_key = rss_hash_default_key,
1098                                 .rx_hash_fields_mask = rte_flow->hash_fields,
1099                                 .rwq_ind_tbl = rte_flow->ind_table,
1100                         },
1101                         .port_num = priv->port,
1102                 });
1103         if (!rte_flow->qp) {
1104                 rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
1105                                    NULL, "cannot allocate QP");
1106                 goto error;
1107         }
1108         if (!priv->started)
1109                 return rte_flow;
1110         rte_flow->ibv_flow = ibv_exp_create_flow(rte_flow->qp,
1111                                                  rte_flow->ibv_attr);
1112         if (!rte_flow->ibv_flow) {
1113                 rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
1114                                    NULL, "flow rule creation failure");
1115                 goto error;
1116         }
1117         return rte_flow;
1118 error:
1119         assert(rte_flow);
1120         if (rte_flow->qp)
1121                 ibv_destroy_qp(rte_flow->qp);
1122         if (rte_flow->ind_table)
1123                 ibv_exp_destroy_rwq_ind_table(rte_flow->ind_table);
1124         rte_free(rte_flow);
1125         return NULL;
1126 }
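/*
 * Indirection table sketch: Verbs needs a power-of-two table, so wqs_n is
 * rounded up via 1 << log2above(queues_n) and the second loop above wraps
 * around the configured queues to fill the remaining slots.  For an RSS action
 * listing queues {0, 1, 2}, wqs_n is 4 and the resulting table is
 * {WQ0, WQ1, WQ2, WQ0}; traffic is then spread over those entries by the
 * Toeplitz hash configured on the QP.
 */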
1127
1128 /**
1129  * Convert a flow.
1130  *
1131  * @param priv
1132  *   Pointer to private structure.
1133  * @param[in] attr
1134  *   Flow rule attributes.
1135  * @param[in] pattern
1136  *   Pattern specification (list terminated by the END pattern item).
1137  * @param[in] actions
1138  *   Associated actions (list terminated by the END action).
1139  * @param[out] error
1140  *   Perform verbose error reporting if not NULL.
1141  *
1142  * @return
1143  *   A flow on success, NULL otherwise.
1144  */
1145 static struct rte_flow *
1146 priv_flow_create(struct priv *priv,
1147                  const struct rte_flow_attr *attr,
1148                  const struct rte_flow_item items[],
1149                  const struct rte_flow_action actions[],
1150                  struct rte_flow_error *error)
1151 {
1152         struct rte_flow *rte_flow;
1153         struct mlx5_flow flow = { .offset = sizeof(struct ibv_exp_flow_attr), };
1154         struct mlx5_flow_action action = {
1155                 .queue = 0,
1156                 .drop = 0,
1157                 .mark = 0,
1158                 .mark_id = MLX5_FLOW_MARK_DEFAULT,
1159                 .queues_n = 0,
1160         };
1161         int err;
1162
1163         err = priv_flow_validate(priv, attr, items, actions, error, &flow,
1164                                  &action);
1165         if (err)
1166                 goto exit;
1167         flow.ibv_attr = rte_malloc(__func__, flow.offset, 0);
1168         flow.offset = sizeof(struct ibv_exp_flow_attr);
1169         if (!flow.ibv_attr) {
1170                 rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
1171                                    NULL, "cannot allocate ibv_attr memory");
1172                 goto exit;
1173         }
1174         *flow.ibv_attr = (struct ibv_exp_flow_attr){
1175                 .type = IBV_EXP_FLOW_ATTR_NORMAL,
1176                 .size = sizeof(struct ibv_exp_flow_attr),
1177                 .priority = attr->priority,
1178                 .num_of_specs = 0,
1179                 .port = 0,
1180                 .flags = 0,
1181                 .reserved = 0,
1182         };
1183         flow.inner = 0;
1184         flow.hash_fields = 0;
1185         claim_zero(priv_flow_validate(priv, attr, items, actions,
1186                                       error, &flow, &action));
1187         if (action.mark && !action.drop) {
1188                 mlx5_flow_create_flag_mark(&flow, action.mark_id);
1189                 flow.offset += sizeof(struct ibv_exp_flow_spec_action_tag);
1190         }
1191         if (action.drop)
1192                 rte_flow =
1193                         priv_flow_create_action_queue_drop(priv, &flow, error);
1194         else
1195                 rte_flow = priv_flow_create_action_queue(priv, &flow, &action,
1196                                                          error);
1197         if (!rte_flow)
1198                 goto exit;
1199         return rte_flow;
1200 exit:
1201         rte_free(flow.ibv_attr);
1202         return NULL;
1203 }
1204
1205 /**
1206  * Create a flow.
1207  *
1208  * @see rte_flow_create()
1209  * @see rte_flow_ops
1210  */
1211 struct rte_flow *
1212 mlx5_flow_create(struct rte_eth_dev *dev,
1213                  const struct rte_flow_attr *attr,
1214                  const struct rte_flow_item items[],
1215                  const struct rte_flow_action actions[],
1216                  struct rte_flow_error *error)
1217 {
1218         struct priv *priv = dev->data->dev_private;
1219         struct rte_flow *flow;
1220
1221         priv_lock(priv);
1222         flow = priv_flow_create(priv, attr, items, actions, error);
1223         if (flow) {
1224                 LIST_INSERT_HEAD(&priv->flows, flow, next);
1225                 DEBUG("Flow created %p", (void *)flow);
1226         }
1227         priv_unlock(priv);
1228         return flow;
1229 }
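/*
 * Usage sketch (illustrative, not part of this file) for the RSS case this
 * file's fix is about: every queue listed in an RSS action must exist on the
 * port.  Assuming 4 configured Rx queues, port_id standing for the
 * application's port, and the DPDK 17.x rte_flow_action_rss layout (num plus a
 * flexible queue[] array), the rule below is accepted; replacing one entry
 * with e.g. 7 makes validation fail with EINVAL ("queue id > number of
 * queues") before anything is created.
 *
 *	static const uint16_t queues[] = { 0, 1, 2, 3 };
 *	struct rte_flow_action_rss *rss;
 *	const struct rte_flow_attr attr = { .ingress = 1 };
 *	const struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_RSS },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error err;
 *	struct rte_flow *flow;
 *
 *	rss = calloc(1, sizeof(*rss) + sizeof(queues));
 *	rss->num = RTE_DIM(queues);
 *	memcpy(rss->queue, queues, sizeof(queues));
 *	actions[0].conf = rss;
 *	flow = rte_flow_create(port_id, &attr, pattern, actions, &err);
 */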
1230
1231 /**
1232  * Destroy a flow.
1233  *
1234  * @param priv
1235  *   Pointer to private structure.
1236  * @param[in] flow
1237  *   Flow to destroy.
1238  */
1239 static void
1240 priv_flow_destroy(struct priv *priv,
1241                   struct rte_flow *flow)
1242 {
1243         (void)priv;
1244         LIST_REMOVE(flow, next);
1245         if (flow->ibv_flow)
1246                 claim_zero(ibv_exp_destroy_flow(flow->ibv_flow));
1247         if (flow->drop)
1248                 goto free;
1249         if (flow->qp)
1250                 claim_zero(ibv_destroy_qp(flow->qp));
1251         if (flow->ind_table)
1252                 claim_zero(ibv_exp_destroy_rwq_ind_table(flow->ind_table));
1253         if (flow->drop && flow->wq)
1254                 claim_zero(ibv_exp_destroy_wq(flow->wq));
1255         if (flow->drop && flow->cq)
1256                 claim_zero(ibv_destroy_cq(flow->cq));
1257         if (flow->mark) {
1258                 struct rte_flow *tmp;
1259                 struct rxq *rxq;
1260                 uint32_t mark_n = 0;
1261                 uint32_t queue_n;
1262
1263                 /*
1264                  * To remove the mark from the queue, the queue must not be
1265                  * present in any other marked flow (RSS or not).
1266                  */
1267                 for (queue_n = 0; queue_n < flow->rxqs_n; ++queue_n) {
1268                         rxq = (*flow->rxqs)[queue_n];
1269                         for (tmp = LIST_FIRST(&priv->flows);
1270                              tmp;
1271                              tmp = LIST_NEXT(tmp, next)) {
1272                                 uint32_t tqueue_n;
1273
1274                                 if (tmp->drop)
1275                                         continue;
1276                                 for (tqueue_n = 0;
1277                                      tqueue_n < tmp->rxqs_n;
1278                                      ++tqueue_n) {
1279                                         struct rxq *trxq;
1280
1281                                         trxq = (*tmp->rxqs)[tqueue_n];
1282                                         if (rxq == trxq)
1283                                                 ++mark_n;
1284                                 }
1285                         }
1286                         rxq->mark = !!mark_n;
1287                 }
1288         }
1289 free:
1290         rte_free(flow->ibv_attr);
1291         DEBUG("Flow destroyed %p", (void *)flow);
1292         rte_free(flow);
1293 }
1294
1295 /**
1296  * Destroy a flow.
1297  *
1298  * @see rte_flow_destroy()
1299  * @see rte_flow_ops
1300  */
1301 int
1302 mlx5_flow_destroy(struct rte_eth_dev *dev,
1303                   struct rte_flow *flow,
1304                   struct rte_flow_error *error)
1305 {
1306         struct priv *priv = dev->data->dev_private;
1307
1308         (void)error;
1309         priv_lock(priv);
1310         priv_flow_destroy(priv, flow);
1311         priv_unlock(priv);
1312         return 0;
1313 }
1314
1315 /**
1316  * Destroy all flows.
1317  *
1318  * @param priv
1319  *   Pointer to private structure.
1320  */
1321 static void
1322 priv_flow_flush(struct priv *priv)
1323 {
1324         while (!LIST_EMPTY(&priv->flows)) {
1325                 struct rte_flow *flow;
1326
1327                 flow = LIST_FIRST(&priv->flows);
1328                 priv_flow_destroy(priv, flow);
1329         }
1330 }
1331
1332 /**
1333  * Destroy all flows.
1334  *
1335  * @see rte_flow_flush()
1336  * @see rte_flow_ops
1337  */
1338 int
1339 mlx5_flow_flush(struct rte_eth_dev *dev,
1340                 struct rte_flow_error *error)
1341 {
1342         struct priv *priv = dev->data->dev_private;
1343
1344         (void)error;
1345         priv_lock(priv);
1346         priv_flow_flush(priv);
1347         priv_unlock(priv);
1348         return 0;
1349 }
1350
1351 /**
1352  * Create drop queue.
1353  *
1354  * @param priv
1355  *   Pointer to private structure.
1356  *
1357  * @return
1358  *   0 on success, -1 otherwise.
1359  */
1360 static int
1361 priv_flow_create_drop_queue(struct priv *priv)
1362 {
1363         struct rte_flow_drop *fdq = NULL;
1364         unsigned int i;
1365
1366         assert(priv->pd);
1367         assert(priv->ctx);
1368         fdq = rte_calloc(__func__, 1, sizeof(*fdq), 0);
1369         if (!fdq) {
1370                 WARN("cannot allocate memory for drop queue");
1371                 return -1;
1372         }
1373         fdq->cq = ibv_exp_create_cq(priv->ctx, 1, NULL, NULL, 0,
1374                         &(struct ibv_exp_cq_init_attr){
1375                         .comp_mask = 0,
1376                         });
1377         if (!fdq->cq) {
1378                 WARN("cannot allocate CQ for drop queue");
1379                 goto error;
1380         }
1381         for (i = 0; i != MLX5_DROP_WQ_N; ++i) {
1382                 fdq->wqs[i] = ibv_exp_create_wq(priv->ctx,
1383                                 &(struct ibv_exp_wq_init_attr){
1384                                 .wq_type = IBV_EXP_WQT_RQ,
1385                                 .max_recv_wr = 1,
1386                                 .max_recv_sge = 1,
1387                                 .pd = priv->pd,
1388                                 .cq = fdq->cq,
1389                                 });
1390                 if (!fdq->wqs[i]) {
1391                         WARN("cannot allocate WQ for drop queue");
1392                         goto error;
1393                 }
1394         }
1395         fdq->ind_table = ibv_exp_create_rwq_ind_table(priv->ctx,
1396                         &(struct ibv_exp_rwq_ind_table_init_attr){
1397                         .pd = priv->pd,
1398                         .log_ind_tbl_size = 0,
1399                         .ind_tbl = fdq->wqs,
1400                         .comp_mask = 0,
1401                         });
1402         if (!fdq->ind_table) {
1403                 WARN("cannot allocate indirection table for drop queue");
1404                 goto error;
1405         }
1406         fdq->qp = ibv_exp_create_qp(priv->ctx,
1407                 &(struct ibv_exp_qp_init_attr){
1408                         .qp_type = IBV_QPT_RAW_PACKET,
1409                         .comp_mask =
1410                                 IBV_EXP_QP_INIT_ATTR_PD |
1411                                 IBV_EXP_QP_INIT_ATTR_PORT |
1412                                 IBV_EXP_QP_INIT_ATTR_RX_HASH,
1413                         .pd = priv->pd,
1414                         .rx_hash_conf = &(struct ibv_exp_rx_hash_conf){
1415                                 .rx_hash_function =
1416                                         IBV_EXP_RX_HASH_FUNC_TOEPLITZ,
1417                                 .rx_hash_key_len = rss_hash_default_key_len,
1418                                 .rx_hash_key = rss_hash_default_key,
1419                                 .rx_hash_fields_mask = 0,
1420                                 .rwq_ind_tbl = fdq->ind_table,
1421                                 },
1422                         .port_num = priv->port,
1423                         });
1424         if (!fdq->qp) {
1425                 WARN("cannot allocate QP for drop queue");
1426                 goto error;
1427         }
1428         priv->flow_drop_queue = fdq;
1429         return 0;
1430 error:
1431         if (fdq->qp)
1432                 claim_zero(ibv_destroy_qp(fdq->qp));
1433         if (fdq->ind_table)
1434                 claim_zero(ibv_exp_destroy_rwq_ind_table(fdq->ind_table));
1435         for (i = 0; i != MLX5_DROP_WQ_N; ++i) {
1436                 if (fdq->wqs[i])
1437                         claim_zero(ibv_exp_destroy_wq(fdq->wqs[i]));
1438         }
1439         if (fdq->cq)
1440                 claim_zero(ibv_destroy_cq(fdq->cq));
1441         if (fdq)
1442                 rte_free(fdq);
1443         priv->flow_drop_queue = NULL;
1444         return -1;
1445 }
1446
1447 /**
1448  * Delete drop queue.
1449  *
1450  * @param priv
1451  *   Pointer to private structure.
1452  */
1453 static void
1454 priv_flow_delete_drop_queue(struct priv *priv)
1455 {
1456         struct rte_flow_drop *fdq = priv->flow_drop_queue;
1457         unsigned int i;
1458
1459         claim_zero(ibv_destroy_qp(fdq->qp));
1460         claim_zero(ibv_exp_destroy_rwq_ind_table(fdq->ind_table));
1461         for (i = 0; i != MLX5_DROP_WQ_N; ++i) {
1462                 assert(fdq->wqs[i]);
1463                 claim_zero(ibv_exp_destroy_wq(fdq->wqs[i]));
1464         }
1465         claim_zero(ibv_destroy_cq(fdq->cq));
1466         rte_free(fdq);
1467         priv->flow_drop_queue = NULL;
1468 }
1469
1470 /**
1471  * Remove all flows.
1472  *
1473  * Called by dev_stop() to remove all flows.
1474  *
1475  * @param priv
1476  *   Pointer to private structure.
1477  */
1478 void
1479 priv_flow_stop(struct priv *priv)
1480 {
1481         struct rte_flow *flow;
1482
1483         for (flow = LIST_FIRST(&priv->flows);
1484              flow;
1485              flow = LIST_NEXT(flow, next)) {
1486                 claim_zero(ibv_exp_destroy_flow(flow->ibv_flow));
1487                 flow->ibv_flow = NULL;
1488                 if (flow->mark) {
1489                         unsigned int n;
1490
1491                         for (n = 0; n < flow->rxqs_n; ++n)
1492                                 (*flow->rxqs)[n]->mark = 0;
1493                 }
1494                 DEBUG("Flow %p removed", (void *)flow);
1495         }
1496         priv_flow_delete_drop_queue(priv);
1497 }
1498
1499 /**
1500  * Add all flows.
1501  *
1502  * @param priv
1503  *   Pointer to private structure.
1504  *
1505  * @return
1506  *   0 on success, an errno value otherwise and rte_errno is set.
1507  */
1508 int
1509 priv_flow_start(struct priv *priv)
1510 {
1511         int ret;
1512         struct rte_flow *flow;
1513
1514         ret = priv_flow_create_drop_queue(priv);
1515         if (ret)
1516                 return -1;
1517         for (flow = LIST_FIRST(&priv->flows);
1518              flow;
1519              flow = LIST_NEXT(flow, next)) {
1520                 struct ibv_qp *qp;
1521
1522                 if (flow->drop)
1523                         qp = priv->flow_drop_queue->qp;
1524                 else
1525                         qp = flow->qp;
1526                 flow->ibv_flow = ibv_exp_create_flow(qp, flow->ibv_attr);
1527                 if (!flow->ibv_flow) {
1528                         DEBUG("Flow %p cannot be applied", (void *)flow);
1529                         rte_errno = EINVAL;
1530                         return rte_errno;
1531                 }
1532                 DEBUG("Flow %p applied", (void *)flow);
1533                 if (flow->mark) {
1534                         unsigned int n;
1535
1536                         for (n = 0; n < flow->rxqs_n; ++n)
1537                                 (*flow->rxqs)[n]->mark = 1;
1538                 }
1539         }
1540         return 0;
1541 }