/*-
 *   BSD LICENSE
 *
 *   Copyright 2016 6WIND S.A.
 *   Copyright 2016 Mellanox.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of 6WIND S.A. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/queue.h>
#include <string.h>
#include <assert.h>

/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_ethdev_driver.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>
#include <rte_ip.h>

#include "mlx5.h"
#include "mlx5_defs.h"
#include "mlx5_prm.h"
#include "mlx5_glue.h"

/* Define minimal priority for control plane flows. */
#define MLX5_CTRL_FLOW_PRIORITY 4

/* Internet Protocol versions. */
#define MLX5_IPV4 4
#define MLX5_IPV6 6

#ifndef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
struct ibv_flow_spec_counter_action {
        int dummy;
};
#endif

/* Dev ops structure defined in mlx5.c */
extern const struct eth_dev_ops mlx5_dev_ops;
extern const struct eth_dev_ops mlx5_dev_ops_isolate;

static int
mlx5_flow_create_eth(const struct rte_flow_item *item,
                     const void *default_mask,
                     void *data);

static int
mlx5_flow_create_vlan(const struct rte_flow_item *item,
                      const void *default_mask,
                      void *data);

static int
mlx5_flow_create_ipv4(const struct rte_flow_item *item,
                      const void *default_mask,
                      void *data);

static int
mlx5_flow_create_ipv6(const struct rte_flow_item *item,
                      const void *default_mask,
                      void *data);

static int
mlx5_flow_create_udp(const struct rte_flow_item *item,
                     const void *default_mask,
                     void *data);

static int
mlx5_flow_create_tcp(const struct rte_flow_item *item,
                     const void *default_mask,
                     void *data);

static int
mlx5_flow_create_vxlan(const struct rte_flow_item *item,
                       const void *default_mask,
                       void *data);

struct mlx5_flow_parse;

static void
mlx5_flow_create_copy(struct mlx5_flow_parse *parser, void *src,
                      unsigned int size);

static int
mlx5_flow_create_flag_mark(struct mlx5_flow_parse *parser, uint32_t mark_id);

static int
mlx5_flow_create_count(struct priv *priv, struct mlx5_flow_parse *parser);

/* Hash RX queue types. */
enum hash_rxq_type {
        HASH_RXQ_TCPV4,
        HASH_RXQ_UDPV4,
        HASH_RXQ_IPV4,
        HASH_RXQ_TCPV6,
        HASH_RXQ_UDPV6,
        HASH_RXQ_IPV6,
        HASH_RXQ_ETH,
};

/* Initialization data for hash RX queue. */
struct hash_rxq_init {
        uint64_t hash_fields; /* Fields that participate in the hash. */
        uint64_t dpdk_rss_hf; /* Matching DPDK RSS hash fields. */
        unsigned int flow_priority; /* Flow priority to use. */
        unsigned int ip_version; /* Internet protocol. */
};

/* Initialization data for hash RX queues. */
const struct hash_rxq_init hash_rxq_init[] = {
        [HASH_RXQ_TCPV4] = {
                .hash_fields = (IBV_RX_HASH_SRC_IPV4 |
                                IBV_RX_HASH_DST_IPV4 |
                                IBV_RX_HASH_SRC_PORT_TCP |
                                IBV_RX_HASH_DST_PORT_TCP),
                .dpdk_rss_hf = ETH_RSS_NONFRAG_IPV4_TCP,
                .flow_priority = 0,
                .ip_version = MLX5_IPV4,
        },
        [HASH_RXQ_UDPV4] = {
                .hash_fields = (IBV_RX_HASH_SRC_IPV4 |
                                IBV_RX_HASH_DST_IPV4 |
                                IBV_RX_HASH_SRC_PORT_UDP |
                                IBV_RX_HASH_DST_PORT_UDP),
                .dpdk_rss_hf = ETH_RSS_NONFRAG_IPV4_UDP,
                .flow_priority = 0,
                .ip_version = MLX5_IPV4,
        },
        [HASH_RXQ_IPV4] = {
                .hash_fields = (IBV_RX_HASH_SRC_IPV4 |
                                IBV_RX_HASH_DST_IPV4),
                .dpdk_rss_hf = (ETH_RSS_IPV4 |
                                ETH_RSS_FRAG_IPV4),
                .flow_priority = 1,
                .ip_version = MLX5_IPV4,
        },
        [HASH_RXQ_TCPV6] = {
                .hash_fields = (IBV_RX_HASH_SRC_IPV6 |
                                IBV_RX_HASH_DST_IPV6 |
                                IBV_RX_HASH_SRC_PORT_TCP |
                                IBV_RX_HASH_DST_PORT_TCP),
                .dpdk_rss_hf = ETH_RSS_NONFRAG_IPV6_TCP,
                .flow_priority = 0,
                .ip_version = MLX5_IPV6,
        },
        [HASH_RXQ_UDPV6] = {
                .hash_fields = (IBV_RX_HASH_SRC_IPV6 |
                                IBV_RX_HASH_DST_IPV6 |
                                IBV_RX_HASH_SRC_PORT_UDP |
                                IBV_RX_HASH_DST_PORT_UDP),
                .dpdk_rss_hf = ETH_RSS_NONFRAG_IPV6_UDP,
                .flow_priority = 0,
                .ip_version = MLX5_IPV6,
        },
        [HASH_RXQ_IPV6] = {
                .hash_fields = (IBV_RX_HASH_SRC_IPV6 |
                                IBV_RX_HASH_DST_IPV6),
                .dpdk_rss_hf = (ETH_RSS_IPV6 |
                                ETH_RSS_FRAG_IPV6),
                .flow_priority = 1,
                .ip_version = MLX5_IPV6,
        },
        [HASH_RXQ_ETH] = {
                .hash_fields = 0,
                .dpdk_rss_hf = 0,
                .flow_priority = 2,
        },
};
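
/*
 * Note: lower flow_priority values take precedence in hardware, so the
 * L4-specific flows above (priority 0) match before plain L3 flows (1),
 * which in turn match before the Ethernet fallback (2).
 */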

/* Number of entries in hash_rxq_init[]. */
const unsigned int hash_rxq_init_n = RTE_DIM(hash_rxq_init);

/** Structure for holding counter stats. */
struct mlx5_flow_counter_stats {
        uint64_t hits; /**< Number of packets matched by the rule. */
        uint64_t bytes; /**< Number of bytes matched by the rule. */
};

/** Structure for Drop queue. */
struct mlx5_hrxq_drop {
        struct ibv_rwq_ind_table *ind_table; /**< Indirection table. */
        struct ibv_qp *qp; /**< Verbs queue pair. */
        struct ibv_wq *wq; /**< Verbs work queue. */
        struct ibv_cq *cq; /**< Verbs completion queue. */
};

/* Flows structures. */
struct mlx5_flow {
        uint64_t hash_fields; /**< Fields that participate in the hash. */
        struct ibv_flow_attr *ibv_attr; /**< Pointer to Verbs attributes. */
        struct ibv_flow *ibv_flow; /**< Verbs flow. */
        struct mlx5_hrxq *hrxq; /**< Hash Rx queues. */
};

/* Drop flows structures. */
struct mlx5_flow_drop {
        struct ibv_flow_attr *ibv_attr; /**< Pointer to Verbs attributes. */
        struct ibv_flow *ibv_flow; /**< Verbs flow. */
};

struct rte_flow {
        TAILQ_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */
        uint32_t mark:1; /**< Set if the flow is marked. */
        uint32_t drop:1; /**< Drop queue. */
        uint16_t queues_n; /**< Number of entries in queues[]. */
        uint16_t (*queues)[]; /**< Queue indexes to use. */
        struct rte_eth_rss_conf rss_conf; /**< RSS configuration. */
        uint8_t rss_key[40]; /**< Copy of the RSS key. */
        struct ibv_counter_set *cs; /**< Holds the counters for the rule. */
        struct mlx5_flow_counter_stats counter_stats; /**< Counter stats. */
        struct mlx5_flow frxq[RTE_DIM(hash_rxq_init)];
        /**< Flow with Rx queue. */
};

/** Static initializer for items. */
#define ITEMS(...) \
        (const enum rte_flow_item_type []){ \
                __VA_ARGS__, RTE_FLOW_ITEM_TYPE_END, \
        }

/** Structure to generate a simple graph of layers supported by the NIC. */
struct mlx5_flow_items {
        /** List of possible actions for these items. */
        const enum rte_flow_action_type *const actions;
        /** Bit-masks corresponding to the possibilities for the item. */
        const void *mask;
        /**
         * Default bit-masks to use when item->mask is not provided. When
         * \default_mask is also NULL, the full supported bit-mask (\mask) is
         * used instead.
         */
        const void *default_mask;
        /** Bit-masks size in bytes. */
        const unsigned int mask_sz;
        /**
         * Conversion function from rte_flow to NIC specific flow.
         *
         * @param item
         *   rte_flow item to convert.
         * @param default_mask
         *   Default bit-masks to use when item->mask is not provided.
         * @param data
         *   Internal structure to store the conversion.
         *
         * @return
         *   0 on success, negative value otherwise.
         */
        int (*convert)(const struct rte_flow_item *item,
                       const void *default_mask,
                       void *data);
        /** Size in bytes of the destination structure. */
        const unsigned int dst_sz;
        /** List of possible following items. */
        const enum rte_flow_item_type *const items;
};

/** Valid actions for this PMD. */
static const enum rte_flow_action_type valid_actions[] = {
        RTE_FLOW_ACTION_TYPE_DROP,
        RTE_FLOW_ACTION_TYPE_QUEUE,
        RTE_FLOW_ACTION_TYPE_MARK,
        RTE_FLOW_ACTION_TYPE_FLAG,
#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
        RTE_FLOW_ACTION_TYPE_COUNT,
#endif
        RTE_FLOW_ACTION_TYPE_END,
};

/** Graph of supported items and associated actions. */
static const struct mlx5_flow_items mlx5_flow_items[] = {
        [RTE_FLOW_ITEM_TYPE_END] = {
                .items = ITEMS(RTE_FLOW_ITEM_TYPE_ETH,
                               RTE_FLOW_ITEM_TYPE_VXLAN),
        },
        [RTE_FLOW_ITEM_TYPE_ETH] = {
                .items = ITEMS(RTE_FLOW_ITEM_TYPE_VLAN,
                               RTE_FLOW_ITEM_TYPE_IPV4,
                               RTE_FLOW_ITEM_TYPE_IPV6),
                .actions = valid_actions,
                .mask = &(const struct rte_flow_item_eth){
                        .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
                        .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
                        .type = -1,
                },
                .default_mask = &rte_flow_item_eth_mask,
                .mask_sz = sizeof(struct rte_flow_item_eth),
                .convert = mlx5_flow_create_eth,
                .dst_sz = sizeof(struct ibv_flow_spec_eth),
        },
        [RTE_FLOW_ITEM_TYPE_VLAN] = {
                .items = ITEMS(RTE_FLOW_ITEM_TYPE_IPV4,
                               RTE_FLOW_ITEM_TYPE_IPV6),
                .actions = valid_actions,
                .mask = &(const struct rte_flow_item_vlan){
                        .tci = -1,
                },
                .default_mask = &rte_flow_item_vlan_mask,
                .mask_sz = sizeof(struct rte_flow_item_vlan),
                .convert = mlx5_flow_create_vlan,
                .dst_sz = 0,
        },
        [RTE_FLOW_ITEM_TYPE_IPV4] = {
                .items = ITEMS(RTE_FLOW_ITEM_TYPE_UDP,
                               RTE_FLOW_ITEM_TYPE_TCP),
                .actions = valid_actions,
                .mask = &(const struct rte_flow_item_ipv4){
                        .hdr = {
                                .src_addr = -1,
                                .dst_addr = -1,
                                .type_of_service = -1,
                                .next_proto_id = -1,
                                .time_to_live = -1,
                        },
                },
                .default_mask = &rte_flow_item_ipv4_mask,
                .mask_sz = sizeof(struct rte_flow_item_ipv4),
                .convert = mlx5_flow_create_ipv4,
                .dst_sz = sizeof(struct ibv_flow_spec_ipv4_ext),
        },
        [RTE_FLOW_ITEM_TYPE_IPV6] = {
                .items = ITEMS(RTE_FLOW_ITEM_TYPE_UDP,
                               RTE_FLOW_ITEM_TYPE_TCP),
                .actions = valid_actions,
                .mask = &(const struct rte_flow_item_ipv6){
                        .hdr = {
                                .src_addr = {
                                        0xff, 0xff, 0xff, 0xff,
                                        0xff, 0xff, 0xff, 0xff,
                                        0xff, 0xff, 0xff, 0xff,
                                        0xff, 0xff, 0xff, 0xff,
                                },
                                .dst_addr = {
                                        0xff, 0xff, 0xff, 0xff,
                                        0xff, 0xff, 0xff, 0xff,
                                        0xff, 0xff, 0xff, 0xff,
                                        0xff, 0xff, 0xff, 0xff,
                                },
                                .vtc_flow = -1,
                                .proto = -1,
                                .hop_limits = -1,
                        },
                },
                .default_mask = &rte_flow_item_ipv6_mask,
                .mask_sz = sizeof(struct rte_flow_item_ipv6),
                .convert = mlx5_flow_create_ipv6,
                .dst_sz = sizeof(struct ibv_flow_spec_ipv6),
        },
        [RTE_FLOW_ITEM_TYPE_UDP] = {
                .items = ITEMS(RTE_FLOW_ITEM_TYPE_VXLAN),
                .actions = valid_actions,
                .mask = &(const struct rte_flow_item_udp){
                        .hdr = {
                                .src_port = -1,
                                .dst_port = -1,
                        },
                },
                .default_mask = &rte_flow_item_udp_mask,
                .mask_sz = sizeof(struct rte_flow_item_udp),
                .convert = mlx5_flow_create_udp,
                .dst_sz = sizeof(struct ibv_flow_spec_tcp_udp),
        },
        [RTE_FLOW_ITEM_TYPE_TCP] = {
                .actions = valid_actions,
                .mask = &(const struct rte_flow_item_tcp){
                        .hdr = {
                                .src_port = -1,
                                .dst_port = -1,
                        },
                },
                .default_mask = &rte_flow_item_tcp_mask,
                .mask_sz = sizeof(struct rte_flow_item_tcp),
                .convert = mlx5_flow_create_tcp,
                .dst_sz = sizeof(struct ibv_flow_spec_tcp_udp),
        },
        [RTE_FLOW_ITEM_TYPE_VXLAN] = {
                .items = ITEMS(RTE_FLOW_ITEM_TYPE_ETH),
                .actions = valid_actions,
                .mask = &(const struct rte_flow_item_vxlan){
                        .vni = "\xff\xff\xff",
                },
                .default_mask = &rte_flow_item_vxlan_mask,
                .mask_sz = sizeof(struct rte_flow_item_vxlan),
                .convert = mlx5_flow_create_vxlan,
                .dst_sz = sizeof(struct ibv_flow_spec_tunnel),
        },
};

/** Structure to pass to the conversion function. */
struct mlx5_flow_parse {
        uint32_t inner; /**< Set once VXLAN is encountered. */
        uint32_t allmulti:1; /**< Set once allmulti dst MAC is encountered. */
        uint32_t create:1;
        /**< Whether resources should remain after a validate. */
        uint32_t drop:1; /**< Target is a drop queue. */
        uint32_t mark:1; /**< Mark is present in the flow. */
        uint32_t count:1; /**< Count is present in the flow. */
        uint32_t mark_id; /**< Mark identifier. */
        uint16_t queues[RTE_MAX_QUEUES_PER_PORT]; /**< Queue indexes to use. */
        uint16_t queues_n; /**< Number of entries in queues[]. */
        struct rte_eth_rss_conf rss_conf; /**< RSS configuration. */
        uint8_t rss_key[40]; /**< Copy of the RSS key. */
        enum hash_rxq_type layer; /**< Last pattern layer detected. */
        struct ibv_counter_set *cs; /**< Holds the counter set for the rule. */
        struct {
                struct ibv_flow_attr *ibv_attr;
                /**< Pointer to Verbs attributes. */
                unsigned int offset;
                /**< Current position or total size of the attribute. */
        } queue[RTE_DIM(hash_rxq_init)];
};

static const struct rte_flow_ops mlx5_flow_ops = {
        .validate = mlx5_flow_validate,
        .create = mlx5_flow_create,
        .destroy = mlx5_flow_destroy,
        .flush = mlx5_flow_flush,
#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
        .query = mlx5_flow_query,
#else
        .query = NULL,
#endif
        .isolate = mlx5_flow_isolate,
};
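
/*
 * These callbacks are reached through the generic rte_flow API; a minimal
 * illustrative call sequence on the application side (port_id, attr,
 * pattern and actions being the application's own) could look like:
 *
 *      struct rte_flow_error err;
 *      struct rte_flow *flow = NULL;
 *
 *      if (!rte_flow_validate(port_id, &attr, pattern, actions, &err))
 *              flow = rte_flow_create(port_id, &attr, pattern, actions,
 *                                     &err);
 */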

/* Convert FDIR request to Generic flow. */
struct mlx5_fdir {
        struct rte_flow_attr attr;
        struct rte_flow_action actions[2];
        struct rte_flow_item items[4];
        struct rte_flow_item_eth l2;
        struct rte_flow_item_eth l2_mask;
        union {
                struct rte_flow_item_ipv4 ipv4;
                struct rte_flow_item_ipv6 ipv6;
        } l3;
        union {
                struct rte_flow_item_udp udp;
                struct rte_flow_item_tcp tcp;
        } l4;
        struct rte_flow_action_queue queue;
};

/* Verbs specification header. */
struct ibv_spec_header {
        enum ibv_flow_spec_type type;
        uint16_t size;
};

/**
 * Check support for a given item.
 *
 * @param item[in]
 *   Item specification.
 * @param mask[in]
 *   Bit-masks covering supported fields to compare with spec, last and mask in
 *   \item.
 * @param size
 *   Bit-mask size in bytes.
 *
 * @return
 *   0 on success.
 */
static int
mlx5_flow_item_validate(const struct rte_flow_item *item,
                        const uint8_t *mask, unsigned int size)
{
        int ret = 0;

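        /* A mask or range without a spec to apply them to is invalid. */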
        if (!item->spec && (item->mask || item->last))
                return -1;
        if (item->spec && !item->mask) {
                unsigned int i;
                const uint8_t *spec = item->spec;

                for (i = 0; i < size; ++i)
                        if ((spec[i] | mask[i]) != mask[i])
                                return -1;
        }
        if (item->last && !item->mask) {
                unsigned int i;
                const uint8_t *spec = item->last;

                for (i = 0; i < size; ++i)
                        if ((spec[i] | mask[i]) != mask[i])
                                return -1;
        }
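        /*
         * The user-provided mask must itself stay within the supported
         * mask.
         */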
        if (item->mask) {
                unsigned int i;
                const uint8_t *usr_mask = item->mask;

                for (i = 0; i < size; ++i)
                        if ((usr_mask[i] | mask[i]) != mask[i])
                                return -1;
        }
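        /*
         * Ranges are not supported: when both spec and last are provided,
         * they must be equal once the applied mask is taken into account.
         */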
        if (item->spec && item->last) {
                uint8_t spec[size];
                uint8_t last[size];
                const uint8_t *apply = mask;
                unsigned int i;

                if (item->mask)
                        apply = item->mask;
                for (i = 0; i < size; ++i) {
                        spec[i] = ((const uint8_t *)item->spec)[i] & apply[i];
                        last[i] = ((const uint8_t *)item->last)[i] & apply[i];
                }
                ret = memcmp(spec, last, size);
        }
        return ret;
}

/**
 * Copy the RSS configuration from the user-provided one.
 *
 * @param priv
 *   Pointer to private structure.
 * @param parser
 *   Internal parser structure.
 * @param rss_conf
 *   User RSS configuration to save.
 *
 * @return
 *   0 on success, errno value on failure.
 */
static int
priv_flow_convert_rss_conf(struct priv *priv,
                           struct mlx5_flow_parse *parser,
                           const struct rte_eth_rss_conf *rss_conf)
{
        const struct rte_eth_rss_conf *rss;

        if (rss_conf) {
                if (rss_conf->rss_hf & MLX5_RSS_HF_MASK)
                        return EINVAL;
                rss = rss_conf;
        } else {
                rss = &priv->rss_conf;
        }
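        /* parser->rss_key[] holds at most 40 bytes, hence the limit. */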
        if (rss->rss_key_len > 40)
                return EINVAL;
        parser->rss_conf.rss_key_len = rss->rss_key_len;
        parser->rss_conf.rss_hf = rss->rss_hf;
        memcpy(parser->rss_key, rss->rss_key, rss->rss_key_len);
        parser->rss_conf.rss_key = parser->rss_key;
        return 0;
}

/**
 * Validate flow rule attributes.
 *
 * @param priv
 *   Pointer to private structure.
 * @param[in] attr
 *   Flow rule attributes.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 * @param[in, out] parser
 *   Internal parser structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
priv_flow_convert_attributes(struct priv *priv,
                             const struct rte_flow_attr *attr,
                             struct rte_flow_error *error,
                             struct mlx5_flow_parse *parser)
{
        (void)priv;
        (void)parser;
        if (attr->group) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
                                   NULL,
                                   "groups are not supported");
                return -rte_errno;
        }
        if (attr->priority && attr->priority != MLX5_CTRL_FLOW_PRIORITY) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                                   NULL,
                                   "priorities are not supported");
                return -rte_errno;
        }
        if (attr->egress) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                                   NULL,
                                   "egress is not supported");
                return -rte_errno;
        }
        if (!attr->ingress) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                                   NULL,
                                   "only ingress is supported");
                return -rte_errno;
        }
        return 0;
}

/**
 * Extract the actions into the parser.
 *
 * @param priv
 *   Pointer to private structure.
 * @param[in] actions
 *   Associated actions (list terminated by the END action).
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 * @param[in, out] parser
 *   Internal parser structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
priv_flow_convert_actions(struct priv *priv,
                          const struct rte_flow_action actions[],
                          struct rte_flow_error *error,
                          struct mlx5_flow_parse *parser)
{
        /*
         * Add default RSS configuration, required for Verbs to create a QP
         * even when no RSS is requested.
         */
        priv_flow_convert_rss_conf(priv, parser,
                                   (const struct rte_eth_rss_conf *)
                                   &priv->rss_conf);
        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; ++actions) {
                if (actions->type == RTE_FLOW_ACTION_TYPE_VOID) {
                        continue;
                } else if (actions->type == RTE_FLOW_ACTION_TYPE_DROP) {
                        parser->drop = 1;
                } else if (actions->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
                        const struct rte_flow_action_queue *queue =
                                (const struct rte_flow_action_queue *)
                                actions->conf;
                        uint16_t n;
                        uint16_t found = 0;

                        if (!queue || (queue->index > (priv->rxqs_n - 1)))
                                goto exit_action_not_supported;
                        for (n = 0; n < parser->queues_n; ++n) {
                                if (parser->queues[n] == queue->index) {
                                        found = 1;
                                        break;
                                }
                        }
                        if (parser->queues_n > 1 && !found) {
                                rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ACTION,
                                           actions,
                                           "queue action not in RSS queues");
                                return -rte_errno;
                        }
                        if (!found) {
                                parser->queues_n = 1;
                                parser->queues[0] = queue->index;
                        }
                } else if (actions->type == RTE_FLOW_ACTION_TYPE_RSS) {
                        const struct rte_flow_action_rss *rss =
                                (const struct rte_flow_action_rss *)
                                actions->conf;
                        uint16_t n;

                        if (!rss || !rss->num) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ACTION,
                                                   actions,
                                                   "no valid queues");
                                return -rte_errno;
                        }
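                        /*
                         * A QUEUE action seen earlier must target one of the
                         * RSS queues, otherwise the two actions conflict.
                         */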
                        if (parser->queues_n == 1) {
                                uint16_t found = 0;

                                assert(parser->queues_n);
                                for (n = 0; n < rss->num; ++n) {
                                        if (parser->queues[0] ==
                                            rss->queue[n]) {
                                                found = 1;
                                                break;
                                        }
                                }
                                if (!found) {
                                        rte_flow_error_set(error, ENOTSUP,
                                                   RTE_FLOW_ERROR_TYPE_ACTION,
                                                   actions,
                                                   "queue action not in RSS"
                                                   " queues");
                                        return -rte_errno;
                                }
                        }
                        for (n = 0; n < rss->num; ++n) {
                                if (rss->queue[n] >= priv->rxqs_n) {
                                        rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ACTION,
                                                   actions,
                                                   "queue id > number of"
                                                   " queues");
                                        return -rte_errno;
                                }
                        }
                        for (n = 0; n < rss->num; ++n)
                                parser->queues[n] = rss->queue[n];
                        parser->queues_n = rss->num;
                        if (priv_flow_convert_rss_conf(priv, parser,
                                                       rss->rss_conf)) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ACTION,
                                                   actions,
                                                   "wrong RSS configuration");
                                return -rte_errno;
                        }
                } else if (actions->type == RTE_FLOW_ACTION_TYPE_MARK) {
                        const struct rte_flow_action_mark *mark =
                                (const struct rte_flow_action_mark *)
                                actions->conf;

                        if (!mark) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ACTION,
                                                   actions,
                                                   "mark must be defined");
                                return -rte_errno;
                        } else if (mark->id >= MLX5_FLOW_MARK_MAX) {
                                rte_flow_error_set(error, ENOTSUP,
                                                   RTE_FLOW_ERROR_TYPE_ACTION,
                                                   actions,
                                                   "mark must be between 0"
                                                   " and 16777199");
                                return -rte_errno;
                        }
                        parser->mark = 1;
                        parser->mark_id = mark->id;
                } else if (actions->type == RTE_FLOW_ACTION_TYPE_FLAG) {
                        parser->mark = 1;
                } else if (actions->type == RTE_FLOW_ACTION_TYPE_COUNT &&
                           priv->config.flow_counter_en) {
                        parser->count = 1;
                } else {
                        goto exit_action_not_supported;
                }
        }
        if (parser->drop && parser->mark)
                parser->mark = 0;
        if (!parser->queues_n && !parser->drop) {
                rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
                                   NULL, "no valid action");
                return -rte_errno;
        }
        return 0;
exit_action_not_supported:
        rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
                           actions, "action not supported");
        return -rte_errno;
}

/**
 * Validate items.
 *
 * @param priv
 *   Pointer to private structure.
 * @param[in] items
 *   Pattern specification (list terminated by the END pattern item).
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 * @param[in, out] parser
 *   Internal parser structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
priv_flow_convert_items_validate(struct priv *priv,
                                 const struct rte_flow_item items[],
                                 struct rte_flow_error *error,
                                 struct mlx5_flow_parse *parser)
{
        const struct mlx5_flow_items *cur_item = mlx5_flow_items;
        unsigned int i;

        (void)priv;
        /* Initialise the offsets to start after the verbs attribute. */
        for (i = 0; i != hash_rxq_init_n; ++i)
                parser->queue[i].offset = sizeof(struct ibv_flow_attr);
        for (; items->type != RTE_FLOW_ITEM_TYPE_END; ++items) {
                const struct mlx5_flow_items *token = NULL;
                unsigned int n;
                int err;

                if (items->type == RTE_FLOW_ITEM_TYPE_VOID)
                        continue;
                for (i = 0;
                     cur_item->items &&
                     cur_item->items[i] != RTE_FLOW_ITEM_TYPE_END;
                     ++i) {
                        if (cur_item->items[i] == items->type) {
                                token = &mlx5_flow_items[items->type];
                                break;
                        }
                }
                if (!token)
                        goto exit_item_not_supported;
                cur_item = token;
                err = mlx5_flow_item_validate(items,
                                              (const uint8_t *)cur_item->mask,
                                              cur_item->mask_sz);
                if (err)
                        goto exit_item_not_supported;
                if (items->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
                        if (parser->inner) {
                                rte_flow_error_set(error, ENOTSUP,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   items,
                                                   "cannot recognize multiple"
                                                   " VXLAN encapsulations");
                                return -rte_errno;
                        }
                        parser->inner = IBV_FLOW_SPEC_INNER;
                }
                if (parser->drop || parser->queues_n == 1) {
                        parser->queue[HASH_RXQ_ETH].offset += cur_item->dst_sz;
                } else {
                        for (n = 0; n != hash_rxq_init_n; ++n)
                                parser->queue[n].offset += cur_item->dst_sz;
                }
        }
        if (parser->drop) {
                parser->queue[HASH_RXQ_ETH].offset +=
                        sizeof(struct ibv_flow_spec_action_drop);
        }
        if (parser->mark) {
                for (i = 0; i != hash_rxq_init_n; ++i)
                        parser->queue[i].offset +=
                                sizeof(struct ibv_flow_spec_action_tag);
        }
        if (parser->count) {
                unsigned int size = sizeof(struct ibv_flow_spec_counter_action);

                for (i = 0; i != hash_rxq_init_n; ++i)
                        parser->queue[i].offset += size;
        }
        return 0;
exit_item_not_supported:
        rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
                           items, "item not supported");
        return -rte_errno;
}

/**
 * Allocate memory space to store verbs flow attributes.
 *
 * @param priv
 *   Pointer to private structure.
 * @param[in] priority
 *   Flow priority.
 * @param[in] size
 *   Number of bytes to allocate.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A verbs flow attribute on success, NULL otherwise.
 */
static struct ibv_flow_attr*
priv_flow_convert_allocate(struct priv *priv,
                           unsigned int priority,
                           unsigned int size,
                           struct rte_flow_error *error)
{
        struct ibv_flow_attr *ibv_attr;

        (void)priv;
        ibv_attr = rte_calloc(__func__, 1, size, 0);
        if (!ibv_attr) {
                rte_flow_error_set(error, ENOMEM,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                   NULL,
                                   "cannot allocate verbs spec attributes.");
                return NULL;
        }
        ibv_attr->priority = priority;
        return ibv_attr;
}

/**
 * Finalise verbs flow attributes.
 *
 * @param priv
 *   Pointer to private structure.
 * @param[in, out] parser
 *   Internal parser structure.
 */
static void
priv_flow_convert_finalise(struct priv *priv, struct mlx5_flow_parse *parser)
{
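        /*
         * [hmin, hmax] spans the hash Rx queue types matching the IP version
         * of the pattern; [ohmin, ohmax] spans those of the opposite IP
         * version, which are removed below.
         */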
        const unsigned int ipv4 =
                hash_rxq_init[parser->layer].ip_version == MLX5_IPV4;
        const enum hash_rxq_type hmin = ipv4 ? HASH_RXQ_TCPV4 : HASH_RXQ_TCPV6;
        const enum hash_rxq_type hmax = ipv4 ? HASH_RXQ_IPV4 : HASH_RXQ_IPV6;
        const enum hash_rxq_type ohmin = ipv4 ? HASH_RXQ_TCPV6 : HASH_RXQ_TCPV4;
        const enum hash_rxq_type ohmax = ipv4 ? HASH_RXQ_IPV6 : HASH_RXQ_IPV4;
        const enum hash_rxq_type ip = ipv4 ? HASH_RXQ_IPV4 : HASH_RXQ_IPV6;
        unsigned int i;

        (void)priv;
        if (parser->layer == HASH_RXQ_ETH) {
                goto fill;
        } else {
                /*
                 * The generic Ethernet layer is superseded by the more
                 * specific layers defined in the pattern.
                 */
                rte_free(parser->queue[HASH_RXQ_ETH].ibv_attr);
                parser->queue[HASH_RXQ_ETH].ibv_attr = NULL;
        }
        /* Remove opposite kind of layer e.g. IPv6 if the pattern is IPv4. */
        for (i = ohmin; i != (ohmax + 1); ++i) {
                if (!parser->queue[i].ibv_attr)
                        continue;
                rte_free(parser->queue[i].ibv_attr);
                parser->queue[i].ibv_attr = NULL;
        }
        /* Remove impossible flow according to the RSS configuration. */
        if (hash_rxq_init[parser->layer].dpdk_rss_hf &
            parser->rss_conf.rss_hf) {
                /* Remove any other flow. */
                for (i = hmin; i != (hmax + 1); ++i) {
                        if ((i == parser->layer) ||
                             (!parser->queue[i].ibv_attr))
                                continue;
                        rte_free(parser->queue[i].ibv_attr);
                        parser->queue[i].ibv_attr = NULL;
                }
        } else if (!parser->queue[ip].ibv_attr) {
                /* No RSS possible with the current configuration. */
                parser->queues_n = 1;
                return;
        }
fill:
        /*
         * Fill missing layers in verbs specifications, or compute the correct
         * offset to allocate the memory space for the attributes and
         * specifications.
         */
        for (i = 0; i != hash_rxq_init_n - 1; ++i) {
                union {
                        struct ibv_flow_spec_ipv4_ext ipv4;
                        struct ibv_flow_spec_ipv6 ipv6;
                        struct ibv_flow_spec_tcp_udp udp_tcp;
                } specs;
                void *dst;
                uint16_t size;

                if (i == parser->layer)
                        continue;
                if (parser->layer == HASH_RXQ_ETH) {
                        if (hash_rxq_init[i].ip_version == MLX5_IPV4) {
                                size = sizeof(struct ibv_flow_spec_ipv4_ext);
                                specs.ipv4 = (struct ibv_flow_spec_ipv4_ext){
                                        .type = IBV_FLOW_SPEC_IPV4_EXT,
                                        .size = size,
                                };
                        } else {
                                size = sizeof(struct ibv_flow_spec_ipv6);
                                specs.ipv6 = (struct ibv_flow_spec_ipv6){
                                        .type = IBV_FLOW_SPEC_IPV6,
                                        .size = size,
                                };
                        }
                        if (parser->queue[i].ibv_attr) {
                                dst = (void *)((uintptr_t)
                                               parser->queue[i].ibv_attr +
                                               parser->queue[i].offset);
                                memcpy(dst, &specs, size);
                                ++parser->queue[i].ibv_attr->num_of_specs;
                        }
                        parser->queue[i].offset += size;
                }
                if ((i == HASH_RXQ_UDPV4) || (i == HASH_RXQ_TCPV4) ||
                    (i == HASH_RXQ_UDPV6) || (i == HASH_RXQ_TCPV6)) {
                        size = sizeof(struct ibv_flow_spec_tcp_udp);
                        specs.udp_tcp = (struct ibv_flow_spec_tcp_udp) {
                                .type = ((i == HASH_RXQ_UDPV4 ||
                                          i == HASH_RXQ_UDPV6) ?
                                         IBV_FLOW_SPEC_UDP :
                                         IBV_FLOW_SPEC_TCP),
                                .size = size,
                        };
                        if (parser->queue[i].ibv_attr) {
                                dst = (void *)((uintptr_t)
                                               parser->queue[i].ibv_attr +
                                               parser->queue[i].offset);
                                memcpy(dst, &specs, size);
                                ++parser->queue[i].ibv_attr->num_of_specs;
                        }
                        parser->queue[i].offset += size;
                }
        }
}

/**
 * Validate and convert a flow supported by the NIC.
 *
 * @param priv
 *   Pointer to private structure.
 * @param[in] attr
 *   Flow rule attributes.
 * @param[in] pattern
 *   Pattern specification (list terminated by the END pattern item).
 * @param[in] actions
 *   Associated actions (list terminated by the END action).
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 * @param[in, out] parser
 *   Internal parser structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
priv_flow_convert(struct priv *priv,
                  const struct rte_flow_attr *attr,
                  const struct rte_flow_item items[],
                  const struct rte_flow_action actions[],
                  struct rte_flow_error *error,
                  struct mlx5_flow_parse *parser)
{
        const struct mlx5_flow_items *cur_item = mlx5_flow_items;
        unsigned int i;
        int ret;

        /* First step. Validate the attributes, items and actions. */
        *parser = (struct mlx5_flow_parse){
                .create = parser->create,
                .layer = HASH_RXQ_ETH,
                .mark_id = MLX5_FLOW_MARK_DEFAULT,
        };
        ret = priv_flow_convert_attributes(priv, attr, error, parser);
        if (ret)
                return ret;
        ret = priv_flow_convert_actions(priv, actions, error, parser);
        if (ret)
                return ret;
        ret = priv_flow_convert_items_validate(priv, items, error, parser);
        if (ret)
                return ret;
        priv_flow_convert_finalise(priv, parser);
        /*
         * Second step.
         * Allocate the memory space to store verbs specifications.
         */
        if (parser->drop || parser->queues_n == 1) {
                unsigned int priority =
                        attr->priority +
                        hash_rxq_init[HASH_RXQ_ETH].flow_priority;
                unsigned int offset = parser->queue[HASH_RXQ_ETH].offset;

                parser->queue[HASH_RXQ_ETH].ibv_attr =
                        priv_flow_convert_allocate(priv, priority,
                                                   offset, error);
                if (!parser->queue[HASH_RXQ_ETH].ibv_attr)
                        return ENOMEM;
                parser->queue[HASH_RXQ_ETH].offset =
                        sizeof(struct ibv_flow_attr);
        } else {
                for (i = 0; i != hash_rxq_init_n; ++i) {
                        unsigned int priority =
                                attr->priority +
                                hash_rxq_init[i].flow_priority;
                        unsigned int offset;

                        if (!(parser->rss_conf.rss_hf &
                              hash_rxq_init[i].dpdk_rss_hf) &&
                            (i != HASH_RXQ_ETH))
                                continue;
                        offset = parser->queue[i].offset;
                        parser->queue[i].ibv_attr =
                                priv_flow_convert_allocate(priv, priority,
                                                           offset, error);
                        if (!parser->queue[i].ibv_attr)
                                goto exit_enomem;
                        parser->queue[i].offset = sizeof(struct ibv_flow_attr);
                }
        }
        /* Third step. Conversion parse, fill the specifications. */
        parser->inner = 0;
        for (; items->type != RTE_FLOW_ITEM_TYPE_END; ++items) {
                if (items->type == RTE_FLOW_ITEM_TYPE_VOID)
                        continue;
                cur_item = &mlx5_flow_items[items->type];
                ret = cur_item->convert(items,
                                        (cur_item->default_mask ?
                                         cur_item->default_mask :
                                         cur_item->mask),
                                        parser);
                if (ret) {
                        rte_flow_error_set(error, ret,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           items, "item not supported");
                        goto exit_free;
                }
        }
        if (parser->mark)
                mlx5_flow_create_flag_mark(parser, parser->mark_id);
        if (parser->count && parser->create) {
                mlx5_flow_create_count(priv, parser);
                if (!parser->cs)
                        goto exit_count_error;
        }
        /*
         * Last step. Complete missing specification to reach the RSS
         * configuration.
         */
        if (parser->queues_n > 1) {
                priv_flow_convert_finalise(priv, parser);
        } else {
                /*
                 * A single queue action has its priority overridden by the
                 * Ethernet layer priority; adjust it back to the priority of
                 * the most specific layer matched.
                 */
                parser->queue[HASH_RXQ_ETH].ibv_attr->priority =
                        attr->priority +
                        hash_rxq_init[parser->layer].flow_priority;
        }
        if (parser->allmulti &&
            parser->layer == HASH_RXQ_ETH) {
                for (i = 0; i != hash_rxq_init_n; ++i) {
                        if (!parser->queue[i].ibv_attr)
                                continue;
                        if (parser->queue[i].ibv_attr->num_of_specs != 1)
                                break;
                        parser->queue[i].ibv_attr->type =
                                                IBV_FLOW_ATTR_MC_DEFAULT;
                }
        }
exit_free:
        /* Only verification is expected, all resources should be released. */
        if (!parser->create) {
                for (i = 0; i != hash_rxq_init_n; ++i) {
                        if (parser->queue[i].ibv_attr) {
                                rte_free(parser->queue[i].ibv_attr);
                                parser->queue[i].ibv_attr = NULL;
                        }
                }
        }
        return ret;
exit_enomem:
        for (i = 0; i != hash_rxq_init_n; ++i) {
                if (parser->queue[i].ibv_attr) {
                        rte_free(parser->queue[i].ibv_attr);
                        parser->queue[i].ibv_attr = NULL;
                }
        }
        rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                           NULL, "cannot allocate verbs spec attributes.");
        return ret;
exit_count_error:
        rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                           NULL, "cannot create counter.");
        return rte_errno;
}

/**
 * Copy the specification created into the flow.
 *
 * @param parser
 *   Internal parser structure.
 * @param src
 *   Create specification.
 * @param size
 *   Size in bytes of the specification to copy.
 */
static void
mlx5_flow_create_copy(struct mlx5_flow_parse *parser, void *src,
                      unsigned int size)
{
        unsigned int i;
        void *dst;

        for (i = 0; i != hash_rxq_init_n; ++i) {
                if (!parser->queue[i].ibv_attr)
                        continue;
                /* Specification must be the same L3 type or none. */
                if (parser->layer == HASH_RXQ_ETH ||
                    (hash_rxq_init[parser->layer].ip_version ==
                     hash_rxq_init[i].ip_version) ||
                    (hash_rxq_init[i].ip_version == 0)) {
                        dst = (void *)((uintptr_t)parser->queue[i].ibv_attr +
                                        parser->queue[i].offset);
                        memcpy(dst, src, size);
                        ++parser->queue[i].ibv_attr->num_of_specs;
                        parser->queue[i].offset += size;
                }
        }
}

/**
 * Convert Ethernet item to Verbs specification.
 *
 * @param item[in]
 *   Item specification.
 * @param default_mask[in]
 *   Default bit-masks to use when item->mask is not provided.
 * @param data[in, out]
 *   User structure.
 */
static int
mlx5_flow_create_eth(const struct rte_flow_item *item,
                     const void *default_mask,
                     void *data)
{
        const struct rte_flow_item_eth *spec = item->spec;
        const struct rte_flow_item_eth *mask = item->mask;
        struct mlx5_flow_parse *parser = (struct mlx5_flow_parse *)data;
        const unsigned int eth_size = sizeof(struct ibv_flow_spec_eth);
        struct ibv_flow_spec_eth eth = {
                .type = parser->inner | IBV_FLOW_SPEC_ETH,
                .size = eth_size,
        };

        /* Don't update layer for the inner pattern. */
        if (!parser->inner)
                parser->layer = HASH_RXQ_ETH;
        if (spec) {
                unsigned int i;

                if (!mask)
                        mask = default_mask;
                memcpy(&eth.val.dst_mac, spec->dst.addr_bytes, ETHER_ADDR_LEN);
                memcpy(&eth.val.src_mac, spec->src.addr_bytes, ETHER_ADDR_LEN);
                eth.val.ether_type = spec->type;
                memcpy(&eth.mask.dst_mac, mask->dst.addr_bytes, ETHER_ADDR_LEN);
                memcpy(&eth.mask.src_mac, mask->src.addr_bytes, ETHER_ADDR_LEN);
                eth.mask.ether_type = mask->type;
                /* Remove unwanted bits from values. */
                for (i = 0; i < ETHER_ADDR_LEN; ++i) {
                        eth.val.dst_mac[i] &= eth.mask.dst_mac[i];
                        eth.val.src_mac[i] &= eth.mask.src_mac[i];
                }
                eth.val.ether_type &= eth.mask.ether_type;
        }
        mlx5_flow_create_copy(parser, &eth, eth_size);
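        /* An odd first destination MAC byte (I/G bit set) means multicast. */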
        parser->allmulti = eth.val.dst_mac[0] & 1;
        return 0;
}

/**
 * Convert VLAN item to Verbs specification.
 *
 * @param item[in]
 *   Item specification.
 * @param default_mask[in]
 *   Default bit-masks to use when item->mask is not provided.
 * @param data[in, out]
 *   User structure.
 */
static int
mlx5_flow_create_vlan(const struct rte_flow_item *item,
                      const void *default_mask,
                      void *data)
{
        const struct rte_flow_item_vlan *spec = item->spec;
        const struct rte_flow_item_vlan *mask = item->mask;
        struct mlx5_flow_parse *parser = (struct mlx5_flow_parse *)data;
        struct ibv_flow_spec_eth *eth;
        const unsigned int eth_size = sizeof(struct ibv_flow_spec_eth);

1303         if (spec) {
1304                 unsigned int i;
1305                 if (!mask)
1306                         mask = default_mask;
1307
1308                 for (i = 0; i != hash_rxq_init_n; ++i) {
1309                         if (!parser->queue[i].ibv_attr)
1310                                 continue;
1311
1312                         eth = (void *)((uintptr_t)parser->queue[i].ibv_attr +
1313                                        parser->queue[i].offset - eth_size);
1314                         eth->val.vlan_tag = spec->tci;
1315                         eth->mask.vlan_tag = mask->tci;
1316                         eth->val.vlan_tag &= eth->mask.vlan_tag;
1317                 }
1318         }
1319         return 0;
1320 }
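
/*
 * TCI layout handled above (IEEE 802.1Q), for reference: bits 15-13
 * carry the PCP, bit 12 the DEI and bits 11-0 the VLAN ID, all in
 * network byte order in spec->tci. Matching only the VLAN ID therefore
 * takes a mask of RTE_BE16(0x0fff).
 */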
1321
1322 /**
1323  * Convert IPv4 item to Verbs specification.
1324  *
1325  * @param[in] item
1326  *   Item specification.
1327  * @param[in] default_mask
1328  *   Default bit-masks to use when item->mask is not provided.
1329  * @param[in, out] data
1330  *   User structure.
1331  */
1332 static int
1333 mlx5_flow_create_ipv4(const struct rte_flow_item *item,
1334                       const void *default_mask,
1335                       void *data)
1336 {
1337         const struct rte_flow_item_ipv4 *spec = item->spec;
1338         const struct rte_flow_item_ipv4 *mask = item->mask;
1339         struct mlx5_flow_parse *parser = (struct mlx5_flow_parse *)data;
1340         unsigned int ipv4_size = sizeof(struct ibv_flow_spec_ipv4_ext);
1341         struct ibv_flow_spec_ipv4_ext ipv4 = {
1342                 .type = parser->inner | IBV_FLOW_SPEC_IPV4_EXT,
1343                 .size = ipv4_size,
1344         };
1345
1346         /* Don't update layer for the inner pattern. */
1347         if (!parser->inner)
1348                 parser->layer = HASH_RXQ_IPV4;
1349         if (spec) {
1350                 if (!mask)
1351                         mask = default_mask;
1352                 ipv4.val = (struct ibv_flow_ipv4_ext_filter){
1353                         .src_ip = spec->hdr.src_addr,
1354                         .dst_ip = spec->hdr.dst_addr,
1355                         .proto = spec->hdr.next_proto_id,
1356                         .tos = spec->hdr.type_of_service,
1357                 };
1358                 ipv4.mask = (struct ibv_flow_ipv4_ext_filter){
1359                         .src_ip = mask->hdr.src_addr,
1360                         .dst_ip = mask->hdr.dst_addr,
1361                         .proto = mask->hdr.next_proto_id,
1362                         .tos = mask->hdr.type_of_service,
1363                 };
1364                 /* Remove unwanted bits from values. */
1365                 ipv4.val.src_ip &= ipv4.mask.src_ip;
1366                 ipv4.val.dst_ip &= ipv4.mask.dst_ip;
1367                 ipv4.val.proto &= ipv4.mask.proto;
1368                 ipv4.val.tos &= ipv4.mask.tos;
1369         }
1370         mlx5_flow_create_copy(parser, &ipv4, ipv4_size);
1371         return 0;
1372 }
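
/*
 * Worked example of the masking above: with spec->hdr.src_addr set to
 * 10.1.2.3 and mask->hdr.src_addr set to 255.255.255.0, the resulting
 * Verbs filter holds src_ip = 10.1.2.0, so hardware compares only the
 * 24 bits covered by the mask.
 */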
1373
1374 /**
1375  * Convert IPv6 item to Verbs specification.
1376  *
1377  * @param[in] item
1378  *   Item specification.
1379  * @param[in] default_mask
1380  *   Default bit-masks to use when item->mask is not provided.
1381  * @param[in, out] data
1382  *   User structure.
1383  */
1384 static int
1385 mlx5_flow_create_ipv6(const struct rte_flow_item *item,
1386                       const void *default_mask,
1387                       void *data)
1388 {
1389         const struct rte_flow_item_ipv6 *spec = item->spec;
1390         const struct rte_flow_item_ipv6 *mask = item->mask;
1391         struct mlx5_flow_parse *parser = (struct mlx5_flow_parse *)data;
1392         unsigned int ipv6_size = sizeof(struct ibv_flow_spec_ipv6);
1393         struct ibv_flow_spec_ipv6 ipv6 = {
1394                 .type = parser->inner | IBV_FLOW_SPEC_IPV6,
1395                 .size = ipv6_size,
1396         };
1397
1398         /* Don't update layer for the inner pattern. */
1399         if (!parser->inner)
1400                 parser->layer = HASH_RXQ_IPV6;
1401         if (spec) {
1402                 unsigned int i;
1403                 uint32_t vtc_flow_val;
1404                 uint32_t vtc_flow_mask;
1405
1406                 if (!mask)
1407                         mask = default_mask;
1408                 memcpy(&ipv6.val.src_ip, spec->hdr.src_addr,
1409                        RTE_DIM(ipv6.val.src_ip));
1410                 memcpy(&ipv6.val.dst_ip, spec->hdr.dst_addr,
1411                        RTE_DIM(ipv6.val.dst_ip));
1412                 memcpy(&ipv6.mask.src_ip, mask->hdr.src_addr,
1413                        RTE_DIM(ipv6.mask.src_ip));
1414                 memcpy(&ipv6.mask.dst_ip, mask->hdr.dst_addr,
1415                        RTE_DIM(ipv6.mask.dst_ip));
1416                 vtc_flow_val = rte_be_to_cpu_32(spec->hdr.vtc_flow);
1417                 vtc_flow_mask = rte_be_to_cpu_32(mask->hdr.vtc_flow);
1418                 ipv6.val.flow_label =
1419                         rte_cpu_to_be_32((vtc_flow_val & IPV6_HDR_FL_MASK) >>
1420                                          IPV6_HDR_FL_SHIFT);
1421                 ipv6.val.traffic_class = (vtc_flow_val & IPV6_HDR_TC_MASK) >>
1422                                          IPV6_HDR_TC_SHIFT;
1423                 ipv6.val.next_hdr = spec->hdr.proto;
1424                 ipv6.val.hop_limit = spec->hdr.hop_limits;
1425                 ipv6.mask.flow_label =
1426                         rte_cpu_to_be_32((vtc_flow_mask & IPV6_HDR_FL_MASK) >>
1427                                          IPV6_HDR_FL_SHIFT);
1428                 ipv6.mask.traffic_class = (vtc_flow_mask & IPV6_HDR_TC_MASK) >>
1429                                           IPV6_HDR_TC_SHIFT;
1430                 ipv6.mask.next_hdr = mask->hdr.proto;
1431                 ipv6.mask.hop_limit = mask->hdr.hop_limits;
1432                 /* Remove unwanted bits from values. */
1433                 for (i = 0; i < RTE_DIM(ipv6.val.src_ip); ++i) {
1434                         ipv6.val.src_ip[i] &= ipv6.mask.src_ip[i];
1435                         ipv6.val.dst_ip[i] &= ipv6.mask.dst_ip[i];
1436                 }
1437                 ipv6.val.flow_label &= ipv6.mask.flow_label;
1438                 ipv6.val.traffic_class &= ipv6.mask.traffic_class;
1439                 ipv6.val.next_hdr &= ipv6.mask.next_hdr;
1440                 ipv6.val.hop_limit &= ipv6.mask.hop_limit;
1441         }
1442         mlx5_flow_create_copy(parser, &ipv6, ipv6_size);
1443         return 0;
1444 }
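
/*
 * Reminder on the vtc_flow word decomposed above (RFC 2460): bits
 * 31-28 hold the IP version, bits 27-20 the traffic class and bits
 * 19-0 the flow label, which is what the IPV6_HDR_TC_* and
 * IPV6_HDR_FL_* masks and shifts from rte_ip.h extract.
 */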
1445
1446 /**
1447  * Convert UDP item to Verbs specification.
1448  *
1449  * @param[in] item
1450  *   Item specification.
1451  * @param[in] default_mask
1452  *   Default bit-masks to use when item->mask is not provided.
1453  * @param[in, out] data
1454  *   User structure.
1455  */
1456 static int
1457 mlx5_flow_create_udp(const struct rte_flow_item *item,
1458                      const void *default_mask,
1459                      void *data)
1460 {
1461         const struct rte_flow_item_udp *spec = item->spec;
1462         const struct rte_flow_item_udp *mask = item->mask;
1463         struct mlx5_flow_parse *parser = (struct mlx5_flow_parse *)data;
1464         unsigned int udp_size = sizeof(struct ibv_flow_spec_tcp_udp);
1465         struct ibv_flow_spec_tcp_udp udp = {
1466                 .type = parser->inner | IBV_FLOW_SPEC_UDP,
1467                 .size = udp_size,
1468         };
1469
1470         /* Don't update layer for the inner pattern. */
1471         if (!parser->inner) {
1472                 if (parser->layer == HASH_RXQ_IPV4)
1473                         parser->layer = HASH_RXQ_UDPV4;
1474                 else
1475                         parser->layer = HASH_RXQ_UDPV6;
1476         }
1477         if (spec) {
1478                 if (!mask)
1479                         mask = default_mask;
1480                 udp.val.dst_port = spec->hdr.dst_port;
1481                 udp.val.src_port = spec->hdr.src_port;
1482                 udp.mask.dst_port = mask->hdr.dst_port;
1483                 udp.mask.src_port = mask->hdr.src_port;
1484                 /* Remove unwanted bits from values. */
1485                 udp.val.src_port &= udp.mask.src_port;
1486                 udp.val.dst_port &= udp.mask.dst_port;
1487         }
1488         mlx5_flow_create_copy(parser, &udp, udp_size);
1489         return 0;
1490 }
1491
1492 /**
1493  * Convert TCP item to Verbs specification.
1494  *
1495  * @param[in] item
1496  *   Item specification.
1497  * @param[in] default_mask
1498  *   Default bit-masks to use when item->mask is not provided.
1499  * @param[in, out] data
1500  *   User structure.
1501  */
1502 static int
1503 mlx5_flow_create_tcp(const struct rte_flow_item *item,
1504                      const void *default_mask,
1505                      void *data)
1506 {
1507         const struct rte_flow_item_tcp *spec = item->spec;
1508         const struct rte_flow_item_tcp *mask = item->mask;
1509         struct mlx5_flow_parse *parser = (struct mlx5_flow_parse *)data;
1510         unsigned int tcp_size = sizeof(struct ibv_flow_spec_tcp_udp);
1511         struct ibv_flow_spec_tcp_udp tcp = {
1512                 .type = parser->inner | IBV_FLOW_SPEC_TCP,
1513                 .size = tcp_size,
1514         };
1515
1516         /* Don't update layer for the inner pattern. */
1517         if (!parser->inner) {
1518                 if (parser->layer == HASH_RXQ_IPV4)
1519                         parser->layer = HASH_RXQ_TCPV4;
1520                 else
1521                         parser->layer = HASH_RXQ_TCPV6;
1522         }
1523         if (spec) {
1524                 if (!mask)
1525                         mask = default_mask;
1526                 tcp.val.dst_port = spec->hdr.dst_port;
1527                 tcp.val.src_port = spec->hdr.src_port;
1528                 tcp.mask.dst_port = mask->hdr.dst_port;
1529                 tcp.mask.src_port = mask->hdr.src_port;
1530                 /* Remove unwanted bits from values. */
1531                 tcp.val.src_port &= tcp.mask.src_port;
1532                 tcp.val.dst_port &= tcp.mask.dst_port;
1533         }
1534         mlx5_flow_create_copy(parser, &tcp, tcp_size);
1535         return 0;
1536 }
1537
1538 /**
1539  * Convert VXLAN item to Verbs specification.
1540  *
1541  * @param[in] item
1542  *   Item specification.
1543  * @param[in] default_mask
1544  *   Default bit-masks to use when item->mask is not provided.
1545  * @param[in, out] data
1546  *   User structure.
1547  */
1548 static int
1549 mlx5_flow_create_vxlan(const struct rte_flow_item *item,
1550                        const void *default_mask,
1551                        void *data)
1552 {
1553         const struct rte_flow_item_vxlan *spec = item->spec;
1554         const struct rte_flow_item_vxlan *mask = item->mask;
1555         struct mlx5_flow_parse *parser = (struct mlx5_flow_parse *)data;
1556         unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
1557         struct ibv_flow_spec_tunnel vxlan = {
1558                 .type = parser->inner | IBV_FLOW_SPEC_VXLAN_TUNNEL,
1559                 .size = size,
1560         };
1561         union vni {
1562                 uint32_t vlan_id;
1563                 uint8_t vni[4];
1564         } id;
1565
1566         id.vni[0] = 0;
1567         parser->inner = IBV_FLOW_SPEC_INNER;
1568         if (spec) {
1569                 if (!mask)
1570                         mask = default_mask;
1571                 memcpy(&id.vni[1], spec->vni, 3);
1572                 vxlan.val.tunnel_id = id.vlan_id;
1573                 memcpy(&id.vni[1], mask->vni, 3);
1574                 vxlan.mask.tunnel_id = id.vlan_id;
1575                 /* Remove unwanted bits from values. */
1576                 vxlan.val.tunnel_id &= vxlan.mask.tunnel_id;
1577         }
1578         /*
1579          * Tunnel id 0 is equivalent to not adding a VXLAN layer: if this
1580          * is the only layer in the Verbs specification, it is interpreted
1581          * as a wildcard and all packets match the rule, while if it
1582          * follows a full stack of layers (e.g. eth / ipv4 / udp), any
1583          * packet matching those preceding layers also matches the rule.
1584          * To avoid such situations, VNI 0 is currently refused.
1585          */
1586         if (!vxlan.val.tunnel_id)
1587                 return EINVAL;
1588         mlx5_flow_create_copy(parser, &vxlan, size);
1589         return 0;
1590 }
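
/*
 * Packing example for the union used above: a VNI of 0x123456 arrives
 * as spec->vni[] = {0x12, 0x34, 0x56}; after the memcpy(), id.vni[]
 * holds {0x00, 0x12, 0x34, 0x56}, i.e. the 24-bit VNI in the low three
 * bytes of a 32-bit big-endian tunnel id.
 */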
1591
1592 /**
1593  * Convert mark/flag action to Verbs specification.
1594  *
1595  * @param parser
1596  *   Internal parser structure.
1597  * @param mark_id
1598  *   Mark identifier.
1599  */
1600 static int
1601 mlx5_flow_create_flag_mark(struct mlx5_flow_parse *parser, uint32_t mark_id)
1602 {
1603         unsigned int size = sizeof(struct ibv_flow_spec_action_tag);
1604         struct ibv_flow_spec_action_tag tag = {
1605                 .type = IBV_FLOW_SPEC_ACTION_TAG,
1606                 .size = size,
1607                 .tag_id = mlx5_flow_mark_set(mark_id),
1608         };
1609
1610         assert(parser->mark);
1611         mlx5_flow_create_copy(parser, &tag, size);
1612         return 0;
1613 }
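
/*
 * Illustrative sketch, not part of this driver: the application-side
 * counterpart of the tag specification built above. A MARK action
 * requests the value and received mbufs carry it back. The guard macro
 * is hypothetical so that the example is never built.
 */
#ifdef MLX5_FLOW_EXAMPLES
#include <rte_mbuf.h>

static const struct rte_flow_action_mark example_mark = { .id = 42 };
static const struct rte_flow_action example_mark_action = {
        .type = RTE_FLOW_ACTION_TYPE_MARK,
        .conf = &example_mark,
};

static inline uint32_t
example_read_mark(const struct rte_mbuf *m)
{
        /* hash.fdir.hi holds the mark when PKT_RX_FDIR_ID is set. */
        return (m->ol_flags & PKT_RX_FDIR_ID) ? m->hash.fdir.hi : 0;
}
#endif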
1614
1615 /**
1616  * Convert count action to Verbs specification.
1617  *
1618  * @param priv
1619  *   Pointer to private structure.
1620  * @param parser
1621  *   Pointer to MLX5 flow parser structure.
1622  *
1623  * @return
1624  *   0 on success, errno value on failure.
1625  */
1626 static int
1627 mlx5_flow_create_count(struct priv *priv __rte_unused,
1628                        struct mlx5_flow_parse *parser __rte_unused)
1629 {
1630 #ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
1631         unsigned int size = sizeof(struct ibv_flow_spec_counter_action);
1632         struct ibv_counter_set_init_attr init_attr = {0};
1633         struct ibv_flow_spec_counter_action counter = {
1634                 .type = IBV_FLOW_SPEC_ACTION_COUNT,
1635                 .size = size,
1636                 .counter_set_handle = 0,
1637         };
1638
1639         init_attr.counter_set_id = 0;
1640         parser->cs = mlx5_glue->create_counter_set(priv->ctx, &init_attr);
1641         if (!parser->cs)
1642                 return EINVAL;
1643         counter.counter_set_handle = parser->cs->handle;
1644         mlx5_flow_create_copy(parser, &counter, size);
1645 #endif
1646         return 0;
1647 }
1648
1649 /**
1650  * Complete flow rule creation with a drop queue.
1651  *
1652  * @param priv
1653  *   Pointer to private structure.
1654  * @param parser
1655  *   Internal parser structure.
1656  * @param flow
1657  *   Pointer to the rte_flow.
1658  * @param[out] error
1659  *   Perform verbose error reporting if not NULL.
1660  *
1661  * @return
1662  *   0 on success, errno value on failure.
1663  */
1664 static int
1665 priv_flow_create_action_queue_drop(struct priv *priv,
1666                                    struct mlx5_flow_parse *parser,
1667                                    struct rte_flow *flow,
1668                                    struct rte_flow_error *error)
1669 {
1670         struct ibv_flow_spec_action_drop *drop;
1671         unsigned int size = sizeof(struct ibv_flow_spec_action_drop);
1672         int err = 0;
1673
1674         assert(priv->pd);
1675         assert(priv->ctx);
1676         flow->drop = 1;
1677         drop = (void *)((uintptr_t)parser->queue[HASH_RXQ_ETH].ibv_attr +
1678                         parser->queue[HASH_RXQ_ETH].offset);
1679         *drop = (struct ibv_flow_spec_action_drop){
1680                         .type = IBV_FLOW_SPEC_ACTION_DROP,
1681                         .size = size,
1682         };
1683         ++parser->queue[HASH_RXQ_ETH].ibv_attr->num_of_specs;
1684         parser->queue[HASH_RXQ_ETH].offset += size;
1685         flow->frxq[HASH_RXQ_ETH].ibv_attr =
1686                 parser->queue[HASH_RXQ_ETH].ibv_attr;
1687         if (parser->count)
1688                 flow->cs = parser->cs;
1689         if (!priv->dev->data->dev_started)
1690                 return 0;
1691         parser->queue[HASH_RXQ_ETH].ibv_attr = NULL;
1692         flow->frxq[HASH_RXQ_ETH].ibv_flow =
1693                 mlx5_glue->create_flow(priv->flow_drop_queue->qp,
1694                                        flow->frxq[HASH_RXQ_ETH].ibv_attr);
1695         if (!flow->frxq[HASH_RXQ_ETH].ibv_flow) {
1696                 rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
1697                                    NULL, "flow rule creation failure");
1698                 err = ENOMEM;
1699                 goto error;
1700         }
1701         return 0;
1702 error:
1703         assert(flow);
1704         if (flow->frxq[HASH_RXQ_ETH].ibv_flow) {
1705                 claim_zero(mlx5_glue->destroy_flow
1706                            (flow->frxq[HASH_RXQ_ETH].ibv_flow));
1707                 flow->frxq[HASH_RXQ_ETH].ibv_flow = NULL;
1708         }
1709         if (flow->frxq[HASH_RXQ_ETH].ibv_attr) {
1710                 rte_free(flow->frxq[HASH_RXQ_ETH].ibv_attr);
1711                 flow->frxq[HASH_RXQ_ETH].ibv_attr = NULL;
1712         }
1713         if (flow->cs) {
1714                 claim_zero(mlx5_glue->destroy_counter_set(flow->cs));
1715                 flow->cs = NULL;
1716                 parser->cs = NULL;
1717         }
1718         return err;
1719 }
1720
1721 /**
1722  * Create hash Rx queues when RSS is enabled.
1723  *
1724  * @param priv
1725  *   Pointer to private structure.
1726  * @param parser
1727  *   Internal parser structure.
1728  * @param flow
1729  *   Pointer to the rte_flow.
1730  * @param[out] error
1731  *   Perform verbose error reporting if not NULL.
1732  *
1733  * @return
1734  *   0 on success, an errno value otherwise and rte_errno is set.
1735  */
1736 static int
1737 priv_flow_create_action_queue_rss(struct priv *priv,
1738                                   struct mlx5_flow_parse *parser,
1739                                   struct rte_flow *flow,
1740                                   struct rte_flow_error *error)
1741 {
1742         unsigned int i;
1743
1744         for (i = 0; i != hash_rxq_init_n; ++i) {
1745                 uint64_t hash_fields;
1746
1747                 if (!parser->queue[i].ibv_attr)
1748                         continue;
1749                 flow->frxq[i].ibv_attr = parser->queue[i].ibv_attr;
1750                 parser->queue[i].ibv_attr = NULL;
1751                 hash_fields = hash_rxq_init[i].hash_fields;
1752                 if (!priv->dev->data->dev_started)
1753                         continue;
1754                 flow->frxq[i].hrxq =
1755                         mlx5_priv_hrxq_get(priv,
1756                                            parser->rss_conf.rss_key,
1757                                            parser->rss_conf.rss_key_len,
1758                                            hash_fields,
1759                                            parser->queues,
1760                                            parser->queues_n);
1761                 if (flow->frxq[i].hrxq)
1762                         continue;
1763                 flow->frxq[i].hrxq =
1764                         mlx5_priv_hrxq_new(priv,
1765                                            parser->rss_conf.rss_key,
1766                                            parser->rss_conf.rss_key_len,
1767                                            hash_fields,
1768                                            parser->queues,
1769                                            parser->queues_n);
1770                 if (!flow->frxq[i].hrxq) {
1771                         rte_flow_error_set(error, ENOMEM,
1772                                            RTE_FLOW_ERROR_TYPE_HANDLE,
1773                                            NULL, "cannot create hash rxq");
1774                         return ENOMEM;
1775                 }
1776         }
1777         return 0;
1778 }
1779
1780 /**
1781  * Complete flow rule creation.
1782  *
1783  * @param priv
1784  *   Pointer to private structure.
1785  * @param parser
1786  *   Internal parser structure.
1787  * @param flow
1788  *   Pointer to the rte_flow.
1789  * @param[out] error
1790  *   Perform verbose error reporting if not NULL.
1791  *
1792  * @return
1793  *   0 on success, an errno value otherwise and rte_errno is set.
1794  */
1795 static int
1796 priv_flow_create_action_queue(struct priv *priv,
1797                               struct mlx5_flow_parse *parser,
1798                               struct rte_flow *flow,
1799                               struct rte_flow_error *error)
1800 {
1801         int err = 0;
1802         unsigned int i;
1803
1804         assert(priv->pd);
1805         assert(priv->ctx);
1806         assert(!parser->drop);
1807         err = priv_flow_create_action_queue_rss(priv, parser, flow, error);
1808         if (err)
1809                 goto error;
1810         if (parser->count)
1811                 flow->cs = parser->cs;
1812         if (!priv->dev->data->dev_started)
1813                 return 0;
1814         for (i = 0; i != hash_rxq_init_n; ++i) {
1815                 if (!flow->frxq[i].hrxq)
1816                         continue;
1817                 flow->frxq[i].ibv_flow =
1818                         mlx5_glue->create_flow(flow->frxq[i].hrxq->qp,
1819                                                flow->frxq[i].ibv_attr);
1820                 if (!flow->frxq[i].ibv_flow) {
1821                         rte_flow_error_set(error, ENOMEM,
1822                                            RTE_FLOW_ERROR_TYPE_HANDLE,
1823                                            NULL, "flow rule creation failure");
1824                         err = ENOMEM;
1825                         goto error;
1826                 }
1827                 DEBUG("%p type %d QP %p ibv_flow %p",
1828                       (void *)flow, i,
1829                       (void *)flow->frxq[i].hrxq,
1830                       (void *)flow->frxq[i].ibv_flow);
1831         }
1832         for (i = 0; i != parser->queues_n; ++i) {
1833                 struct mlx5_rxq_data *q =
1834                         (*priv->rxqs)[parser->queues[i]];
1835
1836                 q->mark |= parser->mark;
1837         }
1838         return 0;
1839 error:
1840         assert(flow);
1841         for (i = 0; i != hash_rxq_init_n; ++i) {
1842                 if (flow->frxq[i].ibv_flow) {
1843                         struct ibv_flow *ibv_flow = flow->frxq[i].ibv_flow;
1844
1845                         claim_zero(mlx5_glue->destroy_flow(ibv_flow));
1846                 }
1847                 if (flow->frxq[i].hrxq)
1848                         mlx5_priv_hrxq_release(priv, flow->frxq[i].hrxq);
1849                 if (flow->frxq[i].ibv_attr)
1850                         rte_free(flow->frxq[i].ibv_attr);
1851         }
1852         if (flow->cs) {
1853                 claim_zero(mlx5_glue->destroy_counter_set(flow->cs));
1854                 flow->cs = NULL;
1855                 parser->cs = NULL;
1856         }
1857         return err;
1858 }
1859
1860 /**
1861  * Convert a flow.
1862  *
1863  * @param priv
1864  *   Pointer to private structure.
1865  * @param list
1866  *   Pointer to a TAILQ flow list.
1867  * @param[in] attr
1868  *   Flow rule attributes.
1869  * @param[in] pattern
1870  *   Pattern specification (list terminated by the END pattern item).
1871  * @param[in] actions
1872  *   Associated actions (list terminated by the END action).
1873  * @param[out] error
1874  *   Perform verbose error reporting if not NULL.
1875  *
1876  * @return
1877  *   A flow on success, NULL otherwise.
1878  */
1879 static struct rte_flow *
1880 priv_flow_create(struct priv *priv,
1881                  struct mlx5_flows *list,
1882                  const struct rte_flow_attr *attr,
1883                  const struct rte_flow_item items[],
1884                  const struct rte_flow_action actions[],
1885                  struct rte_flow_error *error)
1886 {
1887         struct mlx5_flow_parse parser = { .create = 1, };
1888         struct rte_flow *flow = NULL;
1889         unsigned int i;
1890         int err;
1891
1892         err = priv_flow_convert(priv, attr, items, actions, error, &parser);
1893         if (err)
1894                 goto exit;
1895         flow = rte_calloc(__func__, 1,
1896                           sizeof(*flow) + parser.queues_n * sizeof(uint16_t),
1897                           0);
1898         if (!flow) {
1899                 rte_flow_error_set(error, ENOMEM,
1900                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1901                                    NULL,
1902                                    "cannot allocate flow memory");
1903                 return NULL;
1904         }
1905         /* Copy queues configuration. */
1906         flow->queues = (uint16_t (*)[])(flow + 1);
1907         memcpy(flow->queues, parser.queues, parser.queues_n * sizeof(uint16_t));
1908         flow->queues_n = parser.queues_n;
1909         flow->mark = parser.mark;
1910         /* Copy RSS configuration. */
1911         flow->rss_conf = parser.rss_conf;
1912         flow->rss_conf.rss_key = flow->rss_key;
1913         memcpy(flow->rss_key, parser.rss_key, parser.rss_conf.rss_key_len);
1914         /* Finalize the flow. */
1915         if (parser.drop)
1916                 err = priv_flow_create_action_queue_drop(priv, &parser, flow,
1917                                                          error);
1918         else
1919                 err = priv_flow_create_action_queue(priv, &parser, flow, error);
1920         if (err)
1921                 goto exit;
1922         TAILQ_INSERT_TAIL(list, flow, next);
1923         DEBUG("Flow created %p", (void *)flow);
1924         return flow;
1925 exit:
1926         for (i = 0; i != hash_rxq_init_n; ++i) {
1927                 if (parser.queue[i].ibv_attr)
1928                         rte_free(parser.queue[i].ibv_attr);
1929         }
1930         rte_free(flow);
1931         return NULL;
1932 }
1933
1934 /**
1935  * Validate a flow supported by the NIC.
1936  *
1937  * @see rte_flow_validate()
1938  * @see rte_flow_ops
1939  */
1940 int
1941 mlx5_flow_validate(struct rte_eth_dev *dev,
1942                    const struct rte_flow_attr *attr,
1943                    const struct rte_flow_item items[],
1944                    const struct rte_flow_action actions[],
1945                    struct rte_flow_error *error)
1946 {
1947         struct priv *priv = dev->data->dev_private;
1948         int ret;
1949         struct mlx5_flow_parse parser = { .create = 0, };
1950
1951         priv_lock(priv);
1952         ret = priv_flow_convert(priv, attr, items, actions, error, &parser);
1953         priv_unlock(priv);
1954         return ret;
1955 }
1956
1957 /**
1958  * Create a flow.
1959  *
1960  * @see rte_flow_create()
1961  * @see rte_flow_ops
1962  */
1963 struct rte_flow *
1964 mlx5_flow_create(struct rte_eth_dev *dev,
1965                  const struct rte_flow_attr *attr,
1966                  const struct rte_flow_item items[],
1967                  const struct rte_flow_action actions[],
1968                  struct rte_flow_error *error)
1969 {
1970         struct priv *priv = dev->data->dev_private;
1971         struct rte_flow *flow;
1972
1973         priv_lock(priv);
1974         flow = priv_flow_create(priv, &priv->flows, attr, items, actions,
1975                                 error);
1976         priv_unlock(priv);
1977         return flow;
1978 }
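
/*
 * Illustrative sketch, not part of this driver: how an application
 * typically reaches the two entry points above through the generic
 * rte_flow API, validating before creating. The guard macro is
 * hypothetical so that the example is never built.
 */
#ifdef MLX5_FLOW_EXAMPLES
static struct rte_flow *
example_create_flow(uint16_t port_id,
                    const struct rte_flow_attr *attr,
                    const struct rte_flow_item pattern[],
                    const struct rte_flow_action actions[])
{
        struct rte_flow_error err;

        /* err.message carries the verbose reason on failure. */
        if (rte_flow_validate(port_id, attr, pattern, actions, &err))
                return NULL;
        return rte_flow_create(port_id, attr, pattern, actions, &err);
}
#endif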
1979
1980 /**
1981  * Destroy a flow.
1982  *
1983  * @param priv
1984  *   Pointer to private structure.
1985  * @param list
1986  *   Pointer to a TAILQ flow list.
1987  * @param[in] flow
1988  *   Flow to destroy.
1989  */
1990 static void
1991 priv_flow_destroy(struct priv *priv,
1992                   struct mlx5_flows *list,
1993                   struct rte_flow *flow)
1994 {
1995         unsigned int i;
1996
1997         if (flow->drop || !flow->mark)
1998                 goto free;
1999         for (i = 0; i != flow->queues_n; ++i) {
2000                 struct rte_flow *tmp;
2001                 int mark = 0;
2002
2003                 /*
2004                  * To remove the mark from the queue, the queue must not be
2005                  * present in any other marked flow (RSS or not).
2006                  */
2007                 TAILQ_FOREACH(tmp, list, next) {
2008                         unsigned int j;
2009                         uint16_t *tqs = NULL;
2010                         uint16_t tq_n = 0;
2011
2012                         if (!tmp->mark)
2013                                 continue;
2014                         for (j = 0; j != hash_rxq_init_n; ++j) {
2015                                 if (!tmp->frxq[j].hrxq)
2016                                         continue;
2017                                 tqs = tmp->frxq[j].hrxq->ind_table->queues;
2018                                 tq_n = tmp->frxq[j].hrxq->ind_table->queues_n;
2019                         }
2020                         if (!tq_n)
2021                                 continue;
2022                         for (j = 0; (j != tq_n) && !mark; j++)
2023                                 if (tqs[j] == (*flow->queues)[i])
2024                                         mark = 1;
2025                 }
2026                 (*priv->rxqs)[(*flow->queues)[i]]->mark = mark;
2027         }
2028 free:
2029         if (flow->drop) {
2030                 if (flow->frxq[HASH_RXQ_ETH].ibv_flow)
2031                         claim_zero(mlx5_glue->destroy_flow
2032                                    (flow->frxq[HASH_RXQ_ETH].ibv_flow));
2033                 rte_free(flow->frxq[HASH_RXQ_ETH].ibv_attr);
2034         } else {
2035                 for (i = 0; i != hash_rxq_init_n; ++i) {
2036                         struct mlx5_flow *frxq = &flow->frxq[i];
2037
2038                         if (frxq->ibv_flow)
2039                                 claim_zero(mlx5_glue->destroy_flow
2040                                            (frxq->ibv_flow));
2041                         if (frxq->hrxq)
2042                                 mlx5_priv_hrxq_release(priv, frxq->hrxq);
2043                         if (frxq->ibv_attr)
2044                                 rte_free(frxq->ibv_attr);
2045                 }
2046         }
2047         if (flow->cs) {
2048                 claim_zero(mlx5_glue->destroy_counter_set(flow->cs));
2049                 flow->cs = NULL;
2050         }
2051         TAILQ_REMOVE(list, flow, next);
2052         DEBUG("Flow destroyed %p", (void *)flow);
2053         rte_free(flow);
2054 }
2055
2056 /**
2057  * Destroy all flows.
2058  *
2059  * @param priv
2060  *   Pointer to private structure.
2061  * @param list
2062  *   Pointer to a TAILQ flow list.
2063  */
2064 void
2065 priv_flow_flush(struct priv *priv, struct mlx5_flows *list)
2066 {
2067         while (!TAILQ_EMPTY(list)) {
2068                 struct rte_flow *flow;
2069
2070                 flow = TAILQ_FIRST(list);
2071                 priv_flow_destroy(priv, list, flow);
2072         }
2073 }
2074
2075 /**
2076  * Create drop queue.
2077  *
2078  * @param priv
2079  *   Pointer to private structure.
2080  *
2081  * @return
2082  *   0 on success, -1 on failure.
2083  */
2084 int
2085 priv_flow_create_drop_queue(struct priv *priv)
2086 {
2087         struct mlx5_hrxq_drop *fdq = NULL;
2088
2089         assert(priv->pd);
2090         assert(priv->ctx);
2091         fdq = rte_calloc(__func__, 1, sizeof(*fdq), 0);
2092         if (!fdq) {
2093                 WARN("cannot allocate memory for drop queue");
2094                 goto error;
2095         }
2096         fdq->cq = mlx5_glue->create_cq(priv->ctx, 1, NULL, NULL, 0);
2097         if (!fdq->cq) {
2098                 WARN("cannot allocate CQ for drop queue");
2099                 goto error;
2100         }
2101         fdq->wq = mlx5_glue->create_wq
2102                 (priv->ctx,
2103                  &(struct ibv_wq_init_attr){
2104                         .wq_type = IBV_WQT_RQ,
2105                         .max_wr = 1,
2106                         .max_sge = 1,
2107                         .pd = priv->pd,
2108                         .cq = fdq->cq,
2109                  });
2110         if (!fdq->wq) {
2111                 WARN("cannot allocate WQ for drop queue");
2112                 goto error;
2113         }
2114         fdq->ind_table = mlx5_glue->create_rwq_ind_table
2115                 (priv->ctx,
2116                  &(struct ibv_rwq_ind_table_init_attr){
2117                         .log_ind_tbl_size = 0,
2118                         .ind_tbl = &fdq->wq,
2119                         .comp_mask = 0,
2120                  });
2121         if (!fdq->ind_table) {
2122                 WARN("cannot allocate indirection table for drop queue");
2123                 goto error;
2124         }
2125         fdq->qp = mlx5_glue->create_qp_ex
2126                 (priv->ctx,
2127                  &(struct ibv_qp_init_attr_ex){
2128                         .qp_type = IBV_QPT_RAW_PACKET,
2129                         .comp_mask =
2130                                 IBV_QP_INIT_ATTR_PD |
2131                                 IBV_QP_INIT_ATTR_IND_TABLE |
2132                                 IBV_QP_INIT_ATTR_RX_HASH,
2133                         .rx_hash_conf = (struct ibv_rx_hash_conf){
2134                                 .rx_hash_function =
2135                                         IBV_RX_HASH_FUNC_TOEPLITZ,
2136                                 .rx_hash_key_len = rss_hash_default_key_len,
2137                                 .rx_hash_key = rss_hash_default_key,
2138                                 .rx_hash_fields_mask = 0,
2139                                 },
2140                         .rwq_ind_tbl = fdq->ind_table,
2141                         .pd = priv->pd
2142                  });
2143         if (!fdq->qp) {
2144                 WARN("cannot allocate QP for drop queue");
2145                 goto error;
2146         }
2147         priv->flow_drop_queue = fdq;
2148         return 0;
2149 error:
2150         if (fdq && fdq->qp)
2151                 claim_zero(mlx5_glue->destroy_qp(fdq->qp));
2152         if (fdq && fdq->ind_table)
2153                 claim_zero(mlx5_glue->destroy_rwq_ind_table(fdq->ind_table));
2154         if (fdq && fdq->wq)
2155                 claim_zero(mlx5_glue->destroy_wq(fdq->wq));
2156         if (fdq && fdq->cq)
2157                 claim_zero(mlx5_glue->destroy_cq(fdq->cq));
2158         if (fdq)
2159                 rte_free(fdq);
2160         priv->flow_drop_queue = NULL;
2161         return -1;
2162 }
2163
2164 /**
2165  * Delete drop queue.
2166  *
2167  * @param priv
2168  *   Pointer to private structure.
2169  */
2170 void
2171 priv_flow_delete_drop_queue(struct priv *priv)
2172 {
2173         struct mlx5_hrxq_drop *fdq = priv->flow_drop_queue;
2174
2175         if (!fdq)
2176                 return;
2177         if (fdq->qp)
2178                 claim_zero(mlx5_glue->destroy_qp(fdq->qp));
2179         if (fdq->ind_table)
2180                 claim_zero(mlx5_glue->destroy_rwq_ind_table(fdq->ind_table));
2181         if (fdq->wq)
2182                 claim_zero(mlx5_glue->destroy_wq(fdq->wq));
2183         if (fdq->cq)
2184                 claim_zero(mlx5_glue->destroy_cq(fdq->cq));
2185         rte_free(fdq);
2186         priv->flow_drop_queue = NULL;
2187 }
2188
2189 /**
2190  * Remove all flows.
2191  *
2192  * @param priv
2193  *   Pointer to private structure.
2194  * @param list
2195  *   Pointer to a TAILQ flow list.
2196  */
2197 void
2198 priv_flow_stop(struct priv *priv, struct mlx5_flows *list)
2199 {
2200         struct rte_flow *flow;
2201
2202         TAILQ_FOREACH_REVERSE(flow, list, mlx5_flows, next) {
2203                 unsigned int i;
2204
2205                 if (flow->drop) {
2206                         if (!flow->frxq[HASH_RXQ_ETH].ibv_flow)
2207                                 continue;
2208                         claim_zero(mlx5_glue->destroy_flow
2209                                    (flow->frxq[HASH_RXQ_ETH].ibv_flow));
2210                         flow->frxq[HASH_RXQ_ETH].ibv_flow = NULL;
2211                         /* Next flow. */
2212                         continue;
2213                 }
2214                 if (flow->mark) {
2215                         struct mlx5_ind_table_ibv *ind_tbl = NULL;
2216
2217                         for (i = 0; i != hash_rxq_init_n; ++i) {
2218                                 if (!flow->frxq[i].hrxq)
2219                                         continue;
2220                                 ind_tbl = flow->frxq[i].hrxq->ind_table;
2221                         }
2222                         assert(ind_tbl);
2223                         for (i = 0; i != ind_tbl->queues_n; ++i)
2224                                 (*priv->rxqs)[ind_tbl->queues[i]]->mark = 0;
2225                 }
2226                 for (i = 0; i != hash_rxq_init_n; ++i) {
2227                         if (!flow->frxq[i].ibv_flow)
2228                                 continue;
2229                         claim_zero(mlx5_glue->destroy_flow
2230                                    (flow->frxq[i].ibv_flow));
2231                         flow->frxq[i].ibv_flow = NULL;
2232                         mlx5_priv_hrxq_release(priv, flow->frxq[i].hrxq);
2233                         flow->frxq[i].hrxq = NULL;
2234                 }
2235                 DEBUG("Flow %p removed", (void *)flow);
2236         }
2237 }
2238
2239 /**
2240  * Add all flows.
2241  *
2242  * @param priv
2243  *   Pointer to private structure.
2244  * @param list
2245  *   Pointer to a TAILQ flow list.
2246  *
2247  * @return
2248  *   0 on success, an errno value otherwise and rte_errno is set.
2249  */
2250 int
2251 priv_flow_start(struct priv *priv, struct mlx5_flows *list)
2252 {
2253         struct rte_flow *flow;
2254
2255         TAILQ_FOREACH(flow, list, next) {
2256                 unsigned int i;
2257
2258                 if (flow->drop) {
2259                         flow->frxq[HASH_RXQ_ETH].ibv_flow =
2260                                 mlx5_glue->create_flow
2261                                 (priv->flow_drop_queue->qp,
2262                                  flow->frxq[HASH_RXQ_ETH].ibv_attr);
2263                         if (!flow->frxq[HASH_RXQ_ETH].ibv_flow) {
2264                                 DEBUG("Flow %p cannot be applied",
2265                                       (void *)flow);
2266                                 rte_errno = EINVAL;
2267                                 return rte_errno;
2268                         }
2269                         DEBUG("Flow %p applied", (void *)flow);
2270                         /* Next flow. */
2271                         continue;
2272                 }
2273                 for (i = 0; i != hash_rxq_init_n; ++i) {
2274                         if (!flow->frxq[i].ibv_attr)
2275                                 continue;
2276                         flow->frxq[i].hrxq =
2277                                 mlx5_priv_hrxq_get(priv, flow->rss_conf.rss_key,
2278                                                    flow->rss_conf.rss_key_len,
2279                                                    hash_rxq_init[i].hash_fields,
2280                                                    (*flow->queues),
2281                                                    flow->queues_n);
2282                         if (flow->frxq[i].hrxq)
2283                                 goto flow_create;
2284                         flow->frxq[i].hrxq =
2285                                 mlx5_priv_hrxq_new(priv, flow->rss_conf.rss_key,
2286                                                    flow->rss_conf.rss_key_len,
2287                                                    hash_rxq_init[i].hash_fields,
2288                                                    (*flow->queues),
2289                                                    flow->queues_n);
2290                         if (!flow->frxq[i].hrxq) {
2291                                 DEBUG("Flow %p cannot be applied",
2292                                       (void *)flow);
2293                                 rte_errno = EINVAL;
2294                                 return rte_errno;
2295                         }
2296 flow_create:
2297                         flow->frxq[i].ibv_flow =
2298                                 mlx5_glue->create_flow(flow->frxq[i].hrxq->qp,
2299                                                        flow->frxq[i].ibv_attr);
2300                         if (!flow->frxq[i].ibv_flow) {
2301                                 DEBUG("Flow %p cannot be applied",
2302                                       (void *)flow);
2303                                 rte_errno = EINVAL;
2304                                 return rte_errno;
2305                         }
2306                         DEBUG("Flow %p applied", (void *)flow);
2307                 }
2308                 if (!flow->mark)
2309                         continue;
2310                 for (i = 0; i != flow->queues_n; ++i)
2311                         (*priv->rxqs)[(*flow->queues)[i]]->mark = 1;
2312         }
2313         return 0;
2314 }
2315
2316 /**
2317  * Verify the flow list is empty.
2318  *
2319  * @param priv
2320  *   Pointer to private structure.
2321  *
2322  * @return The number of flows not released.
2323  */
2324 int
2325 priv_flow_verify(struct priv *priv)
2326 {
2327         struct rte_flow *flow;
2328         int ret = 0;
2329
2330         TAILQ_FOREACH(flow, &priv->flows, next) {
2331                 DEBUG("%p: flow %p still referenced", (void *)priv,
2332                       (void *)flow);
2333                 ++ret;
2334         }
2335         return ret;
2336 }
2337
2338 /**
2339  * Enable a control flow configured from the control plane.
2340  *
2341  * @param dev
2342  *   Pointer to Ethernet device.
2343  * @param eth_spec
2344  *   An Ethernet flow spec to apply.
2345  * @param eth_mask
2346  *   An Ethernet flow mask to apply.
2347  * @param vlan_spec
2348  *   A VLAN flow spec to apply.
2349  * @param vlan_mask
2350  *   A VLAN flow mask to apply.
2351  *
2352  * @return
2353  *   0 on success, an errno value on failure.
2354  */
2355 int
2356 mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
2357                     struct rte_flow_item_eth *eth_spec,
2358                     struct rte_flow_item_eth *eth_mask,
2359                     struct rte_flow_item_vlan *vlan_spec,
2360                     struct rte_flow_item_vlan *vlan_mask)
2361 {
2362         struct priv *priv = dev->data->dev_private;
2363         const struct rte_flow_attr attr = {
2364                 .ingress = 1,
2365                 .priority = MLX5_CTRL_FLOW_PRIORITY,
2366         };
2367         struct rte_flow_item items[] = {
2368                 {
2369                         .type = RTE_FLOW_ITEM_TYPE_ETH,
2370                         .spec = eth_spec,
2371                         .last = NULL,
2372                         .mask = eth_mask,
2373                 },
2374                 {
2375                         .type = (vlan_spec) ? RTE_FLOW_ITEM_TYPE_VLAN :
2376                                 RTE_FLOW_ITEM_TYPE_END,
2377                         .spec = vlan_spec,
2378                         .last = NULL,
2379                         .mask = vlan_mask,
2380                 },
2381                 {
2382                         .type = RTE_FLOW_ITEM_TYPE_END,
2383                 },
2384         };
2385         struct rte_flow_action actions[] = {
2386                 {
2387                         .type = RTE_FLOW_ACTION_TYPE_RSS,
2388                 },
2389                 {
2390                         .type = RTE_FLOW_ACTION_TYPE_END,
2391                 },
2392         };
2393         struct rte_flow *flow;
2394         struct rte_flow_error error;
2395         unsigned int i;
2396         union {
2397                 struct rte_flow_action_rss rss;
2398                 struct {
2399                         const struct rte_eth_rss_conf *rss_conf;
2400                         uint16_t num;
2401                         uint16_t queue[RTE_MAX_QUEUES_PER_PORT];
2402                 } local;
2403         } action_rss;
2404
2405         if (!priv->reta_idx_n)
2406                 return EINVAL;
2407         for (i = 0; i != priv->reta_idx_n; ++i)
2408                 action_rss.local.queue[i] = (*priv->reta_idx)[i];
2409         action_rss.local.rss_conf = &priv->rss_conf;
2410         action_rss.local.num = priv->reta_idx_n;
2411         actions[0].conf = (const void *)&action_rss.rss;
2412         flow = priv_flow_create(priv, &priv->ctrl_flows, &attr, items, actions,
2413                                 &error);
2414         if (!flow)
2415                 return rte_errno;
2416         return 0;
2417 }
2418
2419 /**
2420  * Enable a control flow configured from the control plane.
2421  *
2422  * @param dev
2423  *   Pointer to Ethernet device.
2424  * @param eth_spec
2425  *   An Ethernet flow spec to apply.
2426  * @param eth_mask
2427  *   An Ethernet flow mask to apply.
2428  *
2429  * @return
2430  *   0 on success, an errno value on failure.
2431  */
2432 int
2433 mlx5_ctrl_flow(struct rte_eth_dev *dev,
2434                struct rte_flow_item_eth *eth_spec,
2435                struct rte_flow_item_eth *eth_mask)
2436 {
2437         return mlx5_ctrl_flow_vlan(dev, eth_spec, eth_mask, NULL, NULL);
2438 }
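
/*
 * Illustrative sketch, not part of this driver: a typical caller of the
 * helper above, installing a control flow that accepts all broadcast
 * frames. The guard macro is hypothetical so that the example is never
 * built.
 */
#ifdef MLX5_FLOW_EXAMPLES
static int
example_enable_broadcast(struct rte_eth_dev *dev)
{
        struct rte_flow_item_eth bcast = {
                .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
        };

        return mlx5_ctrl_flow(dev, &bcast, &bcast);
}
#endif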
2439
2440 /**
2441  * Destroy a flow.
2442  *
2443  * @see rte_flow_destroy()
2444  * @see rte_flow_ops
2445  */
2446 int
2447 mlx5_flow_destroy(struct rte_eth_dev *dev,
2448                   struct rte_flow *flow,
2449                   struct rte_flow_error *error)
2450 {
2451         struct priv *priv = dev->data->dev_private;
2452
2453         (void)error;
2454         priv_lock(priv);
2455         priv_flow_destroy(priv, &priv->flows, flow);
2456         priv_unlock(priv);
2457         return 0;
2458 }
2459
2460 /**
2461  * Destroy all flows.
2462  *
2463  * @see rte_flow_flush()
2464  * @see rte_flow_ops
2465  */
2466 int
2467 mlx5_flow_flush(struct rte_eth_dev *dev,
2468                 struct rte_flow_error *error)
2469 {
2470         struct priv *priv = dev->data->dev_private;
2471
2472         (void)error;
2473         priv_lock(priv);
2474         priv_flow_flush(priv, &priv->flows);
2475         priv_unlock(priv);
2476         return 0;
2477 }
2478
2479 #ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
2480 /**
2481  * Query flow counter.
2482  *
2483  * @param cs
2484  *   The counter set to query.
2485  * @param counter_stats
2486  *   Accumulated statistics used as the baseline for query_count.
2487  *
2488  * @return
2489  *   0 on success, an errno value otherwise and rte_errno is set.
2490  */
2491 static int
2492 priv_flow_query_count(struct ibv_counter_set *cs,
2493                       struct mlx5_flow_counter_stats *counter_stats,
2494                       struct rte_flow_query_count *query_count,
2495                       struct rte_flow_error *error)
2496 {
2497         uint64_t counters[2];
2498         struct ibv_query_counter_set_attr query_cs_attr = {
2499                 .cs = cs,
2500                 .query_flags = IBV_COUNTER_SET_FORCE_UPDATE,
2501         };
2502         struct ibv_counter_set_data query_out = {
2503                 .out = counters,
2504                 .outlen = 2 * sizeof(uint64_t),
2505         };
2506         int res = mlx5_glue->query_counter_set(&query_cs_attr, &query_out);
2507
2508         if (res) {
2509                 rte_flow_error_set(error, -res,
2510                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2511                                    NULL,
2512                                    "cannot read counter");
2513                 return -res;
2514         }
2515         query_count->hits_set = 1;
2516         query_count->bytes_set = 1;
2517         query_count->hits = counters[0] - counter_stats->hits;
2518         query_count->bytes = counters[1] - counter_stats->bytes;
2519         if (query_count->reset) {
2520                 counter_stats->hits = counters[0];
2521                 counter_stats->bytes = counters[1];
2522         }
2523         return 0;
2524 }
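
/*
 * Illustrative sketch, not part of this driver: the application-side
 * counterpart of the query helper above, reading and resetting a flow
 * counter through the generic rte_flow API. The guard macro is
 * hypothetical so that the example is never built.
 */
#ifdef MLX5_FLOW_EXAMPLES
#include <stdio.h>
#include <inttypes.h>

static int
example_query_count(uint16_t port_id, struct rte_flow *flow)
{
        struct rte_flow_query_count qc = { .reset = 1 };
        struct rte_flow_error err;

        if (rte_flow_query(port_id, flow, RTE_FLOW_ACTION_TYPE_COUNT,
                           &qc, &err))
                return -1;
        printf("hits=%" PRIu64 " bytes=%" PRIu64 "\n", qc.hits, qc.bytes);
        return 0;
}
#endif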
2525
2526 /**
2527  * Query a flow.
2528  *
2529  * @see rte_flow_query()
2530  * @see rte_flow_ops
2531  */
2532 int
2533 mlx5_flow_query(struct rte_eth_dev *dev,
2534                 struct rte_flow *flow,
2535                 enum rte_flow_action_type action __rte_unused,
2536                 void *data,
2537                 struct rte_flow_error *error)
2538 {
2539         struct priv *priv = dev->data->dev_private;
2540         int res = EINVAL;
2541
2542         priv_lock(priv);
2543         if (flow->cs) {
2544                 res = priv_flow_query_count(flow->cs,
2545                                         &flow->counter_stats,
2546                                         (struct rte_flow_query_count *)data,
2547                                         error);
2548         } else {
2549                 rte_flow_error_set(error, res,
2550                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2551                                    NULL,
2552                                    "no counter found for flow");
2553         }
2554         priv_unlock(priv);
2555         return -res;
2556 }
2557 #endif
2558
2559 /**
2560  * Isolated mode.
2561  *
2562  * @see rte_flow_isolate()
2563  * @see rte_flow_ops
2564  */
2565 int
2566 mlx5_flow_isolate(struct rte_eth_dev *dev,
2567                   int enable,
2568                   struct rte_flow_error *error)
2569 {
2570         struct priv *priv = dev->data->dev_private;
2571
2572         priv_lock(priv);
2573         if (dev->data->dev_started) {
2574                 rte_flow_error_set(error, EBUSY,
2575                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2576                                    NULL,
2577                                    "port must be stopped first");
2578                 priv_unlock(priv);
2579                 return -rte_errno;
2580         }
2581         priv->isolated = !!enable;
2582         if (enable)
2583                 priv->dev->dev_ops = &mlx5_dev_ops_isolate;
2584         else
2585                 priv->dev->dev_ops = &mlx5_dev_ops;
2586         priv_unlock(priv);
2587         return 0;
2588 }
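
/*
 * Illustrative sketch, not part of this driver: entering isolated mode
 * from an application. As enforced above, the port must be stopped
 * first. The guard macro is hypothetical so that the example is never
 * built.
 */
#ifdef MLX5_FLOW_EXAMPLES
static int
example_enter_isolated_mode(uint16_t port_id)
{
        struct rte_flow_error err;

        return rte_flow_isolate(port_id, 1, &err);
}
#endif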
2589
2590 /**
2591  * Convert a flow director filter to a generic flow.
2592  *
2593  * @param priv
2594  *   Private structure.
2595  * @param fdir_filter
2596  *   Flow director filter to add.
2597  * @param attributes
2598  *   Generic flow parameters structure.
2599  *
2600  * @return
2601  *   0 on success, an errno value on failure.
2602  */
2603 static int
2604 priv_fdir_filter_convert(struct priv *priv,
2605                          const struct rte_eth_fdir_filter *fdir_filter,
2606                          struct mlx5_fdir *attributes)
2607 {
2608         const struct rte_eth_fdir_input *input = &fdir_filter->input;
2609
2610         /* Validate queue number. */
2611         if (fdir_filter->action.rx_queue >= priv->rxqs_n) {
2612                 ERROR("invalid queue number %d", fdir_filter->action.rx_queue);
2613                 return EINVAL;
2614         }
2615         attributes->attr.ingress = 1;
2616         attributes->items[0] = (struct rte_flow_item) {
2617                 .type = RTE_FLOW_ITEM_TYPE_ETH,
2618                 .spec = &attributes->l2,
2619                 .mask = &attributes->l2_mask,
2620         };
2621         switch (fdir_filter->action.behavior) {
2622         case RTE_ETH_FDIR_ACCEPT:
2623                 attributes->actions[0] = (struct rte_flow_action){
2624                         .type = RTE_FLOW_ACTION_TYPE_QUEUE,
2625                         .conf = &attributes->queue,
2626                 };
2627                 break;
2628         case RTE_ETH_FDIR_REJECT:
2629                 attributes->actions[0] = (struct rte_flow_action){
2630                         .type = RTE_FLOW_ACTION_TYPE_DROP,
2631                 };
2632                 break;
2633         default:
2634                 ERROR("invalid behavior %d", fdir_filter->action.behavior);
2635                 return ENOTSUP;
2636         }
2637         attributes->queue.index = fdir_filter->action.rx_queue;
2638         switch (fdir_filter->input.flow_type) {
2639         case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
2640                 attributes->l3.ipv4.hdr = (struct ipv4_hdr){
2641                         .src_addr = input->flow.udp4_flow.ip.src_ip,
2642                         .dst_addr = input->flow.udp4_flow.ip.dst_ip,
2643                         .time_to_live = input->flow.udp4_flow.ip.ttl,
2644                         .type_of_service = input->flow.udp4_flow.ip.tos,
2645                         .next_proto_id = input->flow.udp4_flow.ip.proto,
2646                 };
2647                 attributes->l4.udp.hdr = (struct udp_hdr){
2648                         .src_port = input->flow.udp4_flow.src_port,
2649                         .dst_port = input->flow.udp4_flow.dst_port,
2650                 };
2651                 attributes->items[1] = (struct rte_flow_item){
2652                         .type = RTE_FLOW_ITEM_TYPE_IPV4,
2653                         .spec = &attributes->l3,
2654                         .mask = &attributes->l3,
2655                 };
2656                 attributes->items[2] = (struct rte_flow_item){
2657                         .type = RTE_FLOW_ITEM_TYPE_UDP,
2658                         .spec = &attributes->l4,
2659                         .mask = &attributes->l4,
2660                 };
2661                 break;
2662         case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
2663                 attributes->l3.ipv4.hdr = (struct ipv4_hdr){
2664                         .src_addr = input->flow.tcp4_flow.ip.src_ip,
2665                         .dst_addr = input->flow.tcp4_flow.ip.dst_ip,
2666                         .time_to_live = input->flow.tcp4_flow.ip.ttl,
2667                         .type_of_service = input->flow.tcp4_flow.ip.tos,
2668                         .next_proto_id = input->flow.tcp4_flow.ip.proto,
2669                 };
2670                 attributes->l4.tcp.hdr = (struct tcp_hdr){
2671                         .src_port = input->flow.tcp4_flow.src_port,
2672                         .dst_port = input->flow.tcp4_flow.dst_port,
2673                 };
2674                 attributes->items[1] = (struct rte_flow_item){
2675                         .type = RTE_FLOW_ITEM_TYPE_IPV4,
2676                         .spec = &attributes->l3,
2677                         .mask = &attributes->l3,
2678                 };
2679                 attributes->items[2] = (struct rte_flow_item){
2680                         .type = RTE_FLOW_ITEM_TYPE_TCP,
2681                         .spec = &attributes->l4,
2682                         .mask = &attributes->l4,
2683                 };
2684                 break;
2685         case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
2686                 attributes->l3.ipv4.hdr = (struct ipv4_hdr){
2687                         .src_addr = input->flow.ip4_flow.src_ip,
2688                         .dst_addr = input->flow.ip4_flow.dst_ip,
2689                         .time_to_live = input->flow.ip4_flow.ttl,
2690                         .type_of_service = input->flow.ip4_flow.tos,
2691                         .next_proto_id = input->flow.ip4_flow.proto,
2692                 };
2693                 attributes->items[1] = (struct rte_flow_item){
2694                         .type = RTE_FLOW_ITEM_TYPE_IPV4,
2695                         .spec = &attributes->l3,
2696                         .mask = &attributes->l3,
2697                 };
2698                 break;
2699         case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
2700                 attributes->l3.ipv6.hdr = (struct ipv6_hdr){
2701                         .hop_limits = input->flow.udp6_flow.ip.hop_limits,
2702                         .proto = input->flow.udp6_flow.ip.proto,
2703                 };
2704                 memcpy(attributes->l3.ipv6.hdr.src_addr,
2705                        input->flow.udp6_flow.ip.src_ip,
2706                        RTE_DIM(attributes->l3.ipv6.hdr.src_addr));
2707                 memcpy(attributes->l3.ipv6.hdr.dst_addr,
2708                        input->flow.udp6_flow.ip.dst_ip,
2709                        RTE_DIM(attributes->l3.ipv6.hdr.dst_addr));
2710                 attributes->l4.udp.hdr = (struct udp_hdr){
2711                         .src_port = input->flow.udp6_flow.src_port,
2712                         .dst_port = input->flow.udp6_flow.dst_port,
2713                 };
2714                 attributes->items[1] = (struct rte_flow_item){
2715                         .type = RTE_FLOW_ITEM_TYPE_IPV6,
2716                         .spec = &attributes->l3,
2717                         .mask = &attributes->l3,
2718                 };
2719                 attributes->items[2] = (struct rte_flow_item){
2720                         .type = RTE_FLOW_ITEM_TYPE_UDP,
2721                         .spec = &attributes->l4,
2722                         .mask = &attributes->l4,
2723                 };
2724                 break;
2725         case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
2726                 attributes->l3.ipv6.hdr = (struct ipv6_hdr){
2727                         .hop_limits = input->flow.tcp6_flow.ip.hop_limits,
2728                         .proto = input->flow.tcp6_flow.ip.proto,
2729                 };
2730                 memcpy(attributes->l3.ipv6.hdr.src_addr,
2731                        input->flow.tcp6_flow.ip.src_ip,
2732                        RTE_DIM(attributes->l3.ipv6.hdr.src_addr));
2733                 memcpy(attributes->l3.ipv6.hdr.dst_addr,
2734                        input->flow.tcp6_flow.ip.dst_ip,
2735                        RTE_DIM(attributes->l3.ipv6.hdr.dst_addr));
2736                 attributes->l4.tcp.hdr = (struct tcp_hdr){
2737                         .src_port = input->flow.tcp6_flow.src_port,
2738                         .dst_port = input->flow.tcp6_flow.dst_port,
2739                 };
2740                 attributes->items[1] = (struct rte_flow_item){
2741                         .type = RTE_FLOW_ITEM_TYPE_IPV6,
2742                         .spec = &attributes->l3,
2743                         .mask = &attributes->l3,
2744                 };
2745                 attributes->items[2] = (struct rte_flow_item){
2746                         .type = RTE_FLOW_ITEM_TYPE_TCP,
2747                         .spec = &attributes->l4,
2748                         .mask = &attributes->l4,
2749                 };
2750                 break;
2751         case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
2752                 attributes->l3.ipv6.hdr = (struct ipv6_hdr){
2753                         .hop_limits = input->flow.ipv6_flow.hop_limits,
2754                         .proto = input->flow.ipv6_flow.proto,
2755                 };
2756                 memcpy(attributes->l3.ipv6.hdr.src_addr,
2757                        input->flow.ipv6_flow.src_ip,
2758                        RTE_DIM(attributes->l3.ipv6.hdr.src_addr));
2759                 memcpy(attributes->l3.ipv6.hdr.dst_addr,
2760                        input->flow.ipv6_flow.dst_ip,
2761                        RTE_DIM(attributes->l3.ipv6.hdr.dst_addr));
2762                 attributes->items[1] = (struct rte_flow_item){
2763                         .type = RTE_FLOW_ITEM_TYPE_IPV6,
2764                         .spec = &attributes->l3,
2765                         .mask = &attributes->l3,
2766                 };
2767                 break;
2768         default:
2769                 ERROR("invalid flow type %d",
2770                       fdir_filter->input.flow_type);
2771                 return ENOTSUP;
2772         }
2773         return 0;
2774 }
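/*
 * Two properties of the conversion above are worth noting.  First, each
 * flow item uses the same structure as both spec and mask, i.e. every
 * field is masked with its own value, so only the fields filled in from
 * the filter take part in matching.  Second, the IPv6 addresses are
 * 16-element uint8_t arrays, hence RTE_DIM() and sizeof() coincide and
 * the memcpy() sizes are in bytes as intended.
 */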
2775
2776 /**
2777  * Add new flow director filter and store it in list.
2778  *
2779  * @param priv
2780  *   Private structure.
2781  * @param fdir_filter
2782  *   Flow director filter to add.
2783  *
2784  * @return
2785  *   0 on success, errno value on failure.
2786  */
2787 static int
2788 priv_fdir_filter_add(struct priv *priv,
2789                      const struct rte_eth_fdir_filter *fdir_filter)
2790 {
2791         struct mlx5_fdir attributes = {
2792                 .attr.group = 0,
2793                 .l2_mask = {
2794                         .dst.addr_bytes = "\x00\x00\x00\x00\x00\x00",
2795                         .src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
2796                         .type = 0,
2797                 },
2798         };
2799         struct mlx5_flow_parse parser = {
2800                 .layer = HASH_RXQ_ETH,
2801         };
2802         struct rte_flow_error error;
2803         struct rte_flow *flow;
2804         int ret;
2805
2806         ret = priv_fdir_filter_convert(priv, fdir_filter, &attributes);
2807         if (ret)
2808                 return ret;
2809         ret = priv_flow_convert(priv, &attributes.attr, attributes.items,
2810                                 attributes.actions, &error, &parser);
2811         if (ret)
2812                 return -ret;
2813         flow = priv_flow_create(priv,
2814                                 &priv->flows,
2815                                 &attributes.attr,
2816                                 attributes.items,
2817                                 attributes.actions,
2818                                 &error);
2819         if (flow) {
2820                 DEBUG("FDIR created %p", (void *)flow);
2821                 return 0;
2822         }
2823         return ENOTSUP;
2824 }
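/*
 * Illustrative only, with a hypothetical "port_id": applications do not
 * call priv_fdir_filter_add() directly but reach it through the generic
 * filtering API, e.g.:
 *
 *   struct rte_eth_fdir_filter f = {
 *           .input.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP,
 *           .action.rx_queue = 1,
 *   };
 *
 *   f.input.flow.udp4_flow.ip.dst_ip = rte_cpu_to_be_32(IPv4(10, 0, 0, 1));
 *   f.input.flow.udp4_flow.dst_port = rte_cpu_to_be_16(4789);
 *   rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
 *                           RTE_ETH_FILTER_ADD, &f);
 */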
2825
2826 /**
2827  * Delete specific filter.
2828  *
2829  * @param priv
2830  *   Private structure.
2831  * @param fdir_filter
2832  *   Filter to be deleted.
2833  *
2834  * @return
2835  *   0 on success, errno value on failure.
2836  */
2837 static int
2838 priv_fdir_filter_delete(struct priv *priv,
2839                         const struct rte_eth_fdir_filter *fdir_filter)
2840 {
2841         struct mlx5_fdir attributes = {
2842                 .attr.group = 0,
2843         };
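        /*
         * Unlike the add path, the parser runs with .create set so that
         * priv_flow_convert() emits the final verbs specifications; the
         * byte-wise comparison below relies on them.
         */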
2844         struct mlx5_flow_parse parser = {
2845                 .create = 1,
2846                 .layer = HASH_RXQ_ETH,
2847         };
2848         struct rte_flow_error error;
2849         struct rte_flow *flow;
2850         unsigned int i;
2851         int ret;
2852
2853         ret = priv_fdir_filter_convert(priv, fdir_filter, &attributes);
2854         if (ret)
2855                 return ret;
2856         ret = priv_flow_convert(priv, &attributes.attr, attributes.items,
2857                                 attributes.actions, &error, &parser);
2858         if (ret)
2859                 goto exit;
2860         /*
2861          * Special case for the drop action, which is only added to the
2862          * verbs specifications when a flow is actually created; it is
2863          * therefore missing here and must be appended before comparing.
2864          */
2865         if (parser.drop) {
2866                 struct ibv_flow_spec_action_drop *drop;
2867
2868                 drop = (void *)((uintptr_t)parser.queue[HASH_RXQ_ETH].ibv_attr +
2869                                 parser.queue[HASH_RXQ_ETH].offset);
2870                 *drop = (struct ibv_flow_spec_action_drop){
2871                         .type = IBV_FLOW_SPEC_ACTION_DROP,
2872                         .size = sizeof(struct ibv_flow_spec_action_drop),
2873                 };
2874                 parser.queue[HASH_RXQ_ETH].ibv_attr->num_of_specs++;
2875         }
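        /*
         * Look for an existing flow whose attributes and specifications
         * are byte-for-byte identical to the rebuilt ones.  The initial
         * memcmp() on struct ibv_flow_attr already covers num_of_specs,
         * so the RTE_MIN() below is purely defensive.
         */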
2876         TAILQ_FOREACH(flow, &priv->flows, next) {
2877                 struct ibv_flow_attr *attr;
2878                 struct ibv_spec_header *attr_h;
2879                 void *spec;
2880                 struct ibv_flow_attr *flow_attr;
2881                 struct ibv_spec_header *flow_h;
2882                 void *flow_spec;
2883                 unsigned int specs_n;
2884
2885                 attr = parser.queue[HASH_RXQ_ETH].ibv_attr;
2886                 flow_attr = flow->frxq[HASH_RXQ_ETH].ibv_attr;
2887                 /* Compare first the attributes. */
2888                 if (memcmp(attr, flow_attr, sizeof(struct ibv_flow_attr)))
2889                         continue;
2890                 if (attr->num_of_specs == 0)
2891                         continue;
2892                 spec = (void *)((uintptr_t)attr +
2893                                 sizeof(struct ibv_flow_attr));
2894                 flow_spec = (void *)((uintptr_t)flow_attr +
2895                                      sizeof(struct ibv_flow_attr));
2896                 specs_n = RTE_MIN(attr->num_of_specs, flow_attr->num_of_specs);
2897                 for (i = 0; i != specs_n; ++i) {
2898                         attr_h = spec;
2899                         flow_h = flow_spec;
2900                         if (memcmp(spec, flow_spec,
2901                                    RTE_MIN(attr_h->size, flow_h->size)))
2902                                 goto wrong_flow;
2903                         spec = (void *)((uintptr_t)spec + attr_h->size);
2904                         flow_spec = (void *)((uintptr_t)flow_spec +
2905                                              flow_h->size);
2906                 }
2907                 /* At this point, the flow matches. */
2908                 break;
2909 wrong_flow:
2910                 /* The flow does not match. */
2911                 continue;
2912         }
2913         if (flow)
2914                 priv_flow_destroy(priv, &priv->flows, flow);
2915 exit:
2916         for (i = 0; i != hash_rxq_init_n; ++i) {
2917                 if (parser.queue[i].ibv_attr)
2918                         rte_free(parser.queue[i].ibv_attr);
2919         }
2920         return -ret;
2921 }
2922
2923 /**
2924  * Update queue for specific filter.
2925  *
2926  * @param priv
2927  *   Private structure.
2928  * @param fdir_filter
2929  *   Filter to be updated.
2930  *
2931  * @return
2932  *   0 on success, errno value on failure.
2933  */
2934 static int
2935 priv_fdir_filter_update(struct priv *priv,
2936                         const struct rte_eth_fdir_filter *fdir_filter)
2937 {
2938         int ret;
2939
2940         ret = priv_fdir_filter_delete(priv, fdir_filter);
2941         if (ret)
2942                 return ret;
2943         ret = priv_fdir_filter_add(priv, fdir_filter);
2944         return ret;
2945 }
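/*
 * Note that the update is not atomic: if priv_fdir_filter_add() fails
 * after a successful delete, the original filter is lost.
 */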
2946
2947 /**
2948  * Flush all filters.
2949  *
2950  * @param priv
2951  *   Private structure.
2952  */
2953 static void
2954 priv_fdir_filter_flush(struct priv *priv)
2955 {
2956         priv_flow_flush(priv, &priv->flows);
2957 }
2958
2959 /**
2960  * Get flow director information.
2961  *
2962  * @param priv
2963  *   Private structure.
2964  * @param[out] fdir_info
2965  *   Resulting flow director information.
2966  */
2967 static void
2968 priv_fdir_info_get(struct priv *priv, struct rte_eth_fdir_info *fdir_info)
2969 {
2970         struct rte_eth_fdir_masks *mask =
2971                 &priv->dev->data->dev_conf.fdir_conf.mask;
2972
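        /*
         * Only the mode and the global mask are meaningful; guaranteed
         * filter space and flexible payload are unsupported, so the
         * remaining capability fields are reported as zero.
         */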
2973         fdir_info->mode = priv->dev->data->dev_conf.fdir_conf.mode;
2974         fdir_info->guarant_spc = 0;
2975         rte_memcpy(&fdir_info->mask, mask, sizeof(fdir_info->mask));
2976         fdir_info->max_flexpayload = 0;
2977         fdir_info->flow_types_mask[0] = 0;
2978         fdir_info->flex_payload_unit = 0;
2979         fdir_info->max_flex_payload_segment_num = 0;
2980         fdir_info->flex_payload_limit = 0;
2981         memset(&fdir_info->flex_conf, 0, sizeof(fdir_info->flex_conf));
2982 }
2983
2984 /**
2985  * Deal with flow director operations.
2986  *
2987  * @param priv
2988  *   Pointer to private structure.
2989  * @param filter_op
2990  *   Operation to perform.
2991  * @param arg
2992  *   Pointer to operation-specific structure.
2993  *
2994  * @return
2995  *   0 on success, errno value on failure.
2996  */
2997 static int
2998 priv_fdir_ctrl_func(struct priv *priv, enum rte_filter_op filter_op, void *arg)
2999 {
3000         enum rte_fdir_mode fdir_mode =
3001                 priv->dev->data->dev_conf.fdir_conf.mode;
3002         int ret = 0;
3003
3004         if (filter_op == RTE_ETH_FILTER_NOP)
3005                 return 0;
3006         if (fdir_mode != RTE_FDIR_MODE_PERFECT &&
3007             fdir_mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
3008                 ERROR("%p: flow director mode %d not supported",
3009                       (void *)priv, fdir_mode);
3010                 return EINVAL;
3011         }
3012         switch (filter_op) {
3013         case RTE_ETH_FILTER_ADD:
3014                 ret = priv_fdir_filter_add(priv, arg);
3015                 break;
3016         case RTE_ETH_FILTER_UPDATE:
3017                 ret = priv_fdir_filter_update(priv, arg);
3018                 break;
3019         case RTE_ETH_FILTER_DELETE:
3020                 ret = priv_fdir_filter_delete(priv, arg);
3021                 break;
3022         case RTE_ETH_FILTER_FLUSH:
3023                 priv_fdir_filter_flush(priv);
3024                 break;
3025         case RTE_ETH_FILTER_INFO:
3026                 priv_fdir_info_get(priv, arg);
3027                 break;
3028         default:
3029                 DEBUG("%p: unknown operation %u", (void *)priv,
3030                       filter_op);
3031                 ret = EINVAL;
3032                 break;
3033         }
3034         return ret;
3035 }
3036
3037 /**
3038  * Manage filter operations.
3039  *
3040  * @param dev
3041  *   Pointer to Ethernet device structure.
3042  * @param filter_type
3043  *   Filter type.
3044  * @param filter_op
3045  *   Operation to perform.
3046  * @param arg
3047  *   Pointer to operation-specific structure.
3048  *
3049  * @return
3050  *   0 on success, negative errno value on failure.
3051  */
3052 int
3053 mlx5_dev_filter_ctrl(struct rte_eth_dev *dev,
3054                      enum rte_filter_type filter_type,
3055                      enum rte_filter_op filter_op,
3056                      void *arg)
3057 {
3058         int ret = EINVAL;
3059         struct priv *priv = dev->data->dev_private;
3060
3061         switch (filter_type) {
3062         case RTE_ETH_FILTER_GENERIC:
3063                 if (filter_op != RTE_ETH_FILTER_GET)
3064                         return -EINVAL;
3065                 *(const void **)arg = &mlx5_flow_ops;
3066                 return 0;
3067         case RTE_ETH_FILTER_FDIR:
3068                 priv_lock(priv);
3069                 ret = priv_fdir_ctrl_func(priv, filter_op, arg);
3070                 priv_unlock(priv);
3071                 break;
3072         default:
3073                 ERROR("%p: filter type (%d) not supported",
3074                       (void *)dev, filter_type);
3075                 break;
3076         }
3077         return -ret;
3078 }
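/*
 * Illustrative only, with a hypothetical "port_id": the rte_flow layer
 * retrieves the driver callbacks through this same entry point:
 *
 *   const struct rte_flow_ops *ops;
 *
 *   rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_GENERIC,
 *                           RTE_ETH_FILTER_GET, &ops);
 */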