net/mlx5: use flow to enable all multi mode
[dpdk.git] drivers/net/mlx5/mlx5_rxmode.c
/*-
 *   BSD LICENSE
 *
 *   Copyright 2015 6WIND S.A.
 *   Copyright 2015 Mellanox.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of 6WIND S.A. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stddef.h>
#include <errno.h>
#include <string.h>

/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_ethdev.h>

#include "mlx5.h"
#include "mlx5_rxtx.h"
#include "mlx5_utils.h"

/* Initialization data for special flows. */
static const struct special_flow_init special_flow_init[] = {
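        /* Broadcast frames: match the all-ones destination MAC exactly. */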
        [HASH_RXQ_FLOW_TYPE_BROADCAST] = {
                .dst_mac_val = "\xff\xff\xff\xff\xff\xff",
                .dst_mac_mask = "\xff\xff\xff\xff\xff\xff",
                .hash_types =
                        1 << HASH_RXQ_UDPV4 |
                        1 << HASH_RXQ_IPV4 |
                        1 << HASH_RXQ_UDPV6 |
                        1 << HASH_RXQ_IPV6 |
                        1 << HASH_RXQ_ETH |
                        0,
                .per_vlan = 1,
        },
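        /*
         * IPv6 multicast: RFC 2464 maps these packets to MAC addresses
         * starting with 33:33, so only the two-byte prefix is matched.
         */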
        [HASH_RXQ_FLOW_TYPE_IPV6MULTI] = {
                .dst_mac_val = "\x33\x33\x00\x00\x00\x00",
                .dst_mac_mask = "\xff\xff\x00\x00\x00\x00",
                .hash_types =
                        1 << HASH_RXQ_UDPV6 |
                        1 << HASH_RXQ_IPV6 |
                        1 << HASH_RXQ_ETH |
                        0,
                .per_vlan = 1,
        },
};

/**
 * Enable a special flow in a hash RX queue for a given VLAN index.
 *
 * @param hash_rxq
 *   Pointer to hash RX queue structure.
 * @param flow_type
 *   Special flow type.
 * @param vlan_index
 *   VLAN index to use.
 *
 * @return
 *   0 on success, errno value on failure.
 */
static int
hash_rxq_special_flow_enable_vlan(struct hash_rxq *hash_rxq,
                                  enum hash_rxq_flow_type flow_type,
                                  unsigned int vlan_index)
{
        struct priv *priv = hash_rxq->priv;
        struct ibv_flow *flow;
        FLOW_ATTR_SPEC_ETH(data, priv_flow_attr(priv, NULL, 0, hash_rxq->type));
        struct ibv_flow_attr *attr = &data->attr;
        struct ibv_flow_spec_eth *spec = &data->spec;
        const uint8_t *mac;
        const uint8_t *mask;
        unsigned int vlan_enabled = (priv->vlan_filter_n &&
                                     special_flow_init[flow_type].per_vlan);
        unsigned int vlan_id = priv->vlan_filter[vlan_index];

        /* Check if flow is relevant for this hash_rxq. */
        if (!(special_flow_init[flow_type].hash_types & (1 << hash_rxq->type)))
                return 0;
        /* Check if flow already exists. */
        if (hash_rxq->special_flow[flow_type][vlan_index] != NULL)
                return 0;

        /*
         * No padding must be inserted by the compiler between attr and spec.
         * This layout is expected by libibverbs.
         */
        assert(((uint8_t *)attr + sizeof(*attr)) == (uint8_t *)spec);
        priv_flow_attr(priv, attr, sizeof(data), hash_rxq->type);
        /* The first specification must be Ethernet. */
        assert(spec->type == IBV_FLOW_SPEC_ETH);
        assert(spec->size == sizeof(*spec));

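        /*
         * Match on the destination MAC address and, when at least one VLAN
         * filter is configured, on the exact VLAN ID (0xfff masks the
         * 12-bit VID field of the tag).
         */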
        mac = special_flow_init[flow_type].dst_mac_val;
        mask = special_flow_init[flow_type].dst_mac_mask;
        *spec = (struct ibv_flow_spec_eth){
                .type = IBV_FLOW_SPEC_ETH,
                .size = sizeof(*spec),
                .val = {
                        .dst_mac = {
                                mac[0], mac[1], mac[2],
                                mac[3], mac[4], mac[5],
                        },
                        .vlan_tag = (vlan_enabled ?
                                     rte_cpu_to_be_16(vlan_id) :
                                     0),
                },
                .mask = {
                        .dst_mac = {
                                mask[0], mask[1], mask[2],
                                mask[3], mask[4], mask[5],
                        },
                        .vlan_tag = (vlan_enabled ?
                                     rte_cpu_to_be_16(0xfff) :
                                     0),
                },
        };

        errno = 0;
        flow = ibv_create_flow(hash_rxq->qp, attr);
        if (flow == NULL) {
                /* It's not clear whether errno is always set in this case. */
                ERROR("%p: flow configuration failed, errno=%d: %s",
                      (void *)hash_rxq, errno,
                      (errno ? strerror(errno) : "Unknown error"));
                if (errno)
                        return errno;
                return EINVAL;
        }
        hash_rxq->special_flow[flow_type][vlan_index] = flow;
        DEBUG("%p: special flow %s (index %d) VLAN %u (index %u) enabled",
              (void *)hash_rxq, hash_rxq_flow_type_str(flow_type), flow_type,
              vlan_id, vlan_index);
        return 0;
}

/**
 * Disable a special flow in a hash RX queue for a given VLAN index.
 *
 * @param hash_rxq
 *   Pointer to hash RX queue structure.
 * @param flow_type
 *   Special flow type.
 * @param vlan_index
 *   VLAN index to use.
 */
static void
hash_rxq_special_flow_disable_vlan(struct hash_rxq *hash_rxq,
                                   enum hash_rxq_flow_type flow_type,
                                   unsigned int vlan_index)
{
        struct ibv_flow *flow =
                hash_rxq->special_flow[flow_type][vlan_index];

        if (flow == NULL)
                return;
        claim_zero(ibv_destroy_flow(flow));
        hash_rxq->special_flow[flow_type][vlan_index] = NULL;
        DEBUG("%p: special flow %s (index %d) VLAN %u (index %u) disabled",
              (void *)hash_rxq, hash_rxq_flow_type_str(flow_type), flow_type,
              hash_rxq->priv->vlan_filter[vlan_index], vlan_index);
}

/**
 * Enable a special flow in a hash RX queue.
 *
 * @param hash_rxq
 *   Pointer to hash RX queue structure.
 * @param flow_type
 *   Special flow type.
 *
 * @return
 *   0 on success, errno value on failure.
 */
static int
hash_rxq_special_flow_enable(struct hash_rxq *hash_rxq,
                             enum hash_rxq_flow_type flow_type)
{
        struct priv *priv = hash_rxq->priv;
        unsigned int i = 0;
        int ret;

        assert((unsigned int)flow_type < RTE_DIM(hash_rxq->special_flow));
        assert(RTE_DIM(hash_rxq->special_flow[flow_type]) ==
               RTE_DIM(priv->vlan_filter));
        /* Add a special flow for each VLAN filter when relevant. */
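        /*
         * The loop body runs at least once: with no VLAN filters configured,
         * a single flow without VLAN matching is created.
         */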
        do {
                ret = hash_rxq_special_flow_enable_vlan(hash_rxq, flow_type, i);
                if (ret) {
                        /* Failure, rollback. */
                        while (i != 0)
                                hash_rxq_special_flow_disable_vlan(hash_rxq,
                                                                   flow_type,
                                                                   --i);
                        return ret;
                }
        } while (special_flow_init[flow_type].per_vlan &&
                 ++i < priv->vlan_filter_n);
        return 0;
}

/**
 * Disable a special flow in a hash RX queue.
 *
 * @param hash_rxq
 *   Pointer to hash RX queue structure.
 * @param flow_type
 *   Special flow type.
 */
static void
hash_rxq_special_flow_disable(struct hash_rxq *hash_rxq,
                              enum hash_rxq_flow_type flow_type)
{
        unsigned int i;

        assert((unsigned int)flow_type < RTE_DIM(hash_rxq->special_flow));
        for (i = 0; (i != RTE_DIM(hash_rxq->special_flow[flow_type])); ++i)
                hash_rxq_special_flow_disable_vlan(hash_rxq, flow_type, i);
}

/**
 * Enable a special flow in all hash RX queues.
 *
 * @param priv
 *   Private structure.
 * @param flow_type
 *   Special flow type.
 *
 * @return
 *   0 on success, errno value on failure.
 */
int
priv_special_flow_enable(struct priv *priv, enum hash_rxq_flow_type flow_type)
{
        unsigned int i;

        if (!priv_allow_flow_type(priv, flow_type))
                return 0;
        for (i = 0; (i != priv->hash_rxqs_n); ++i) {
                struct hash_rxq *hash_rxq = &(*priv->hash_rxqs)[i];
                int ret;

                ret = hash_rxq_special_flow_enable(hash_rxq, flow_type);
                if (!ret)
                        continue;
                /* Failure, rollback. */
                while (i != 0) {
                        hash_rxq = &(*priv->hash_rxqs)[--i];
                        hash_rxq_special_flow_disable(hash_rxq, flow_type);
                }
                return ret;
        }
        return 0;
}

/**
 * Disable a special flow in all hash RX queues.
 *
 * @param priv
 *   Private structure.
 * @param flow_type
 *   Special flow type.
 */
void
priv_special_flow_disable(struct priv *priv, enum hash_rxq_flow_type flow_type)
{
        unsigned int i;

        for (i = 0; (i != priv->hash_rxqs_n); ++i) {
                struct hash_rxq *hash_rxq = &(*priv->hash_rxqs)[i];

                hash_rxq_special_flow_disable(hash_rxq, flow_type);
        }
}

/**
 * Enable all special flows in all hash RX queues.
 *
 * @param priv
 *   Private structure.
 *
 * @return
 *   0 on success, errno value on failure.
 */
int
priv_special_flow_enable_all(struct priv *priv)
{
        enum hash_rxq_flow_type flow_type;

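        /* In isolated mode, flow rules are entirely managed by the application. */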
        if (priv->isolated)
                return 0;
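        /* All special flow types precede HASH_RXQ_FLOW_TYPE_MAC in the enum. */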
        for (flow_type = HASH_RXQ_FLOW_TYPE_BROADCAST;
                        flow_type != HASH_RXQ_FLOW_TYPE_MAC;
                        ++flow_type) {
                int ret;

                ret = priv_special_flow_enable(priv, flow_type);
                if (!ret)
                        continue;
                /* Failure, rollback. */
                while (flow_type)
                        priv_special_flow_disable(priv, --flow_type);
                return ret;
        }
        return 0;
}

/**
 * Disable all special flows in all hash RX queues.
 *
 * @param priv
 *   Private structure.
 */
void
priv_special_flow_disable_all(struct priv *priv)
{
        enum hash_rxq_flow_type flow_type;

        for (flow_type = HASH_RXQ_FLOW_TYPE_BROADCAST;
                        flow_type != HASH_RXQ_FLOW_TYPE_MAC;
                        ++flow_type)
                priv_special_flow_disable(priv, flow_type);
}

/**
 * DPDK callback to enable promiscuous mode.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
void
mlx5_promiscuous_enable(struct rte_eth_dev *dev)
{
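        /*
         * An all-zero spec with an all-zero mask matches every frame; the
         * same structure is passed to mlx5_ctrl_flow() as both spec and mask.
         */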
        struct rte_flow_item_eth eth = {
                .dst.addr_bytes = "\x00\x00\x00\x00\x00\x00",
                .src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
                .type = 0,
        };

        if (mlx5_is_secondary())
                return;
        dev->data->promiscuous = 1;
        claim_zero(mlx5_ctrl_flow(dev, &eth, &eth, 1));
}

/**
 * DPDK callback to disable promiscuous mode.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
void
mlx5_promiscuous_disable(struct rte_eth_dev *dev)
{
        struct rte_flow_item_eth eth = {
                .dst.addr_bytes = "\x00\x00\x00\x00\x00\x00",
                .src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
                .type = 0,
        };

        if (mlx5_is_secondary())
                return;
        dev->data->promiscuous = 0;
        claim_zero(mlx5_ctrl_flow(dev, &eth, &eth, 0));
}

/**
 * DPDK callback to enable allmulti mode.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
void
mlx5_allmulticast_enable(struct rte_eth_dev *dev)
{
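        /*
         * 0x01 in the first octet of a MAC address is the Ethernet group
         * (multicast) bit. The same structure is passed to mlx5_ctrl_flow()
         * as both spec and mask, so only that bit is compared.
         */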
        struct rte_flow_item_eth eth = {
                .dst.addr_bytes = "\x01\x00\x00\x00\x00\x00",
                .src.addr_bytes = "\x01\x00\x00\x00\x00\x00",
                .type = 0,
        };

        if (mlx5_is_secondary())
                return;
        dev->data->all_multicast = 1;
        if (dev->data->dev_started)
                claim_zero(mlx5_ctrl_flow(dev, &eth, &eth, 1));
}

/**
 * DPDK callback to disable allmulti mode.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
void
mlx5_allmulticast_disable(struct rte_eth_dev *dev)
{
        struct rte_flow_item_eth eth = {
                .dst.addr_bytes = "\x01\x00\x00\x00\x00\x00",
                .src.addr_bytes = "\x01\x00\x00\x00\x00\x00",
                .type = 0,
        };

        if (mlx5_is_secondary())
                return;
        dev->data->all_multicast = 0;
        if (dev->data->dev_started)
                claim_zero(mlx5_ctrl_flow(dev, &eth, &eth, 0));
}