net/mlx5: support upstream rdma-core
[dpdk.git] / drivers / net / mlx5 / mlx5_rxmode.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright 2015 6WIND S.A.
5  *   Copyright 2015 Mellanox.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of 6WIND S.A. nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <stddef.h>
35 #include <errno.h>
36 #include <string.h>
37
38 /* Verbs header. */
39 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
40 #ifdef PEDANTIC
41 #pragma GCC diagnostic ignored "-Wpedantic"
42 #endif
43 #include <infiniband/verbs.h>
44 #ifdef PEDANTIC
45 #pragma GCC diagnostic error "-Wpedantic"
46 #endif
47
48 #include <rte_ethdev.h>
49
50 #include "mlx5.h"
51 #include "mlx5_rxtx.h"
52 #include "mlx5_utils.h"
53
/*
 * Initialization data for special flows.
 *
 * Each entry describes one flow type: the destination MAC value/mask to
 * match, the set of hash RX queue types the flow applies to (bit-mask of
 * HASH_RXQ_* indices), and whether one rule must be created per configured
 * VLAN filter (.per_vlan).
 */
static const struct special_flow_init special_flow_init[] = {
	/* Promiscuous: all-zero mask matches any destination MAC. */
	[HASH_RXQ_FLOW_TYPE_PROMISC] = {
		.dst_mac_val = "\x00\x00\x00\x00\x00\x00",
		.dst_mac_mask = "\x00\x00\x00\x00\x00\x00",
		.hash_types =
			1 << HASH_RXQ_TCPV4 |
			1 << HASH_RXQ_UDPV4 |
			1 << HASH_RXQ_IPV4 |
			1 << HASH_RXQ_TCPV6 |
			1 << HASH_RXQ_UDPV6 |
			1 << HASH_RXQ_IPV6 |
			1 << HASH_RXQ_ETH |
			0,
		.per_vlan = 0,
	},
	/* All-multicast: match only the group (multicast) address bit. */
	[HASH_RXQ_FLOW_TYPE_ALLMULTI] = {
		.dst_mac_val = "\x01\x00\x00\x00\x00\x00",
		.dst_mac_mask = "\x01\x00\x00\x00\x00\x00",
		.hash_types =
			1 << HASH_RXQ_UDPV4 |
			1 << HASH_RXQ_IPV4 |
			1 << HASH_RXQ_UDPV6 |
			1 << HASH_RXQ_IPV6 |
			1 << HASH_RXQ_ETH |
			0,
		.per_vlan = 0,
	},
	/* Broadcast: exact match on ff:ff:ff:ff:ff:ff, one rule per VLAN. */
	[HASH_RXQ_FLOW_TYPE_BROADCAST] = {
		.dst_mac_val = "\xff\xff\xff\xff\xff\xff",
		.dst_mac_mask = "\xff\xff\xff\xff\xff\xff",
		.hash_types =
			1 << HASH_RXQ_UDPV4 |
			1 << HASH_RXQ_IPV4 |
			1 << HASH_RXQ_UDPV6 |
			1 << HASH_RXQ_IPV6 |
			1 << HASH_RXQ_ETH |
			0,
		.per_vlan = 1,
	},
	/* IPv6 multicast: match the 33:33 prefix, one rule per VLAN. */
	[HASH_RXQ_FLOW_TYPE_IPV6MULTI] = {
		.dst_mac_val = "\x33\x33\x00\x00\x00\x00",
		.dst_mac_mask = "\xff\xff\x00\x00\x00\x00",
		.hash_types =
			1 << HASH_RXQ_UDPV6 |
			1 << HASH_RXQ_IPV6 |
			1 << HASH_RXQ_ETH |
			0,
		.per_vlan = 1,
	},
};
105
/**
 * Enable a special flow in a hash RX queue for a given VLAN index.
 *
 * Builds an Ethernet flow specification from special_flow_init[flow_type]
 * and creates the corresponding ibverbs flow rule on the queue pair.
 * Does nothing when the flow type is irrelevant for this queue type or
 * when the rule already exists.
 *
 * @param hash_rxq
 *   Pointer to hash RX queue structure.
 * @param flow_type
 *   Special flow type.
 * @param vlan_index
 *   VLAN index to use.
 *
 * @return
 *   0 on success, errno value on failure.
 */
static int
hash_rxq_special_flow_enable_vlan(struct hash_rxq *hash_rxq,
                                  enum hash_rxq_flow_type flow_type,
                                  unsigned int vlan_index)
{
        struct priv *priv = hash_rxq->priv;
        struct ibv_flow *flow;
        /* Attribute buffer sized by a first priv_flow_attr() query. */
        FLOW_ATTR_SPEC_ETH(data, priv_flow_attr(priv, NULL, 0, hash_rxq->type));
        struct ibv_flow_attr *attr = &data->attr;
        struct ibv_flow_spec_eth *spec = &data->spec;
        const uint8_t *mac;
        const uint8_t *mask;
        /* Match on VLAN only when filters are set and the flow is per-VLAN. */
        unsigned int vlan_enabled = (priv->vlan_filter_n &&
                                     special_flow_init[flow_type].per_vlan);
        unsigned int vlan_id = priv->vlan_filter[vlan_index];

        /* Check if flow is relevant for this hash_rxq. */
        if (!(special_flow_init[flow_type].hash_types & (1 << hash_rxq->type)))
                return 0;
        /* Check if flow already exists. */
        if (hash_rxq->special_flow[flow_type][vlan_index] != NULL)
                return 0;

        /*
         * No padding must be inserted by the compiler between attr and spec.
         * This layout is expected by libibverbs.
         */
        assert(((uint8_t *)attr + sizeof(*attr)) == (uint8_t *)spec);
        /* Second call actually fills attr (and the embedded spec). */
        priv_flow_attr(priv, attr, sizeof(data), hash_rxq->type);
        /* The first specification must be Ethernet. */
        assert(spec->type == IBV_FLOW_SPEC_ETH);
        assert(spec->size == sizeof(*spec));

        mac = special_flow_init[flow_type].dst_mac_val;
        mask = special_flow_init[flow_type].dst_mac_mask;
        *spec = (struct ibv_flow_spec_eth){
                .type = IBV_FLOW_SPEC_ETH,
                .size = sizeof(*spec),
                .val = {
                        .dst_mac = {
                                mac[0], mac[1], mac[2],
                                mac[3], mac[4], mac[5],
                        },
                        .vlan_tag = (vlan_enabled ?
                                     rte_cpu_to_be_16(vlan_id) :
                                     0),
                },
                .mask = {
                        .dst_mac = {
                                mask[0], mask[1], mask[2],
                                mask[3], mask[4], mask[5],
                        },
                        /* 0xfff covers the 12-bit VLAN ID field. */
                        .vlan_tag = (vlan_enabled ?
                                     rte_cpu_to_be_16(0xfff) :
                                     0),
                },
        };

        errno = 0;
        flow = ibv_create_flow(hash_rxq->qp, attr);
        if (flow == NULL) {
                /* It's not clear whether errno is always set in this case. */
                ERROR("%p: flow configuration failed, errno=%d: %s",
                      (void *)hash_rxq, errno,
                      (errno ? strerror(errno) : "Unknown error"));
                if (errno)
                        return errno;
                return EINVAL;
        }
        hash_rxq->special_flow[flow_type][vlan_index] = flow;
        DEBUG("%p: special flow %s (index %d) VLAN %u (index %u) enabled",
              (void *)hash_rxq, hash_rxq_flow_type_str(flow_type), flow_type,
              vlan_id, vlan_index);
        return 0;
}
194
195 /**
196  * Disable a special flow in a hash RX queue for a given VLAN index.
197  *
198  * @param hash_rxq
199  *   Pointer to hash RX queue structure.
200  * @param flow_type
201  *   Special flow type.
202  * @param vlan_index
203  *   VLAN index to use.
204  */
205 static void
206 hash_rxq_special_flow_disable_vlan(struct hash_rxq *hash_rxq,
207                                    enum hash_rxq_flow_type flow_type,
208                                    unsigned int vlan_index)
209 {
210         struct ibv_flow *flow =
211                 hash_rxq->special_flow[flow_type][vlan_index];
212
213         if (flow == NULL)
214                 return;
215         claim_zero(ibv_destroy_flow(flow));
216         hash_rxq->special_flow[flow_type][vlan_index] = NULL;
217         DEBUG("%p: special flow %s (index %d) VLAN %u (index %u) disabled",
218               (void *)hash_rxq, hash_rxq_flow_type_str(flow_type), flow_type,
219               hash_rxq->priv->vlan_filter[vlan_index], vlan_index);
220 }
221
/**
 * Enable a special flow in a hash RX queue.
 *
 * A single rule is created at VLAN index 0; when the flow type is per-VLAN
 * and VLAN filters are configured, one rule is created for each enabled
 * VLAN filter instead. On failure, rules already created by this call are
 * rolled back.
 *
 * @param hash_rxq
 *   Pointer to hash RX queue structure.
 * @param flow_type
 *   Special flow type.
 *
 * @return
 *   0 on success, errno value on failure.
 */
static int
hash_rxq_special_flow_enable(struct hash_rxq *hash_rxq,
                             enum hash_rxq_flow_type flow_type)
{
        struct priv *priv = hash_rxq->priv;
        unsigned int i = 0;
        int ret;

        assert((unsigned int)flow_type < RTE_DIM(hash_rxq->special_flow));
        assert(RTE_DIM(hash_rxq->special_flow[flow_type]) ==
               RTE_DIM(priv->vlan_filter));
        /* Add a special flow for each VLAN filter when relevant. */
        do {
                ret = hash_rxq_special_flow_enable_vlan(hash_rxq, flow_type, i);
                if (ret) {
                        /* Failure, rollback. */
                        while (i != 0)
                                hash_rxq_special_flow_disable_vlan(hash_rxq,
                                                                   flow_type,
                                                                   --i);
                        return ret;
                }
        } while (special_flow_init[flow_type].per_vlan &&
                 ++i < priv->vlan_filter_n);
        return 0;
}
261
262 /**
263  * Disable a special flow in a hash RX queue.
264  *
265  * @param hash_rxq
266  *   Pointer to hash RX queue structure.
267  * @param flow_type
268  *   Special flow type.
269  */
270 static void
271 hash_rxq_special_flow_disable(struct hash_rxq *hash_rxq,
272                               enum hash_rxq_flow_type flow_type)
273 {
274         unsigned int i;
275
276         assert((unsigned int)flow_type < RTE_DIM(hash_rxq->special_flow));
277         for (i = 0; (i != RTE_DIM(hash_rxq->special_flow[flow_type])); ++i)
278                 hash_rxq_special_flow_disable_vlan(hash_rxq, flow_type, i);
279 }
280
281 /**
282  * Enable a special flow in all hash RX queues.
283  *
284  * @param priv
285  *   Private structure.
286  * @param flow_type
287  *   Special flow type.
288  *
289  * @return
290  *   0 on success, errno value on failure.
291  */
292 int
293 priv_special_flow_enable(struct priv *priv, enum hash_rxq_flow_type flow_type)
294 {
295         unsigned int i;
296
297         if (!priv_allow_flow_type(priv, flow_type))
298                 return 0;
299         for (i = 0; (i != priv->hash_rxqs_n); ++i) {
300                 struct hash_rxq *hash_rxq = &(*priv->hash_rxqs)[i];
301                 int ret;
302
303                 ret = hash_rxq_special_flow_enable(hash_rxq, flow_type);
304                 if (!ret)
305                         continue;
306                 /* Failure, rollback. */
307                 while (i != 0) {
308                         hash_rxq = &(*priv->hash_rxqs)[--i];
309                         hash_rxq_special_flow_disable(hash_rxq, flow_type);
310                 }
311                 return ret;
312         }
313         return 0;
314 }
315
316 /**
317  * Disable a special flow in all hash RX queues.
318  *
319  * @param priv
320  *   Private structure.
321  * @param flow_type
322  *   Special flow type.
323  */
324 void
325 priv_special_flow_disable(struct priv *priv, enum hash_rxq_flow_type flow_type)
326 {
327         unsigned int i;
328
329         for (i = 0; (i != priv->hash_rxqs_n); ++i) {
330                 struct hash_rxq *hash_rxq = &(*priv->hash_rxqs)[i];
331
332                 hash_rxq_special_flow_disable(hash_rxq, flow_type);
333         }
334 }
335
336 /**
337  * Enable all special flows in all hash RX queues.
338  *
339  * @param priv
340  *   Private structure.
341  */
342 int
343 priv_special_flow_enable_all(struct priv *priv)
344 {
345         enum hash_rxq_flow_type flow_type;
346
347         if (priv->isolated)
348                 return 0;
349         for (flow_type = HASH_RXQ_FLOW_TYPE_PROMISC;
350                         flow_type != HASH_RXQ_FLOW_TYPE_MAC;
351                         ++flow_type) {
352                 int ret;
353
354                 ret = priv_special_flow_enable(priv, flow_type);
355                 if (!ret)
356                         continue;
357                 /* Failure, rollback. */
358                 while (flow_type)
359                         priv_special_flow_disable(priv, --flow_type);
360                 return ret;
361         }
362         return 0;
363 }
364
365 /**
366  * Disable all special flows in all hash RX queues.
367  *
368  * @param priv
369  *   Private structure.
370  */
371 void
372 priv_special_flow_disable_all(struct priv *priv)
373 {
374         enum hash_rxq_flow_type flow_type;
375
376         for (flow_type = HASH_RXQ_FLOW_TYPE_PROMISC;
377                         flow_type != HASH_RXQ_FLOW_TYPE_MAC;
378                         ++flow_type)
379                 priv_special_flow_disable(priv, flow_type);
380 }
381
382 /**
383  * DPDK callback to enable promiscuous mode.
384  *
385  * @param dev
386  *   Pointer to Ethernet device structure.
387  */
388 void
389 mlx5_promiscuous_enable(struct rte_eth_dev *dev)
390 {
391         struct priv *priv = dev->data->dev_private;
392         int ret;
393
394         if (mlx5_is_secondary())
395                 return;
396
397         priv_lock(priv);
398         priv->promisc_req = 1;
399         ret = priv_rehash_flows(priv);
400         if (ret)
401                 ERROR("error while enabling promiscuous mode: %s",
402                       strerror(ret));
403         priv_unlock(priv);
404 }
405
406 /**
407  * DPDK callback to disable promiscuous mode.
408  *
409  * @param dev
410  *   Pointer to Ethernet device structure.
411  */
412 void
413 mlx5_promiscuous_disable(struct rte_eth_dev *dev)
414 {
415         struct priv *priv = dev->data->dev_private;
416         int ret;
417
418         if (mlx5_is_secondary())
419                 return;
420
421         priv_lock(priv);
422         priv->promisc_req = 0;
423         ret = priv_rehash_flows(priv);
424         if (ret)
425                 ERROR("error while disabling promiscuous mode: %s",
426                       strerror(ret));
427         priv_unlock(priv);
428 }
429
430 /**
431  * DPDK callback to enable allmulti mode.
432  *
433  * @param dev
434  *   Pointer to Ethernet device structure.
435  */
436 void
437 mlx5_allmulticast_enable(struct rte_eth_dev *dev)
438 {
439         struct priv *priv = dev->data->dev_private;
440         int ret;
441
442         if (mlx5_is_secondary())
443                 return;
444
445         priv_lock(priv);
446         priv->allmulti_req = 1;
447         ret = priv_rehash_flows(priv);
448         if (ret)
449                 ERROR("error while enabling allmulticast mode: %s",
450                       strerror(ret));
451         priv_unlock(priv);
452 }
453
454 /**
455  * DPDK callback to disable allmulti mode.
456  *
457  * @param dev
458  *   Pointer to Ethernet device structure.
459  */
460 void
461 mlx5_allmulticast_disable(struct rte_eth_dev *dev)
462 {
463         struct priv *priv = dev->data->dev_private;
464         int ret;
465
466         if (mlx5_is_secondary())
467                 return;
468
469         priv_lock(priv);
470         priv->allmulti_req = 0;
471         ret = priv_rehash_flows(priv);
472         if (ret)
473                 ERROR("error while disabling allmulticast mode: %s",
474                       strerror(ret));
475         priv_unlock(priv);
476 }