net/mlx5: use flow to enable promiscuous mode
[dpdk.git] / drivers / net / mlx5 / mlx5_rxmode.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright 2015 6WIND S.A.
5  *   Copyright 2015 Mellanox.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of 6WIND S.A. nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <stddef.h>
35 #include <errno.h>
36 #include <string.h>
37
38 /* Verbs header. */
39 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
40 #ifdef PEDANTIC
41 #pragma GCC diagnostic ignored "-Wpedantic"
42 #endif
43 #include <infiniband/verbs.h>
44 #ifdef PEDANTIC
45 #pragma GCC diagnostic error "-Wpedantic"
46 #endif
47
48 #include <rte_ethdev.h>
49
50 #include "mlx5.h"
51 #include "mlx5_rxtx.h"
52 #include "mlx5_utils.h"
53
/* Initialization data for special flows.
 *
 * Each entry is a template: a destination MAC value/mask pair matched by
 * the flow, the set of hash RX queue types (bitmask of 1 << HASH_RXQ_*)
 * the flow applies to, and whether one flow must be created per configured
 * VLAN filter (per_vlan). */
static const struct special_flow_init special_flow_init[] = {
	/* All-multicast: match any MAC with the group/multicast bit set
	 * (lowest bit of the first octet). Not VLAN-specific. */
	[HASH_RXQ_FLOW_TYPE_ALLMULTI] = {
		.dst_mac_val = "\x01\x00\x00\x00\x00\x00",
		.dst_mac_mask = "\x01\x00\x00\x00\x00\x00",
		.hash_types =
			1 << HASH_RXQ_UDPV4 |
			1 << HASH_RXQ_IPV4 |
			1 << HASH_RXQ_UDPV6 |
			1 << HASH_RXQ_IPV6 |
			1 << HASH_RXQ_ETH |
			0,
		.per_vlan = 0,
	},
	/* Broadcast: exact match on ff:ff:ff:ff:ff:ff, one flow per VLAN
	 * filter when filters are configured. */
	[HASH_RXQ_FLOW_TYPE_BROADCAST] = {
		.dst_mac_val = "\xff\xff\xff\xff\xff\xff",
		.dst_mac_mask = "\xff\xff\xff\xff\xff\xff",
		.hash_types =
			1 << HASH_RXQ_UDPV4 |
			1 << HASH_RXQ_IPV4 |
			1 << HASH_RXQ_UDPV6 |
			1 << HASH_RXQ_IPV6 |
			1 << HASH_RXQ_ETH |
			0,
		.per_vlan = 1,
	},
	/* IPv6 multicast: match the 33:33:xx:xx:xx:xx prefix used for IPv6
	 * multicast MAC addresses; only IPv6/ETH queue types apply. */
	[HASH_RXQ_FLOW_TYPE_IPV6MULTI] = {
		.dst_mac_val = "\x33\x33\x00\x00\x00\x00",
		.dst_mac_mask = "\xff\xff\x00\x00\x00\x00",
		.hash_types =
			1 << HASH_RXQ_UDPV6 |
			1 << HASH_RXQ_IPV6 |
			1 << HASH_RXQ_ETH |
			0,
		.per_vlan = 1,
	},
};
91
/**
 * Enable a special flow in a hash RX queue for a given VLAN index.
 *
 * Builds an Ethernet flow specification from the special_flow_init[]
 * template for this flow type, attaches it to the queue pair with
 * ibv_create_flow() and stores the resulting handle in
 * hash_rxq->special_flow[flow_type][vlan_index]. Does nothing (returns 0)
 * when the flow type is irrelevant for this queue's hash type or the flow
 * already exists.
 *
 * @param hash_rxq
 *   Pointer to hash RX queue structure.
 * @param flow_type
 *   Special flow type.
 * @param vlan_index
 *   VLAN index to use.
 *
 * @return
 *   0 on success, errno value on failure.
 */
static int
hash_rxq_special_flow_enable_vlan(struct hash_rxq *hash_rxq,
                                  enum hash_rxq_flow_type flow_type,
                                  unsigned int vlan_index)
{
	struct priv *priv = hash_rxq->priv;
	struct ibv_flow *flow;
	/* Stack storage sized by priv_flow_attr() (called with NULL to query
	 * the required size), holding the flow attributes immediately
	 * followed by one Ethernet specification. */
	FLOW_ATTR_SPEC_ETH(data, priv_flow_attr(priv, NULL, 0, hash_rxq->type));
	struct ibv_flow_attr *attr = &data->attr;
	struct ibv_flow_spec_eth *spec = &data->spec;
	const uint8_t *mac;
	const uint8_t *mask;
	/* Match on a VLAN tag only when filters are configured AND the
	 * template is per-VLAN. */
	unsigned int vlan_enabled = (priv->vlan_filter_n &&
				     special_flow_init[flow_type].per_vlan);
	/* Read unconditionally; only used when vlan_enabled is nonzero. */
	unsigned int vlan_id = priv->vlan_filter[vlan_index];

	/* Check if flow is relevant for this hash_rxq. */
	if (!(special_flow_init[flow_type].hash_types & (1 << hash_rxq->type)))
		return 0;
	/* Check if flow already exists. */
	if (hash_rxq->special_flow[flow_type][vlan_index] != NULL)
		return 0;

	/*
	 * No padding must be inserted by the compiler between attr and spec.
	 * This layout is expected by libibverbs.
	 */
	assert(((uint8_t *)attr + sizeof(*attr)) == (uint8_t *)spec);
	/* Second call actually fills in the attributes. */
	priv_flow_attr(priv, attr, sizeof(data), hash_rxq->type);
	/* The first specification must be Ethernet. */
	assert(spec->type == IBV_FLOW_SPEC_ETH);
	assert(spec->size == sizeof(*spec));

	mac = special_flow_init[flow_type].dst_mac_val;
	mask = special_flow_init[flow_type].dst_mac_mask;
	*spec = (struct ibv_flow_spec_eth){
		.type = IBV_FLOW_SPEC_ETH,
		.size = sizeof(*spec),
		.val = {
			.dst_mac = {
				mac[0], mac[1], mac[2],
				mac[3], mac[4], mac[5],
			},
			/* VLAN tag is given in network byte order. */
			.vlan_tag = (vlan_enabled ?
				     rte_cpu_to_be_16(vlan_id) :
				     0),
		},
		.mask = {
			.dst_mac = {
				mask[0], mask[1], mask[2],
				mask[3], mask[4], mask[5],
			},
			/* 0xfff masks only the 12-bit VLAN ID field. */
			.vlan_tag = (vlan_enabled ?
				     rte_cpu_to_be_16(0xfff) :
				     0),
		},
	};

	errno = 0;
	flow = ibv_create_flow(hash_rxq->qp, attr);
	if (flow == NULL) {
		/* It's not clear whether errno is always set in this case. */
		ERROR("%p: flow configuration failed, errno=%d: %s",
		      (void *)hash_rxq, errno,
		      (errno ? strerror(errno) : "Unknown error"));
		if (errno)
			return errno;
		return EINVAL;
	}
	hash_rxq->special_flow[flow_type][vlan_index] = flow;
	DEBUG("%p: special flow %s (index %d) VLAN %u (index %u) enabled",
	      (void *)hash_rxq, hash_rxq_flow_type_str(flow_type), flow_type,
	      vlan_id, vlan_index);
	return 0;
}
180
181 /**
182  * Disable a special flow in a hash RX queue for a given VLAN index.
183  *
184  * @param hash_rxq
185  *   Pointer to hash RX queue structure.
186  * @param flow_type
187  *   Special flow type.
188  * @param vlan_index
189  *   VLAN index to use.
190  */
191 static void
192 hash_rxq_special_flow_disable_vlan(struct hash_rxq *hash_rxq,
193                                    enum hash_rxq_flow_type flow_type,
194                                    unsigned int vlan_index)
195 {
196         struct ibv_flow *flow =
197                 hash_rxq->special_flow[flow_type][vlan_index];
198
199         if (flow == NULL)
200                 return;
201         claim_zero(ibv_destroy_flow(flow));
202         hash_rxq->special_flow[flow_type][vlan_index] = NULL;
203         DEBUG("%p: special flow %s (index %d) VLAN %u (index %u) disabled",
204               (void *)hash_rxq, hash_rxq_flow_type_str(flow_type), flow_type,
205               hash_rxq->priv->vlan_filter[vlan_index], vlan_index);
206 }
207
208 /**
209  * Enable a special flow in a hash RX queue.
210  *
211  * @param hash_rxq
212  *   Pointer to hash RX queue structure.
213  * @param flow_type
214  *   Special flow type.
215  * @param vlan_index
216  *   VLAN index to use.
217  *
218  * @return
219  *   0 on success, errno value on failure.
220  */
221 static int
222 hash_rxq_special_flow_enable(struct hash_rxq *hash_rxq,
223                              enum hash_rxq_flow_type flow_type)
224 {
225         struct priv *priv = hash_rxq->priv;
226         unsigned int i = 0;
227         int ret;
228
229         assert((unsigned int)flow_type < RTE_DIM(hash_rxq->special_flow));
230         assert(RTE_DIM(hash_rxq->special_flow[flow_type]) ==
231                RTE_DIM(priv->vlan_filter));
232         /* Add a special flow for each VLAN filter when relevant. */
233         do {
234                 ret = hash_rxq_special_flow_enable_vlan(hash_rxq, flow_type, i);
235                 if (ret) {
236                         /* Failure, rollback. */
237                         while (i != 0)
238                                 hash_rxq_special_flow_disable_vlan(hash_rxq,
239                                                                    flow_type,
240                                                                    --i);
241                         return ret;
242                 }
243         } while (special_flow_init[flow_type].per_vlan &&
244                  ++i < priv->vlan_filter_n);
245         return 0;
246 }
247
248 /**
249  * Disable a special flow in a hash RX queue.
250  *
251  * @param hash_rxq
252  *   Pointer to hash RX queue structure.
253  * @param flow_type
254  *   Special flow type.
255  */
256 static void
257 hash_rxq_special_flow_disable(struct hash_rxq *hash_rxq,
258                               enum hash_rxq_flow_type flow_type)
259 {
260         unsigned int i;
261
262         assert((unsigned int)flow_type < RTE_DIM(hash_rxq->special_flow));
263         for (i = 0; (i != RTE_DIM(hash_rxq->special_flow[flow_type])); ++i)
264                 hash_rxq_special_flow_disable_vlan(hash_rxq, flow_type, i);
265 }
266
267 /**
268  * Enable a special flow in all hash RX queues.
269  *
270  * @param priv
271  *   Private structure.
272  * @param flow_type
273  *   Special flow type.
274  *
275  * @return
276  *   0 on success, errno value on failure.
277  */
278 int
279 priv_special_flow_enable(struct priv *priv, enum hash_rxq_flow_type flow_type)
280 {
281         unsigned int i;
282
283         if (!priv_allow_flow_type(priv, flow_type))
284                 return 0;
285         for (i = 0; (i != priv->hash_rxqs_n); ++i) {
286                 struct hash_rxq *hash_rxq = &(*priv->hash_rxqs)[i];
287                 int ret;
288
289                 ret = hash_rxq_special_flow_enable(hash_rxq, flow_type);
290                 if (!ret)
291                         continue;
292                 /* Failure, rollback. */
293                 while (i != 0) {
294                         hash_rxq = &(*priv->hash_rxqs)[--i];
295                         hash_rxq_special_flow_disable(hash_rxq, flow_type);
296                 }
297                 return ret;
298         }
299         return 0;
300 }
301
302 /**
303  * Disable a special flow in all hash RX queues.
304  *
305  * @param priv
306  *   Private structure.
307  * @param flow_type
308  *   Special flow type.
309  */
310 void
311 priv_special_flow_disable(struct priv *priv, enum hash_rxq_flow_type flow_type)
312 {
313         unsigned int i;
314
315         for (i = 0; (i != priv->hash_rxqs_n); ++i) {
316                 struct hash_rxq *hash_rxq = &(*priv->hash_rxqs)[i];
317
318                 hash_rxq_special_flow_disable(hash_rxq, flow_type);
319         }
320 }
321
322 /**
323  * Enable all special flows in all hash RX queues.
324  *
325  * @param priv
326  *   Private structure.
327  */
328 int
329 priv_special_flow_enable_all(struct priv *priv)
330 {
331         enum hash_rxq_flow_type flow_type;
332
333         if (priv->isolated)
334                 return 0;
335         for (flow_type = HASH_RXQ_FLOW_TYPE_ALLMULTI;
336                         flow_type != HASH_RXQ_FLOW_TYPE_MAC;
337                         ++flow_type) {
338                 int ret;
339
340                 ret = priv_special_flow_enable(priv, flow_type);
341                 if (!ret)
342                         continue;
343                 /* Failure, rollback. */
344                 while (flow_type)
345                         priv_special_flow_disable(priv, --flow_type);
346                 return ret;
347         }
348         return 0;
349 }
350
351 /**
352  * Disable all special flows in all hash RX queues.
353  *
354  * @param priv
355  *   Private structure.
356  */
357 void
358 priv_special_flow_disable_all(struct priv *priv)
359 {
360         enum hash_rxq_flow_type flow_type;
361
362         for (flow_type = HASH_RXQ_FLOW_TYPE_ALLMULTI;
363                         flow_type != HASH_RXQ_FLOW_TYPE_MAC;
364                         ++flow_type)
365                 priv_special_flow_disable(priv, flow_type);
366 }
367
368 /**
369  * DPDK callback to enable promiscuous mode.
370  *
371  * @param dev
372  *   Pointer to Ethernet device structure.
373  */
374 void
375 mlx5_promiscuous_enable(struct rte_eth_dev *dev)
376 {
377         struct rte_flow_item_eth eth = {
378                 .dst.addr_bytes = "\x00\x00\x00\x00\x00\x00",
379                 .src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
380                 .type = 0,
381         };
382
383         if (mlx5_is_secondary())
384                 return;
385         dev->data->promiscuous = 1;
386         claim_zero(mlx5_ctrl_flow(dev, &eth, &eth, 1));
387 }
388
389 /**
390  * DPDK callback to disable promiscuous mode.
391  *
392  * @param dev
393  *   Pointer to Ethernet device structure.
394  */
395 void
396 mlx5_promiscuous_disable(struct rte_eth_dev *dev)
397 {
398         struct rte_flow_item_eth eth = {
399                 .dst.addr_bytes = "\x00\x00\x00\x00\x00\x00",
400                 .src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
401                 .type = 0,
402         };
403
404         if (mlx5_is_secondary())
405                 return;
406         dev->data->promiscuous = 0;
407         claim_zero(mlx5_ctrl_flow(dev, &eth, &eth, 0));
408 }
409
410 /**
411  * DPDK callback to enable allmulti mode.
412  *
413  * @param dev
414  *   Pointer to Ethernet device structure.
415  */
416 void
417 mlx5_allmulticast_enable(struct rte_eth_dev *dev)
418 {
419         struct priv *priv = dev->data->dev_private;
420         int ret;
421
422         if (mlx5_is_secondary())
423                 return;
424
425         priv_lock(priv);
426         priv->allmulti_req = 1;
427         ret = priv_rehash_flows(priv);
428         if (ret)
429                 ERROR("error while enabling allmulticast mode: %s",
430                       strerror(ret));
431         priv_unlock(priv);
432 }
433
434 /**
435  * DPDK callback to disable allmulti mode.
436  *
437  * @param dev
438  *   Pointer to Ethernet device structure.
439  */
440 void
441 mlx5_allmulticast_disable(struct rte_eth_dev *dev)
442 {
443         struct priv *priv = dev->data->dev_private;
444         int ret;
445
446         if (mlx5_is_secondary())
447                 return;
448
449         priv_lock(priv);
450         priv->allmulti_req = 0;
451         ret = priv_rehash_flows(priv);
452         if (ret)
453                 ERROR("error while disabling allmulticast mode: %s",
454                       strerror(ret));
455         priv_unlock(priv);
456 }