net/mlx5: remove pedantic pragma
[dpdk.git] / drivers / net / mlx5 / mlx5_rxmode.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright 2015 6WIND S.A.
5  *   Copyright 2015 Mellanox.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of 6WIND S.A. nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <stddef.h>
35 #include <errno.h>
36 #include <string.h>
37
38 /* Verbs header. */
39 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
40 #ifdef PEDANTIC
41 #pragma GCC diagnostic ignored "-Wpedantic"
42 #endif
43 #include <infiniband/verbs.h>
44 #ifdef PEDANTIC
45 #pragma GCC diagnostic error "-Wpedantic"
46 #endif
47
48 #include <rte_ethdev.h>
49
50 #include "mlx5.h"
51 #include "mlx5_rxtx.h"
52 #include "mlx5_utils.h"
53
/* Initialization data for special flows. */
static const struct special_flow_init special_flow_init[] = {
	/* Promiscuous: all-zero mask matches any destination MAC. */
	[HASH_RXQ_FLOW_TYPE_PROMISC] = {
		.dst_mac_val = "\x00\x00\x00\x00\x00\x00",
		.dst_mac_mask = "\x00\x00\x00\x00\x00\x00",
		.hash_types =
			1 << HASH_RXQ_TCPV4 |
			1 << HASH_RXQ_UDPV4 |
			1 << HASH_RXQ_IPV4 |
			1 << HASH_RXQ_TCPV6 |
			1 << HASH_RXQ_UDPV6 |
			1 << HASH_RXQ_IPV6 |
			1 << HASH_RXQ_ETH |
			0,
		.per_vlan = 0,
	},
	/* All-multicast: mask only the I/G (multicast) bit of the first
	 * octet, so any address with that bit set matches. */
	[HASH_RXQ_FLOW_TYPE_ALLMULTI] = {
		.dst_mac_val = "\x01\x00\x00\x00\x00\x00",
		.dst_mac_mask = "\x01\x00\x00\x00\x00\x00",
		.hash_types =
			1 << HASH_RXQ_UDPV4 |
			1 << HASH_RXQ_IPV4 |
			1 << HASH_RXQ_UDPV6 |
			1 << HASH_RXQ_IPV6 |
			1 << HASH_RXQ_ETH |
			0,
		.per_vlan = 0,
	},
	/* Broadcast: exact match on ff:ff:ff:ff:ff:ff, per VLAN. */
	[HASH_RXQ_FLOW_TYPE_BROADCAST] = {
		.dst_mac_val = "\xff\xff\xff\xff\xff\xff",
		.dst_mac_mask = "\xff\xff\xff\xff\xff\xff",
		.hash_types =
			1 << HASH_RXQ_UDPV4 |
			1 << HASH_RXQ_IPV4 |
			1 << HASH_RXQ_UDPV6 |
			1 << HASH_RXQ_IPV6 |
			1 << HASH_RXQ_ETH |
			0,
		.per_vlan = 1,
	},
	/* IPv6 multicast: match the 33:33 MAC prefix, per VLAN. */
	[HASH_RXQ_FLOW_TYPE_IPV6MULTI] = {
		.dst_mac_val = "\x33\x33\x00\x00\x00\x00",
		.dst_mac_mask = "\xff\xff\x00\x00\x00\x00",
		.hash_types =
			1 << HASH_RXQ_UDPV6 |
			1 << HASH_RXQ_IPV6 |
			1 << HASH_RXQ_ETH |
			0,
		.per_vlan = 1,
	},
};
105
/**
 * Enable a special flow in a hash RX queue for a given VLAN index.
 *
 * @param hash_rxq
 *   Pointer to hash RX queue structure.
 * @param flow_type
 *   Special flow type.
 * @param vlan_index
 *   VLAN index to use.
 *
 * @return
 *   0 on success, errno value on failure.
 */
static int
hash_rxq_special_flow_enable_vlan(struct hash_rxq *hash_rxq,
				  enum hash_rxq_flow_type flow_type,
				  unsigned int vlan_index)
{
	struct priv *priv = hash_rxq->priv;
	struct ibv_exp_flow *flow;
	/* Project macro: allocates a flow attribute buffer sized for this
	 * queue type, immediately followed by one Ethernet specification. */
	FLOW_ATTR_SPEC_ETH(data, priv_flow_attr(priv, NULL, 0, hash_rxq->type));
	struct ibv_exp_flow_attr *attr = &data->attr;
	struct ibv_exp_flow_spec_eth *spec = &data->spec;
	const uint8_t *mac;
	const uint8_t *mask;
	/* VLAN matching applies only when filters are configured and this
	 * flow type is flagged per-VLAN in special_flow_init[]. */
	unsigned int vlan_enabled = (priv->vlan_filter_n &&
				     special_flow_init[flow_type].per_vlan);
	unsigned int vlan_id = priv->vlan_filter[vlan_index];

	/* Check if flow is relevant for this hash_rxq. */
	if (!(special_flow_init[flow_type].hash_types & (1 << hash_rxq->type)))
		return 0;
	/* Check if flow already exists. */
	if (hash_rxq->special_flow[flow_type][vlan_index] != NULL)
		return 0;

	/*
	 * No padding must be inserted by the compiler between attr and spec.
	 * This layout is expected by libibverbs.
	 */
	assert(((uint8_t *)attr + sizeof(*attr)) == (uint8_t *)spec);
	priv_flow_attr(priv, attr, sizeof(data), hash_rxq->type);
	/* The first specification must be Ethernet. */
	assert(spec->type == IBV_EXP_FLOW_SPEC_ETH);
	assert(spec->size == sizeof(*spec));

	mac = special_flow_init[flow_type].dst_mac_val;
	mask = special_flow_init[flow_type].dst_mac_mask;
	*spec = (struct ibv_exp_flow_spec_eth){
		.type = IBV_EXP_FLOW_SPEC_ETH,
		.size = sizeof(*spec),
		.val = {
			.dst_mac = {
				mac[0], mac[1], mac[2],
				mac[3], mac[4], mac[5],
			},
			/* Match the exact VLAN ID when enabled. */
			.vlan_tag = (vlan_enabled ? htons(vlan_id) : 0),
		},
		.mask = {
			.dst_mac = {
				mask[0], mask[1], mask[2],
				mask[3], mask[4], mask[5],
			},
			/* 0xfff covers only the 12-bit VLAN ID field. */
			.vlan_tag = (vlan_enabled ? htons(0xfff) : 0),
		},
	};

	errno = 0;
	flow = ibv_exp_create_flow(hash_rxq->qp, attr);
	if (flow == NULL) {
		/* It's not clear whether errno is always set in this case. */
		ERROR("%p: flow configuration failed, errno=%d: %s",
		      (void *)hash_rxq, errno,
		      (errno ? strerror(errno) : "Unknown error"));
		if (errno)
			return errno;
		return EINVAL;
	}
	hash_rxq->special_flow[flow_type][vlan_index] = flow;
	DEBUG("%p: special flow %s (index %d) VLAN %u (index %u) enabled",
	      (void *)hash_rxq, hash_rxq_flow_type_str(flow_type), flow_type,
	      vlan_id, vlan_index);
	return 0;
}
190
191 /**
192  * Disable a special flow in a hash RX queue for a given VLAN index.
193  *
194  * @param hash_rxq
195  *   Pointer to hash RX queue structure.
196  * @param flow_type
197  *   Special flow type.
198  * @param vlan_index
199  *   VLAN index to use.
200  */
201 static void
202 hash_rxq_special_flow_disable_vlan(struct hash_rxq *hash_rxq,
203                                    enum hash_rxq_flow_type flow_type,
204                                    unsigned int vlan_index)
205 {
206         struct ibv_exp_flow *flow =
207                 hash_rxq->special_flow[flow_type][vlan_index];
208
209         if (flow == NULL)
210                 return;
211         claim_zero(ibv_exp_destroy_flow(flow));
212         hash_rxq->special_flow[flow_type][vlan_index] = NULL;
213         DEBUG("%p: special flow %s (index %d) VLAN %u (index %u) disabled",
214               (void *)hash_rxq, hash_rxq_flow_type_str(flow_type), flow_type,
215               hash_rxq->priv->vlan_filter[vlan_index], vlan_index);
216 }
217
218 /**
219  * Enable a special flow in a hash RX queue.
220  *
221  * @param hash_rxq
222  *   Pointer to hash RX queue structure.
223  * @param flow_type
224  *   Special flow type.
225  * @param vlan_index
226  *   VLAN index to use.
227  *
228  * @return
229  *   0 on success, errno value on failure.
230  */
231 static int
232 hash_rxq_special_flow_enable(struct hash_rxq *hash_rxq,
233                              enum hash_rxq_flow_type flow_type)
234 {
235         struct priv *priv = hash_rxq->priv;
236         unsigned int i = 0;
237         int ret;
238
239         assert((unsigned int)flow_type < RTE_DIM(hash_rxq->special_flow));
240         assert(RTE_DIM(hash_rxq->special_flow[flow_type]) ==
241                RTE_DIM(priv->vlan_filter));
242         /* Add a special flow for each VLAN filter when relevant. */
243         do {
244                 ret = hash_rxq_special_flow_enable_vlan(hash_rxq, flow_type, i);
245                 if (ret) {
246                         /* Failure, rollback. */
247                         while (i != 0)
248                                 hash_rxq_special_flow_disable_vlan(hash_rxq,
249                                                                    flow_type,
250                                                                    --i);
251                         return ret;
252                 }
253         } while (special_flow_init[flow_type].per_vlan &&
254                  ++i < priv->vlan_filter_n);
255         return 0;
256 }
257
258 /**
259  * Disable a special flow in a hash RX queue.
260  *
261  * @param hash_rxq
262  *   Pointer to hash RX queue structure.
263  * @param flow_type
264  *   Special flow type.
265  */
266 static void
267 hash_rxq_special_flow_disable(struct hash_rxq *hash_rxq,
268                               enum hash_rxq_flow_type flow_type)
269 {
270         unsigned int i;
271
272         assert((unsigned int)flow_type < RTE_DIM(hash_rxq->special_flow));
273         for (i = 0; (i != RTE_DIM(hash_rxq->special_flow[flow_type])); ++i)
274                 hash_rxq_special_flow_disable_vlan(hash_rxq, flow_type, i);
275 }
276
277 /**
278  * Enable a special flow in all hash RX queues.
279  *
280  * @param priv
281  *   Private structure.
282  * @param flow_type
283  *   Special flow type.
284  *
285  * @return
286  *   0 on success, errno value on failure.
287  */
288 int
289 priv_special_flow_enable(struct priv *priv, enum hash_rxq_flow_type flow_type)
290 {
291         unsigned int i;
292
293         if (!priv_allow_flow_type(priv, flow_type))
294                 return 0;
295         for (i = 0; (i != priv->hash_rxqs_n); ++i) {
296                 struct hash_rxq *hash_rxq = &(*priv->hash_rxqs)[i];
297                 int ret;
298
299                 ret = hash_rxq_special_flow_enable(hash_rxq, flow_type);
300                 if (!ret)
301                         continue;
302                 /* Failure, rollback. */
303                 while (i != 0) {
304                         hash_rxq = &(*priv->hash_rxqs)[--i];
305                         hash_rxq_special_flow_disable(hash_rxq, flow_type);
306                 }
307                 return ret;
308         }
309         return 0;
310 }
311
312 /**
313  * Disable a special flow in all hash RX queues.
314  *
315  * @param priv
316  *   Private structure.
317  * @param flow_type
318  *   Special flow type.
319  */
320 void
321 priv_special_flow_disable(struct priv *priv, enum hash_rxq_flow_type flow_type)
322 {
323         unsigned int i;
324
325         for (i = 0; (i != priv->hash_rxqs_n); ++i) {
326                 struct hash_rxq *hash_rxq = &(*priv->hash_rxqs)[i];
327
328                 hash_rxq_special_flow_disable(hash_rxq, flow_type);
329         }
330 }
331
332 /**
333  * Enable all special flows in all hash RX queues.
334  *
335  * @param priv
336  *   Private structure.
337  */
338 int
339 priv_special_flow_enable_all(struct priv *priv)
340 {
341         enum hash_rxq_flow_type flow_type;
342
343         if (priv->isolated)
344                 return 0;
345         for (flow_type = HASH_RXQ_FLOW_TYPE_PROMISC;
346                         flow_type != HASH_RXQ_FLOW_TYPE_MAC;
347                         ++flow_type) {
348                 int ret;
349
350                 ret = priv_special_flow_enable(priv, flow_type);
351                 if (!ret)
352                         continue;
353                 /* Failure, rollback. */
354                 while (flow_type)
355                         priv_special_flow_disable(priv, --flow_type);
356                 return ret;
357         }
358         return 0;
359 }
360
361 /**
362  * Disable all special flows in all hash RX queues.
363  *
364  * @param priv
365  *   Private structure.
366  */
367 void
368 priv_special_flow_disable_all(struct priv *priv)
369 {
370         enum hash_rxq_flow_type flow_type;
371
372         for (flow_type = HASH_RXQ_FLOW_TYPE_PROMISC;
373                         flow_type != HASH_RXQ_FLOW_TYPE_MAC;
374                         ++flow_type)
375                 priv_special_flow_disable(priv, flow_type);
376 }
377
378 /**
379  * DPDK callback to enable promiscuous mode.
380  *
381  * @param dev
382  *   Pointer to Ethernet device structure.
383  */
384 void
385 mlx5_promiscuous_enable(struct rte_eth_dev *dev)
386 {
387         struct priv *priv = dev->data->dev_private;
388         int ret;
389
390         if (mlx5_is_secondary())
391                 return;
392
393         priv_lock(priv);
394         priv->promisc_req = 1;
395         ret = priv_rehash_flows(priv);
396         if (ret)
397                 ERROR("error while enabling promiscuous mode: %s",
398                       strerror(ret));
399         priv_unlock(priv);
400 }
401
402 /**
403  * DPDK callback to disable promiscuous mode.
404  *
405  * @param dev
406  *   Pointer to Ethernet device structure.
407  */
408 void
409 mlx5_promiscuous_disable(struct rte_eth_dev *dev)
410 {
411         struct priv *priv = dev->data->dev_private;
412         int ret;
413
414         if (mlx5_is_secondary())
415                 return;
416
417         priv_lock(priv);
418         priv->promisc_req = 0;
419         ret = priv_rehash_flows(priv);
420         if (ret)
421                 ERROR("error while disabling promiscuous mode: %s",
422                       strerror(ret));
423         priv_unlock(priv);
424 }
425
426 /**
427  * DPDK callback to enable allmulti mode.
428  *
429  * @param dev
430  *   Pointer to Ethernet device structure.
431  */
432 void
433 mlx5_allmulticast_enable(struct rte_eth_dev *dev)
434 {
435         struct priv *priv = dev->data->dev_private;
436         int ret;
437
438         if (mlx5_is_secondary())
439                 return;
440
441         priv_lock(priv);
442         priv->allmulti_req = 1;
443         ret = priv_rehash_flows(priv);
444         if (ret)
445                 ERROR("error while enabling allmulticast mode: %s",
446                       strerror(ret));
447         priv_unlock(priv);
448 }
449
450 /**
451  * DPDK callback to disable allmulti mode.
452  *
453  * @param dev
454  *   Pointer to Ethernet device structure.
455  */
456 void
457 mlx5_allmulticast_disable(struct rte_eth_dev *dev)
458 {
459         struct priv *priv = dev->data->dev_private;
460         int ret;
461
462         if (mlx5_is_secondary())
463                 return;
464
465         priv_lock(priv);
466         priv->allmulti_req = 0;
467         ret = priv_rehash_flows(priv);
468         if (ret)
469                 ERROR("error while disabling allmulticast mode: %s",
470                       strerror(ret));
471         priv_unlock(priv);
472 }