net/mlx4: fix unhandled event debug message
[dpdk.git] drivers/net/mlx4/mlx4_intr.c
/*-
 *   BSD LICENSE
 *
 *   Copyright 2017 6WIND S.A.
 *   Copyright 2017 Mellanox
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of 6WIND S.A. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/**
 * @file
 * Interrupt handling for mlx4 driver.
 */

#include <assert.h>
#include <errno.h>
#include <stdint.h>
#include <stdlib.h>

/* Verbs headers do not support -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_alarm.h>
#include <rte_errno.h>
#include <rte_ethdev.h>
#include <rte_interrupts.h>

#include "mlx4.h"
#include "mlx4_rxtx.h"
#include "mlx4_utils.h"

static void mlx4_link_status_alarm(struct priv *priv);

/**
 * Clean up Rx interrupts handler.
 *
 * @param priv
 *   Pointer to private structure.
 */
static void
mlx4_rx_intr_vec_disable(struct priv *priv)
{
        struct rte_intr_handle *intr_handle = &priv->intr_handle;

        rte_intr_free_epoll_fd(intr_handle);
        free(intr_handle->intr_vec);
        intr_handle->nb_efd = 0;
        intr_handle->intr_vec = NULL;
}

/**
 * Allocate queue vector and fill epoll fd list for Rx interrupts.
 *
 * @param priv
 *   Pointer to private structure.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_rx_intr_vec_enable(struct priv *priv)
{
        unsigned int i;
        unsigned int rxqs_n = priv->dev->data->nb_rx_queues;
        unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
        unsigned int count = 0;
        struct rte_intr_handle *intr_handle = &priv->intr_handle;

        mlx4_rx_intr_vec_disable(priv);
        intr_handle->intr_vec =
                malloc(rxqs_n * sizeof(intr_handle->intr_vec[0]));
        if (intr_handle->intr_vec == NULL) {
                rte_errno = ENOMEM;
                ERROR("failed to allocate memory for interrupt vector,"
                      " Rx interrupts will not be supported");
                return -rte_errno;
        }
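        /*
         * Map each Rx queue that owns a completion channel to the next
         * free epoll fd slot; queues without a channel get an invalid
         * vector index so the interrupt layer skips them.
         */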
        for (i = 0; i != n; ++i) {
                struct rxq *rxq = priv->dev->data->rx_queues[i];

                /* Skip queues that cannot request interrupts. */
                if (!rxq || !rxq->channel) {
                        /* Use invalid intr_vec[] index to disable entry. */
                        intr_handle->intr_vec[i] =
                                RTE_INTR_VEC_RXTX_OFFSET +
                                RTE_MAX_RXTX_INTR_VEC_ID;
                        continue;
                }
                if (count >= RTE_MAX_RXTX_INTR_VEC_ID) {
                        rte_errno = E2BIG;
                        ERROR("too many Rx queues for interrupt vector size"
                              " (%d), Rx interrupts cannot be enabled",
                              RTE_MAX_RXTX_INTR_VEC_ID);
                        mlx4_rx_intr_vec_disable(priv);
                        return -rte_errno;
                }
                intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + count;
                intr_handle->efds[count] = rxq->channel->fd;
                count++;
        }
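        /*
         * Without any queue able to raise interrupts, release the vector;
         * otherwise record how many epoll fds are actually in use.
         */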
        if (!count)
                mlx4_rx_intr_vec_disable(priv);
        else
                intr_handle->nb_efd = count;
        return 0;
}

/**
 * Collect interrupt events.
 *
 * @param priv
 *   Pointer to private structure.
 * @param events
 *   Pointer to event flags holder.
 *
 * @return
 *   Number of events.
 */
static int
mlx4_collect_interrupt_events(struct priv *priv, uint32_t *events)
{
        struct ibv_async_event event;
        int port_change = 0;
        struct rte_eth_link *link = &priv->dev->data->dev_link;
        const struct rte_intr_conf *const intr_conf =
                &priv->dev->data->dev_conf.intr_conf;
        int ret = 0;

        *events = 0;
        /* Read all messages and acknowledge them. */
        for (;;) {
                if (ibv_get_async_event(priv->ctx, &event))
                        break;
                switch (event.event_type) {
                case IBV_EVENT_PORT_ACTIVE:
                case IBV_EVENT_PORT_ERR:
                        if (!intr_conf->lsc)
                                break;
                        port_change = 1;
                        ret++;
                        break;
                case IBV_EVENT_DEVICE_FATAL:
                        if (!intr_conf->rmv)
                                break;
                        *events |= (1 << RTE_ETH_EVENT_INTR_RMV);
                        ret++;
                        break;
                default:
                        DEBUG("event type %d on port %d not handled",
                              event.event_type, event.element.port_num);
                }
                ibv_ack_async_event(&event);
        }
        if (!port_change)
                return ret;
        mlx4_link_update(priv->dev, 0);
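        /*
         * A zero speed with the link up, or a nonzero speed with the link
         * down, means the port state is still settling: schedule a delayed
         * check instead of reporting a possibly spurious LSC event.
         */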
        if (((link->link_speed == 0) && link->link_status) ||
            ((link->link_speed != 0) && !link->link_status)) {
                if (!priv->intr_alarm) {
                        /* Inconsistent status, check again later. */
                        priv->intr_alarm = 1;
                        rte_eal_alarm_set(MLX4_INTR_ALARM_TIMEOUT,
                                          (void (*)(void *))
                                          mlx4_link_status_alarm,
                                          priv);
                }
        } else {
                *events |= (1 << RTE_ETH_EVENT_INTR_LSC);
        }
        return ret;
}

/**
 * Process scheduled link status check.
 *
 * @param priv
 *   Pointer to private structure.
 */
static void
mlx4_link_status_alarm(struct priv *priv)
{
        uint32_t events;
        int ret;

        assert(priv->intr_alarm == 1);
        priv->intr_alarm = 0;
        ret = mlx4_collect_interrupt_events(priv, &events);
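        /* Notify applications only once the link status change is confirmed. */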
        if (ret > 0 && events & (1 << RTE_ETH_EVENT_INTR_LSC))
                _rte_eth_dev_callback_process(priv->dev,
                                              RTE_ETH_EVENT_INTR_LSC,
                                              NULL, NULL);
}

/**
 * Handle interrupts from the NIC.
 *
 * @param priv
 *   Pointer to private structure.
 */
static void
mlx4_interrupt_handler(struct priv *priv)
{
        int ret;
        uint32_t ev;
        int i;

        ret = mlx4_collect_interrupt_events(priv, &ev);
        if (ret > 0) {
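                /*
                 * Walk the event bitmap and notify the registered callbacks
                 * for every event type that was collected.
                 */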
                for (i = RTE_ETH_EVENT_UNKNOWN;
                     i < RTE_ETH_EVENT_MAX;
                     i++) {
                        if (ev & (1 << i)) {
                                ev &= ~(1 << i);
                                _rte_eth_dev_callback_process(priv->dev, i,
                                                              NULL, NULL);
                                ret--;
                        }
                }
                if (ret)
                        WARN("%d event%s not processed", ret,
                             (ret > 1 ? "s were" : " was"));
        }
}

/**
 * Uninstall interrupt handler.
 *
 * @param priv
 *   Pointer to private structure.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
int
mlx4_intr_uninstall(struct priv *priv)
{
        int err = rte_errno; /* Make sure rte_errno remains unchanged. */

        if (priv->intr_handle.fd != -1) {
                rte_intr_callback_unregister(&priv->intr_handle,
                                             (void (*)(void *))
                                             mlx4_interrupt_handler,
                                             priv);
                priv->intr_handle.fd = -1;
        }
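        /* Cancel any link status check still pending from the alarm. */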
        rte_eal_alarm_cancel((void (*)(void *))mlx4_link_status_alarm, priv);
        priv->intr_alarm = 0;
        mlx4_rx_intr_vec_disable(priv);
        rte_errno = err;
        return 0;
}

/**
 * Install interrupt handler.
 *
 * @param priv
 *   Pointer to private structure.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
int
mlx4_intr_install(struct priv *priv)
{
        const struct rte_intr_conf *const intr_conf =
                &priv->dev->data->dev_conf.intr_conf;
        int rc;

        mlx4_intr_uninstall(priv);
        if (intr_conf->rxq && mlx4_rx_intr_vec_enable(priv) < 0)
                goto error;
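        /* LSC and RMV events arrive through the Verbs asynchronous fd. */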
        if (intr_conf->lsc | intr_conf->rmv) {
                priv->intr_handle.fd = priv->ctx->async_fd;
                rc = rte_intr_callback_register(&priv->intr_handle,
                                                (void (*)(void *))
                                                mlx4_interrupt_handler,
                                                priv);
                if (rc < 0) {
                        rte_errno = -rc;
                        goto error;
                }
        }
        return 0;
error:
        mlx4_intr_uninstall(priv);
        return -rte_errno;
}

/**
 * DPDK callback for Rx queue interrupt disable.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param idx
 *   Rx queue index.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
int
mlx4_rx_intr_disable(struct rte_eth_dev *dev, uint16_t idx)
{
        struct rxq *rxq = dev->data->rx_queues[idx];
        struct ibv_cq *ev_cq;
        void *ev_ctx;
        int ret;

        if (!rxq || !rxq->channel) {
                ret = EINVAL;
        } else {
                ret = ibv_get_cq_event(rxq->cq->channel, &ev_cq, &ev_ctx);
                if (ret || ev_cq != rxq->cq)
                        ret = EINVAL;
        }
        if (ret) {
                rte_errno = ret;
                WARN("unable to disable interrupt on rx queue %d",
                     idx);
        } else {
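                /* The retrieved CQ event must be acknowledged. */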
                ibv_ack_cq_events(rxq->cq, 1);
        }
        return -ret;
}

/**
 * DPDK callback for Rx queue interrupt enable.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param idx
 *   Rx queue index.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
int
mlx4_rx_intr_enable(struct rte_eth_dev *dev, uint16_t idx)
{
        struct rxq *rxq = dev->data->rx_queues[idx];
        int ret;

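        /* Arm the CQ so its next completion raises a channel event. */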
        if (!rxq || !rxq->channel)
                ret = EINVAL;
        else
                ret = ibv_req_notify_cq(rxq->cq, 0);
        if (ret) {
                rte_errno = ret;
                WARN("unable to arm interrupt on rx queue %d", idx);
        }
        return -ret;
}