drivers/net/mlx4/mlx4_intr.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017 6WIND S.A.
 * Copyright 2017 Mellanox Technologies, Ltd
 */

/**
 * @file
 * Interrupt handling for the mlx4 driver.
 */

#include <errno.h>
#include <stdint.h>
#include <stdlib.h>

/* Verbs headers do not support -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_alarm.h>
#include <rte_errno.h>
#include <rte_ethdev_driver.h>
#include <rte_io.h>
#include <rte_interrupts.h>

#include "mlx4.h"
#include "mlx4_glue.h"
#include "mlx4_rxtx.h"
#include "mlx4_utils.h"

static int mlx4_link_status_check(struct mlx4_priv *priv);

/**
 * Clean up the Rx interrupt handler.
 *
 * @param priv
 *   Pointer to private structure.
 */
static void
mlx4_rx_intr_vec_disable(struct mlx4_priv *priv)
{
	struct rte_intr_handle *intr_handle = &priv->intr_handle;

	rte_intr_free_epoll_fd(intr_handle);
	free(intr_handle->intr_vec);
	intr_handle->nb_efd = 0;
	intr_handle->intr_vec = NULL;
}

/**
 * Allocate queue vector and fill epoll fd list for Rx interrupts.
 *
 * @param priv
 *   Pointer to private structure.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_rx_intr_vec_enable(struct mlx4_priv *priv)
{
	unsigned int i;
	unsigned int rxqs_n = ETH_DEV(priv)->data->nb_rx_queues;
	unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
	unsigned int count = 0;
	struct rte_intr_handle *intr_handle = &priv->intr_handle;

	mlx4_rx_intr_vec_disable(priv);
	intr_handle->intr_vec = malloc(n * sizeof(intr_handle->intr_vec[0]));
	if (intr_handle->intr_vec == NULL) {
		rte_errno = ENOMEM;
		ERROR("failed to allocate memory for interrupt vector,"
		      " Rx interrupts will not be supported");
		return -rte_errno;
	}
	for (i = 0; i != n; ++i) {
		struct rxq *rxq = ETH_DEV(priv)->data->rx_queues[i];

		/* Skip queues that cannot request interrupts. */
		if (!rxq || !rxq->channel) {
			/* Use invalid intr_vec[] index to disable entry. */
			intr_handle->intr_vec[i] =
				RTE_INTR_VEC_RXTX_OFFSET +
				RTE_MAX_RXTX_INTR_VEC_ID;
			continue;
		}
		if (count >= RTE_MAX_RXTX_INTR_VEC_ID) {
			rte_errno = E2BIG;
			ERROR("too many Rx queues for interrupt vector size"
			      " (%d), Rx interrupts cannot be enabled",
			      RTE_MAX_RXTX_INTR_VEC_ID);
			mlx4_rx_intr_vec_disable(priv);
			return -rte_errno;
		}
		intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + count;
		intr_handle->efds[count] = rxq->channel->fd;
		count++;
	}
	if (!count)
		mlx4_rx_intr_vec_disable(priv);
	else
		intr_handle->nb_efd = count;
	return 0;
}

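/*
 * Note on the mapping built by mlx4_rx_intr_vec_enable():
 * intr_vec[i] == RTE_INTR_VEC_RXTX_OFFSET + k means Rx queue i is served by
 * the completion channel fd stored in efds[k], while queues without a
 * channel are parked at RTE_INTR_VEC_RXTX_OFFSET + RTE_MAX_RXTX_INTR_VEC_ID,
 * an intentionally invalid vector that marks the entry as disabled. The
 * ethdev/EAL interrupt helpers (e.g. rte_eth_dev_rx_intr_ctl_q()) read
 * intr_vec[] to find a queue's vector and wait on the matching efds[] entry.
 */
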
/**
 * Process scheduled link status check.
 *
 * If LSC interrupts are requested, process related callback.
 *
 * @param priv
 *   Pointer to private structure.
 */
static void
mlx4_link_status_alarm(struct mlx4_priv *priv)
{
	const struct rte_intr_conf *const intr_conf =
		&ETH_DEV(priv)->data->dev_conf.intr_conf;

	MLX4_ASSERT(priv->intr_alarm == 1);
	priv->intr_alarm = 0;
	if (intr_conf->lsc && !mlx4_link_status_check(priv))
		_rte_eth_dev_callback_process(ETH_DEV(priv),
					      RTE_ETH_EVENT_INTR_LSC,
					      NULL);
}

/**
 * Check link status.
 *
 * In case of inconsistency, another check is scheduled.
 *
 * @param priv
 *   Pointer to private structure.
 *
 * @return
 *   0 on success (link status is consistent), negative errno value
 *   otherwise and rte_errno is set.
 */
static int
mlx4_link_status_check(struct mlx4_priv *priv)
{
	struct rte_eth_link *link = &ETH_DEV(priv)->data->dev_link;
	int ret = mlx4_link_update(ETH_DEV(priv), 0);

	if (ret)
		return ret;
	if ((!link->link_speed && link->link_status) ||
	    (link->link_speed && !link->link_status)) {
		if (!priv->intr_alarm) {
			/* Inconsistent status, check again later. */
			ret = rte_eal_alarm_set(MLX4_INTR_ALARM_TIMEOUT,
						(void (*)(void *))
						mlx4_link_status_alarm,
						priv);
			if (ret)
				return ret;
			priv->intr_alarm = 1;
		}
		rte_errno = EINPROGRESS;
		return -rte_errno;
	}
	return 0;
}

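/*
 * Note: a reading of "link up at zero speed" or "link down at non-zero
 * speed" is treated as transient above. The check is retried from an EAL
 * alarm after MLX4_INTR_ALARM_TIMEOUT microseconds (rte_eal_alarm_set()
 * takes its delay in microseconds), and -EINPROGRESS tells the caller to
 * hold off reporting the link state until it settles.
 */
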
/**
 * Handle interrupts from the NIC.
 *
 * @param priv
 *   Pointer to private structure.
 */
static void
mlx4_interrupt_handler(struct mlx4_priv *priv)
{
	enum { LSC, RMV, };
	static const enum rte_eth_event_type type[] = {
		[LSC] = RTE_ETH_EVENT_INTR_LSC,
		[RMV] = RTE_ETH_EVENT_INTR_RMV,
	};
	uint32_t caught[RTE_DIM(type)] = { 0 };
	struct ibv_async_event event;
	const struct rte_intr_conf *const intr_conf =
		&ETH_DEV(priv)->data->dev_conf.intr_conf;
	unsigned int i;

	/* Read all messages and acknowledge them. */
	while (!mlx4_glue->get_async_event(priv->ctx, &event)) {
		switch (event.event_type) {
		case IBV_EVENT_PORT_ACTIVE:
		case IBV_EVENT_PORT_ERR:
			if (intr_conf->lsc && !mlx4_link_status_check(priv))
				++caught[LSC];
			break;
		case IBV_EVENT_DEVICE_FATAL:
			if (intr_conf->rmv)
				++caught[RMV];
			break;
		default:
			DEBUG("event type %d on physical port %d not handled",
			      event.event_type, event.element.port_num);
		}
		mlx4_glue->ack_async_event(&event);
	}
	for (i = 0; i != RTE_DIM(caught); ++i)
		if (caught[i])
			_rte_eth_dev_callback_process(ETH_DEV(priv), type[i],
						      NULL);
}

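/*
 * Usage sketch (illustration only, not part of this driver): events
 * forwarded by mlx4_interrupt_handler() reach the application through the
 * ethdev callback mechanism, roughly:
 *
 *	static int
 *	on_lsc(uint16_t port_id, enum rte_eth_event_type type,
 *	       void *cb_arg, void *ret_param)
 *	{
 *		(void)type; (void)cb_arg; (void)ret_param;
 *		printf("port %u: link state changed\n", port_id);
 *		return 0;
 *	}
 *
 *	rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
 *				      on_lsc, NULL);
 *
 * (on_lsc() and port_id are placeholders.)
 */
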
/**
 * MLX4 CQ notification.
 *
 * @param rxq
 *   Pointer to receive queue structure.
 * @param solicited
 *   Whether the request is solicited.
 */
static void
mlx4_arm_cq(struct rxq *rxq, int solicited)
{
	struct mlx4_cq *cq = &rxq->mcq;
	uint64_t doorbell;
	uint32_t sn = cq->arm_sn & MLX4_CQ_DB_GEQ_N_MASK;
	uint32_t ci = cq->cons_index & MLX4_CQ_DB_CI_MASK;
	uint32_t cmd = solicited ? MLX4_CQ_DB_REQ_NOT_SOL : MLX4_CQ_DB_REQ_NOT;

	*cq->arm_db = rte_cpu_to_be_32(sn << 28 | cmd | ci);
	/*
	 * Make sure that the doorbell record in host memory is
	 * written before ringing the doorbell via PCI MMIO.
	 */
	rte_wmb();
	doorbell = sn << 28 | cmd | cq->cqn;
	doorbell <<= 32;
	doorbell |= ci;
	rte_write64(rte_cpu_to_be_64(doorbell), cq->cq_db_reg);
}

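/*
 * Layout of the arming writes above: the 32-bit arm_db record in host
 * memory holds (sn << 28 | cmd | ci), i.e. the arm sequence number in the
 * top nibble followed by the request-notification command bits and the
 * masked consumer index. The 64-bit MMIO doorbell carries
 * (sn << 28 | cmd | cqn) in its upper word and the consumer index in its
 * lower word; rte_wmb() orders the host-memory write before the MMIO write
 * so the device never sees a doorbell ahead of its record.
 */
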
/**
 * Uninstall interrupt handler.
 *
 * @param priv
 *   Pointer to private structure.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
int
mlx4_intr_uninstall(struct mlx4_priv *priv)
{
	int err = rte_errno; /* Make sure rte_errno remains unchanged. */

	if (priv->intr_handle.fd != -1) {
		rte_intr_callback_unregister(&priv->intr_handle,
					     (void (*)(void *))
					     mlx4_interrupt_handler,
					     priv);
		priv->intr_handle.fd = -1;
	}
	rte_eal_alarm_cancel((void (*)(void *))mlx4_link_status_alarm, priv);
	priv->intr_alarm = 0;
	mlx4_rxq_intr_disable(priv);
	rte_errno = err;
	return 0;
}

/**
 * Install interrupt handler.
 *
 * @param priv
 *   Pointer to private structure.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
int
mlx4_intr_install(struct mlx4_priv *priv)
{
	const struct rte_intr_conf *const intr_conf =
		&ETH_DEV(priv)->data->dev_conf.intr_conf;
	int rc;

	mlx4_intr_uninstall(priv);
	if (intr_conf->lsc | intr_conf->rmv) {
		priv->intr_handle.fd = priv->ctx->async_fd;
		rc = rte_intr_callback_register(&priv->intr_handle,
						(void (*)(void *))
						mlx4_interrupt_handler,
						priv);
		if (rc < 0) {
			rte_errno = -rc;
			goto error;
		}
	}
	return 0;
error:
	mlx4_intr_uninstall(priv);
	return -rte_errno;
}

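/*
 * Usage sketch (illustration only): the LSC/RMV branch above is taken only
 * when the application requested those interrupts at configure time, e.g.:
 *
 *	struct rte_eth_conf conf = {
 *		.intr_conf = { .lsc = 1, .rmv = 1 },
 *	};
 *	rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
 *
 * (port_id, nb_rxq and nb_txq are placeholders.) Without .lsc or .rmv set,
 * mlx4_intr_install() leaves the Verbs async fd unregistered.
 */
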
/**
 * DPDK callback for Rx queue interrupt disable.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param idx
 *   Rx queue index.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
int
mlx4_rx_intr_disable(struct rte_eth_dev *dev, uint16_t idx)
{
	struct rxq *rxq = dev->data->rx_queues[idx];
	struct ibv_cq *ev_cq;
	void *ev_ctx;
	int ret;

	if (!rxq || !rxq->channel) {
		ret = EINVAL;
	} else {
		ret = mlx4_glue->get_cq_event(rxq->cq->channel, &ev_cq,
					      &ev_ctx);
		if (ret || ev_cq != rxq->cq)
			ret = EINVAL;
	}
	if (ret) {
		rte_errno = ret;
		WARN("unable to disable interrupt on rx queue %d",
		     idx);
	} else {
		rxq->mcq.arm_sn++;
		mlx4_glue->ack_cq_events(rxq->cq, 1);
	}
	return -ret;
}

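/*
 * Note: per Verbs completion-channel semantics, each event read with
 * get_cq_event() must be acknowledged with ack_cq_events(), and the CQ only
 * signals again once it has been re-armed. Incrementing arm_sn here makes
 * the next arming doorbell carry a fresh sequence number.
 */
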
/**
 * DPDK callback for Rx queue interrupt enable.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param idx
 *   Rx queue index.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
int
mlx4_rx_intr_enable(struct rte_eth_dev *dev, uint16_t idx)
{
	struct rxq *rxq = dev->data->rx_queues[idx];
	int ret = 0;

	if (!rxq || !rxq->channel) {
		ret = EINVAL;
		rte_errno = ret;
		WARN("unable to arm interrupt on rx queue %d", idx);
	} else {
		mlx4_arm_cq(rxq, 0);
	}
	return -ret;
}

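/*
 * Usage sketch (illustration only): applications normally drive these two
 * callbacks through the public ethdev API:
 *
 *	rte_eth_dev_rx_intr_enable(port_id, queue_id);
 *	// block on the queue's event fd, e.g. via rte_epoll_wait()
 *	rte_eth_dev_rx_intr_disable(port_id, queue_id);
 *	// resume polling with rte_eth_rx_burst()
 *
 * (port_id and queue_id are placeholders.) The enable call ends up in
 * mlx4_rx_intr_enable(), which arms the CQ; the disable call drains and
 * acknowledges the pending CQ event before busy polling resumes.
 */
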
/**
 * Enable datapath interrupts.
 *
 * @param priv
 *   Pointer to private structure.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
int
mlx4_rxq_intr_enable(struct mlx4_priv *priv)
{
	const struct rte_intr_conf *const intr_conf =
		&ETH_DEV(priv)->data->dev_conf.intr_conf;

	if (intr_conf->rxq && mlx4_rx_intr_vec_enable(priv) < 0)
		goto error;
	return 0;
error:
	return -rte_errno;
}

/**
 * Disable datapath interrupts, keeping other interrupts intact.
 *
 * @param priv
 *   Pointer to private structure.
 */
void
mlx4_rxq_intr_disable(struct mlx4_priv *priv)
{
	int err = rte_errno; /* Make sure rte_errno remains unchanged. */

	mlx4_rx_intr_vec_disable(priv);
	rte_errno = err;
}