/*-
 *   BSD LICENSE
 *
 *   Copyright 2017 6WIND S.A.
 *   Copyright 2017 Mellanox
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of 6WIND S.A. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/**
 * @file
 * Interrupts handling for mlx4 driver.
 */

#include <assert.h>
#include <errno.h>
#include <stdint.h>
#include <stdlib.h>

/* Verbs headers do not support -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_alarm.h>
#include <rte_errno.h>
#include <rte_ethdev.h>
#include <rte_interrupts.h>

#include "mlx4.h"
#include "mlx4_utils.h"

static void mlx4_link_status_alarm(struct priv *priv);

/**
 * Clean up Rx interrupts handler.
 *
 * @param priv
 *   Pointer to private structure.
 */
static void
mlx4_rx_intr_vec_disable(struct priv *priv)
{
	struct rte_intr_handle *intr_handle = &priv->intr_handle;

	rte_intr_free_epoll_fd(intr_handle);
	free(intr_handle->intr_vec);
	intr_handle->nb_efd = 0;
	intr_handle->intr_vec = NULL;
}

/**
 * Allocate queue vector and fill epoll fd list for Rx interrupts.
 *
 * @param priv
 *   Pointer to private structure.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_rx_intr_vec_enable(struct priv *priv)
{
	unsigned int i;
	unsigned int rxqs_n = priv->rxqs_n;
	unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
	unsigned int count = 0;
	struct rte_intr_handle *intr_handle = &priv->intr_handle;

	mlx4_rx_intr_vec_disable(priv);
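	/*
	 * One vector entry per examined Rx queue; queues without a
	 * completion channel are given an out-of-range vector below so
	 * that the EAL skips them.
	 */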
	intr_handle->intr_vec = malloc(n * sizeof(intr_handle->intr_vec[0]));
	if (intr_handle->intr_vec == NULL) {
		rte_errno = ENOMEM;
		ERROR("failed to allocate memory for interrupt vector,"
		      " Rx interrupts will not be supported");
		return -rte_errno;
	}
	for (i = 0; i != n; ++i) {
		struct rxq *rxq = (*priv->rxqs)[i];

		/* Skip queues that cannot request interrupts. */
		if (!rxq || !rxq->channel) {
			/* Use invalid intr_vec[] index to disable entry. */
			intr_handle->intr_vec[i] =
				RTE_INTR_VEC_RXTX_OFFSET +
				RTE_MAX_RXTX_INTR_VEC_ID;
			continue;
		}
		if (count >= RTE_MAX_RXTX_INTR_VEC_ID) {
			rte_errno = E2BIG;
			ERROR("too many Rx queues for interrupt vector size"
			      " (%d), Rx interrupts cannot be enabled",
			      RTE_MAX_RXTX_INTR_VEC_ID);
			mlx4_rx_intr_vec_disable(priv);
			return -rte_errno;
		}
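		/*
		 * Queue i uses vector RTE_INTR_VEC_RXTX_OFFSET + count;
		 * efds[count] holds the completion channel fd that the EAL
		 * adds to its interrupt epoll set.
		 */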
		intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + count;
		intr_handle->efds[count] = rxq->channel->fd;
		count++;
	}
	if (!count)
		mlx4_rx_intr_vec_disable(priv);
	else
		intr_handle->nb_efd = count;
	return 0;
}

/**
 * Collect interrupt events.
 *
 * @param priv
 *   Pointer to private structure.
 * @param events
 *   Pointer to event flags holder.
 *
 * @return
 *   Number of events collected.
 */
static int
mlx4_collect_interrupt_events(struct priv *priv, uint32_t *events)
{
	struct ibv_async_event event;
	int port_change = 0;
	struct rte_eth_link *link = &priv->dev->data->dev_link;
	const struct rte_intr_conf *const intr_conf =
		&priv->dev->data->dev_conf.intr_conf;
	int ret = 0;

	*events = 0;
	/* Read all messages and acknowledge them. */
	while (1) {
		if (ibv_get_async_event(priv->ctx, &event))
			break;
		if ((event.event_type == IBV_EVENT_PORT_ACTIVE ||
		     event.event_type == IBV_EVENT_PORT_ERR) &&
		    intr_conf->lsc) {
			port_change = 1;
			ret++;
		} else if (event.event_type == IBV_EVENT_DEVICE_FATAL &&
			   intr_conf->rmv) {
			*events |= (1 << RTE_ETH_EVENT_INTR_RMV);
			ret++;
		} else {
			DEBUG("event type %d on port %d not handled",
			      event.event_type, event.element.port_num);
		}
		ibv_ack_async_event(&event);
	}
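	/*
	 * When a port state event was seen, refresh the cached link data
	 * before deciding whether to report LSC now or re-check it later
	 * through an alarm.
	 */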
	if (!port_change)
		return ret;
	mlx4_link_update(priv->dev, 0);
	if (((link->link_speed == 0) && link->link_status) ||
	    ((link->link_speed != 0) && !link->link_status)) {
		if (!priv->intr_alarm) {
			/* Inconsistent status, check again later. */
			priv->intr_alarm = 1;
			rte_eal_alarm_set(MLX4_INTR_ALARM_TIMEOUT,
					  (void (*)(void *))
					  mlx4_link_status_alarm,
					  priv);
		}
	} else {
		*events |= (1 << RTE_ETH_EVENT_INTR_LSC);
	}
	return ret;
}

/**
 * Process scheduled link status check.
 *
 * @param priv
 *   Pointer to private structure.
 */
static void
mlx4_link_status_alarm(struct priv *priv)
{
	uint32_t events;
	int ret;

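	/*
	 * Scheduled by mlx4_collect_interrupt_events() when link speed and
	 * status disagreed; collect events again now that the link state
	 * may have settled.
	 */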
	assert(priv->intr_alarm == 1);
	priv->intr_alarm = 0;
	ret = mlx4_collect_interrupt_events(priv, &events);
	if (ret > 0 && events & (1 << RTE_ETH_EVENT_INTR_LSC))
		_rte_eth_dev_callback_process(priv->dev,
					      RTE_ETH_EVENT_INTR_LSC,
					      NULL, NULL);
}

/**
 * Handle interrupts from the NIC.
 *
 * @param priv
 *   Pointer to private structure.
 */
static void
mlx4_interrupt_handler(struct priv *priv)
{
	int ret;
	uint32_t ev;
	int i;

	ret = mlx4_collect_interrupt_events(priv, &ev);
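	/* Dispatch each collected event type to the registered callbacks. */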
	for (i = RTE_ETH_EVENT_UNKNOWN;
	     i < RTE_ETH_EVENT_MAX;
	     i++) {
		if (ev & (1 << i)) {
			ev &= ~(1 << i);
			_rte_eth_dev_callback_process(priv->dev, i,
						      NULL, NULL);
			ret--;
		}
	}
	if (ret > 0)
		WARN("%d event%s not processed", ret,
		     (ret > 1 ? "s were" : " was"));
}

/**
 * Uninstall interrupt handler.
 *
 * @param priv
 *   Pointer to private structure.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
int
mlx4_intr_uninstall(struct priv *priv)
{
	int err = rte_errno; /* Make sure rte_errno remains unchanged. */

	if (priv->intr_handle.fd != -1) {
		rte_intr_callback_unregister(&priv->intr_handle,
					     (void (*)(void *))
					     mlx4_interrupt_handler,
					     priv);
		priv->intr_handle.fd = -1;
	}
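	/*
	 * Drop any pending link status alarm so it cannot fire after the
	 * handler has been unregistered.
	 */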
	rte_eal_alarm_cancel((void (*)(void *))mlx4_link_status_alarm, priv);
	priv->intr_alarm = 0;
	mlx4_rx_intr_vec_disable(priv);
	rte_errno = err;
	return 0;
}

/**
 * Install interrupt handler.
 *
 * @param priv
 *   Pointer to private structure.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
int
mlx4_intr_install(struct priv *priv)
{
	const struct rte_intr_conf *const intr_conf =
		&priv->dev->data->dev_conf.intr_conf;
	int rc;

	mlx4_intr_uninstall(priv);
	if (intr_conf->rxq && mlx4_rx_intr_vec_enable(priv) < 0)
		goto error;
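	/*
	 * LSC and RMV notifications are read from the device's verbs
	 * asynchronous event fd; only register the handler when at least
	 * one of them is requested.
	 */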
	if (intr_conf->lsc | intr_conf->rmv) {
		priv->intr_handle.fd = priv->ctx->async_fd;
		rc = rte_intr_callback_register(&priv->intr_handle,
						(void (*)(void *))
						mlx4_interrupt_handler,
						priv);
		if (rc < 0) {
			rte_errno = -rc;
			goto error;
		}
	}
	return 0;
error:
	mlx4_intr_uninstall(priv);
	return -rte_errno;
}

/**
 * DPDK callback for Rx queue interrupt disable.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param idx
 *   Rx queue index.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
int
mlx4_rx_intr_disable(struct rte_eth_dev *dev, uint16_t idx)
{
	struct priv *priv = dev->data->dev_private;
	struct rxq *rxq = (*priv->rxqs)[idx];
	struct ibv_cq *ev_cq;
	void *ev_ctx;
	int ret;

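	/*
	 * There is no way to disarm an armed CQ; disabling the interrupt
	 * amounts to draining the pending completion event from the
	 * channel and acknowledging it.
	 */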
	if (!rxq || !rxq->channel) {
		ret = EINVAL;
	} else {
		ret = ibv_get_cq_event(rxq->cq->channel, &ev_cq, &ev_ctx);
		if (ret || ev_cq != rxq->cq)
			ret = EINVAL;
	}
	if (ret) {
		rte_errno = ret;
		WARN("unable to disable interrupt on rx queue %d",
		     idx);
	} else {
		ibv_ack_cq_events(rxq->cq, 1);
	}
	return -ret;
}

/**
 * DPDK callback for Rx queue interrupt enable.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param idx
 *   Rx queue index.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
int
mlx4_rx_intr_enable(struct rte_eth_dev *dev, uint16_t idx)
{
	struct priv *priv = dev->data->dev_private;
	struct rxq *rxq = (*priv->rxqs)[idx];
	int ret;

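	/*
	 * Arm the CQ: the second ibv_req_notify_cq() argument is 0, so any
	 * completion (not only solicited ones) will signal the completion
	 * channel.
	 */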
	if (!rxq || !rxq->channel)
		ret = EINVAL;
	else
		ret = ibv_req_notify_cq(rxq->cq, 0);
	if (ret) {
		rte_errno = ret;
		WARN("unable to arm interrupt on rx queue %d", idx);
	}
	return -ret;
}