/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <pthread.h>
#include <sys/queue.h>
#include <unistd.h>
#include <string.h>
#include <errno.h>
#include <assert.h>
#include <stdbool.h>
#include <sys/epoll.h>
#include <sys/ioctl.h>
#include <sys/eventfd.h>

#include <rte_common.h>
#include <rte_interrupts.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_debug.h>
#include <rte_log.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_pause.h>
#include <rte_vfio.h>
#include <rte_eal_trace.h>

#include "eal_private.h"
#define EAL_INTR_EPOLL_WAIT_FOREVER (-1)
#define NB_OTHER_INTR               1

static RTE_DEFINE_PER_LCORE(int, _epfd) = -1; /**< epoll fd per thread */

/**
 * union for pipe fds
 */
union intr_pipefds {
	struct {
		int pipefd[2];
	};
	struct {
		int readfd;
		int writefd;
	};
};
/**
 * union buffer for reading on different devices
 */
union rte_intr_read_buffer {
	int uio_intr_count;       /* for uio device */
	uint64_t vfio_intr_count; /* for vfio device */
	uint64_t timerfd_num;     /* for timerfd */
	char charbuf[16];         /* for others */
};
TAILQ_HEAD(rte_intr_cb_list, rte_intr_callback);
TAILQ_HEAD(rte_intr_source_list, rte_intr_source);
struct rte_intr_callback {
	TAILQ_ENTRY(rte_intr_callback) next;
	rte_intr_callback_fn cb_fn;  /**< callback address */
	void *cb_arg;                /**< parameter for callback */
	uint8_t pending_delete;      /**< delete after callback is called */
	rte_intr_unregister_callback_fn ucb_fn; /**< fn to call before cb is deleted */
};
struct rte_intr_source {
	TAILQ_ENTRY(rte_intr_source) next;
	struct rte_intr_handle *intr_handle; /**< interrupt handle */
	struct rte_intr_cb_list callbacks;   /**< user callbacks */
	uint32_t active;                     /**< source is being serviced */
};
/* global spinlock for interrupt data operation */
static rte_spinlock_t intr_lock = RTE_SPINLOCK_INITIALIZER;

/* union buffer for pipe read/write */
static union intr_pipefds intr_pipe;

/* interrupt sources list */
static struct rte_intr_source_list intr_sources;

/* interrupt handling thread */
static pthread_t intr_thread;
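
/*
 * Implementation note: interrupt delivery is centralized in a dedicated
 * control thread that epoll_wait()s on every registered source fd. The
 * intr_pipe above is a classic self-pipe: writing one byte to its write
 * end wakes the thread out of epoll_wait() so it can rebuild its wait
 * list whenever sources or callbacks change.
 */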
#define IRQ_SET_BUF_LEN  (sizeof(struct vfio_irq_set) + sizeof(int))
/* irq set buffer length for queue interrupts and LSC interrupt */
#define MSIX_IRQ_SET_BUF_LEN (sizeof(struct vfio_irq_set) + \
			      sizeof(int) * (RTE_MAX_RXTX_INTR_VEC_ID + 1))
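
/*
 * struct vfio_irq_set ends in a flexible array member ("data"), so the
 * eventfd file descriptors handed to the kernel ride directly behind the
 * header. The two buffer-length macros above size a stack buffer for the
 * header plus one fd (INTx/MSI) or one fd per possible vector (MSI-X).
 */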
/* enable legacy (INTx) interrupts */
static int
vfio_enable_intx(const struct rte_intr_handle *intr_handle) {
	struct vfio_irq_set *irq_set;
	char irq_set_buf[IRQ_SET_BUF_LEN];
	int len, ret, vfio_dev_fd;
	int *fd_ptr;

	len = sizeof(irq_set_buf);

	/* enable INTx */
	irq_set = (struct vfio_irq_set *) irq_set_buf;
	irq_set->argsz = len;
	irq_set->count = 1;
	irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
	irq_set->index = VFIO_PCI_INTX_IRQ_INDEX;
	irq_set->start = 0;
	fd_ptr = (int *) &irq_set->data;
	*fd_ptr = rte_intr_fd_get(intr_handle);

	vfio_dev_fd = rte_intr_dev_fd_get(intr_handle);
	ret = ioctl(vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
	if (ret) {
		RTE_LOG(ERR, EAL, "Error enabling INTx interrupts for fd %d\n",
			rte_intr_fd_get(intr_handle));
		return -1;
	}

	/* unmask INTx after enabling */
	memset(irq_set, 0, len);
	len = sizeof(struct vfio_irq_set);
	irq_set->argsz = len;
	irq_set->count = 1;
	irq_set->flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_UNMASK;
	irq_set->index = VFIO_PCI_INTX_IRQ_INDEX;
	irq_set->start = 0;

	ret = ioctl(vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
	if (ret) {
		RTE_LOG(ERR, EAL, "Error unmasking INTx interrupts for fd %d\n",
			rte_intr_fd_get(intr_handle));
		return -1;
	}

	return 0;
}
/* disable legacy (INTx) interrupts */
static int
vfio_disable_intx(const struct rte_intr_handle *intr_handle) {
	struct vfio_irq_set *irq_set;
	char irq_set_buf[IRQ_SET_BUF_LEN];
	int len, ret, vfio_dev_fd;

	len = sizeof(struct vfio_irq_set);

	/* mask interrupts before disabling */
	irq_set = (struct vfio_irq_set *) irq_set_buf;
	irq_set->argsz = len;
	irq_set->count = 1;
	irq_set->flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_MASK;
	irq_set->index = VFIO_PCI_INTX_IRQ_INDEX;
	irq_set->start = 0;

	vfio_dev_fd = rte_intr_dev_fd_get(intr_handle);
	ret = ioctl(vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
	if (ret) {
		RTE_LOG(ERR, EAL, "Error masking INTx interrupts for fd %d\n",
			rte_intr_fd_get(intr_handle));
		return -1;
	}

	/* disable INTx */
	memset(irq_set, 0, len);
	irq_set->argsz = len;
	irq_set->count = 0;
	irq_set->flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER;
	irq_set->index = VFIO_PCI_INTX_IRQ_INDEX;
	irq_set->start = 0;

	ret = ioctl(vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
	if (ret) {
		RTE_LOG(ERR, EAL, "Error disabling INTx interrupts for fd %d\n",
			rte_intr_fd_get(intr_handle));
		return -1;
	}

	return 0;
}
/* unmask/ack legacy (INTx) interrupts */
static int
vfio_ack_intx(const struct rte_intr_handle *intr_handle)
{
	struct vfio_irq_set irq_set;
	int vfio_dev_fd;

	/* unmask INTx */
	memset(&irq_set, 0, sizeof(irq_set));
	irq_set.argsz = sizeof(irq_set);
	irq_set.count = 1;
	irq_set.flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_UNMASK;
	irq_set.index = VFIO_PCI_INTX_IRQ_INDEX;
	irq_set.start = 0;

	vfio_dev_fd = rte_intr_dev_fd_get(intr_handle);
	if (ioctl(vfio_dev_fd, VFIO_DEVICE_SET_IRQS, &irq_set)) {
		RTE_LOG(ERR, EAL, "Error unmasking INTx interrupts for fd %d\n",
			rte_intr_fd_get(intr_handle));
		return -1;
	}

	return 0;
}
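
/*
 * Background for the mask/unmask dance above: INTx is level-triggered, so
 * the VFIO kernel driver auto-masks the line when it fires to prevent an
 * interrupt storm. Userspace must explicitly unmask (vfio_ack_intx) once
 * the event has been consumed before the device can interrupt again.
 */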
/* enable MSI interrupts */
static int
vfio_enable_msi(const struct rte_intr_handle *intr_handle) {
	int len, ret;
	char irq_set_buf[IRQ_SET_BUF_LEN];
	struct vfio_irq_set *irq_set;
	int *fd_ptr, vfio_dev_fd;

	len = sizeof(irq_set_buf);

	irq_set = (struct vfio_irq_set *) irq_set_buf;
	irq_set->argsz = len;
	irq_set->count = 1;
	irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
	irq_set->index = VFIO_PCI_MSI_IRQ_INDEX;
	irq_set->start = 0;
	fd_ptr = (int *) &irq_set->data;
	*fd_ptr = rte_intr_fd_get(intr_handle);

	vfio_dev_fd = rte_intr_dev_fd_get(intr_handle);
	ret = ioctl(vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
	if (ret) {
		RTE_LOG(ERR, EAL, "Error enabling MSI interrupts for fd %d\n",
			rte_intr_fd_get(intr_handle));
		return -1;
	}
	return 0;
}
/* disable MSI interrupts */
static int
vfio_disable_msi(const struct rte_intr_handle *intr_handle) {
	struct vfio_irq_set *irq_set;
	char irq_set_buf[IRQ_SET_BUF_LEN];
	int len, ret, vfio_dev_fd;

	len = sizeof(struct vfio_irq_set);

	irq_set = (struct vfio_irq_set *) irq_set_buf;
	irq_set->argsz = len;
	irq_set->count = 0;
	irq_set->flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER;
	irq_set->index = VFIO_PCI_MSI_IRQ_INDEX;
	irq_set->start = 0;

	vfio_dev_fd = rte_intr_dev_fd_get(intr_handle);
	ret = ioctl(vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
	if (ret)
		RTE_LOG(ERR, EAL, "Error disabling MSI interrupts for fd %d\n",
			rte_intr_fd_get(intr_handle));

	return ret;
}
/* enable MSI-X interrupts */
static int
vfio_enable_msix(const struct rte_intr_handle *intr_handle) {
	int len, ret;
	char irq_set_buf[MSIX_IRQ_SET_BUF_LEN];
	struct vfio_irq_set *irq_set;
	int *fd_ptr, vfio_dev_fd, i;

	len = sizeof(irq_set_buf);

	irq_set = (struct vfio_irq_set *) irq_set_buf;
	irq_set->argsz = len;
	/* 0 < irq_set->count < RTE_MAX_RXTX_INTR_VEC_ID + 1 */
	irq_set->count = rte_intr_max_intr_get(intr_handle) ?
		(rte_intr_max_intr_get(intr_handle) >
		 RTE_MAX_RXTX_INTR_VEC_ID + 1 ? RTE_MAX_RXTX_INTR_VEC_ID + 1 :
		 rte_intr_max_intr_get(intr_handle)) : 1;

	irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
	irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
	irq_set->start = 0;
	fd_ptr = (int *) &irq_set->data;
	/* INTR vector offset 0 is reserved for non-efd mappings */
	fd_ptr[RTE_INTR_VEC_ZERO_OFFSET] = rte_intr_fd_get(intr_handle);
	for (i = 0; i < rte_intr_nb_efd_get(intr_handle); i++) {
		fd_ptr[RTE_INTR_VEC_RXTX_OFFSET + i] =
			rte_intr_efds_index_get(intr_handle, i);
	}

	vfio_dev_fd = rte_intr_dev_fd_get(intr_handle);
	ret = ioctl(vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
	if (ret) {
		RTE_LOG(ERR, EAL, "Error enabling MSI-X interrupts for fd %d\n",
			rte_intr_fd_get(intr_handle));
		return -1;
	}

	return 0;
}
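
/*
 * Resulting MSI-X vector layout, as programmed above (illustrative, for a
 * device using two Rx queue eventfds):
 *
 *	fd_ptr[0] -> rte_intr_fd_get()   misc/LSC interrupt (vector zero)
 *	fd_ptr[1] -> efds[0]             Rx queue 0
 *	fd_ptr[2] -> efds[1]             Rx queue 1
 *
 * which is why vector 0 is reserved and queue vectors start at
 * RTE_INTR_VEC_RXTX_OFFSET.
 */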
/* disable MSI-X interrupts */
static int
vfio_disable_msix(const struct rte_intr_handle *intr_handle) {
	struct vfio_irq_set *irq_set;
	char irq_set_buf[MSIX_IRQ_SET_BUF_LEN];
	int len, ret, vfio_dev_fd;

	len = sizeof(struct vfio_irq_set);

	irq_set = (struct vfio_irq_set *) irq_set_buf;
	irq_set->argsz = len;
	irq_set->count = 0;
	irq_set->flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER;
	irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
	irq_set->start = 0;

	vfio_dev_fd = rte_intr_dev_fd_get(intr_handle);
	ret = ioctl(vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
	if (ret)
		RTE_LOG(ERR, EAL, "Error disabling MSI-X interrupts for fd %d\n",
			rte_intr_fd_get(intr_handle));

	return ret;
}
#ifdef HAVE_VFIO_DEV_REQ_INTERFACE
/* enable req notifier */
static int
vfio_enable_req(const struct rte_intr_handle *intr_handle)
{
	int len, ret;
	char irq_set_buf[IRQ_SET_BUF_LEN];
	struct vfio_irq_set *irq_set;
	int *fd_ptr, vfio_dev_fd;

	len = sizeof(irq_set_buf);

	irq_set = (struct vfio_irq_set *) irq_set_buf;
	irq_set->argsz = len;
	irq_set->count = 1;
	irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
			 VFIO_IRQ_SET_ACTION_TRIGGER;
	irq_set->index = VFIO_PCI_REQ_IRQ_INDEX;
	irq_set->start = 0;
	fd_ptr = (int *) &irq_set->data;
	*fd_ptr = rte_intr_fd_get(intr_handle);

	vfio_dev_fd = rte_intr_dev_fd_get(intr_handle);
	ret = ioctl(vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
	if (ret) {
		RTE_LOG(ERR, EAL, "Error enabling req interrupts for fd %d\n",
			rte_intr_fd_get(intr_handle));
		return -1;
	}

	return 0;
}
/* disable req notifier */
static int
vfio_disable_req(const struct rte_intr_handle *intr_handle)
{
	struct vfio_irq_set *irq_set;
	char irq_set_buf[IRQ_SET_BUF_LEN];
	int len, ret, vfio_dev_fd;

	len = sizeof(struct vfio_irq_set);

	irq_set = (struct vfio_irq_set *) irq_set_buf;
	irq_set->argsz = len;
	irq_set->count = 0;
	irq_set->flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER;
	irq_set->index = VFIO_PCI_REQ_IRQ_INDEX;
	irq_set->start = 0;

	vfio_dev_fd = rte_intr_dev_fd_get(intr_handle);
	ret = ioctl(vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
	if (ret)
		RTE_LOG(ERR, EAL, "Error disabling req interrupts for fd %d\n",
			rte_intr_fd_get(intr_handle));

	return ret;
}
#endif
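
/*
 * The uio_pci_generic helpers below toggle interrupts through the PCI
 * command register: config-space offset 5 is the command register's upper
 * byte, and 0x4 there is the Interrupt Disable bit (bit 10 of the full
 * 16-bit register). Setting it masks INTx; clearing it re-enables it.
 */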
static int
uio_intx_intr_disable(const struct rte_intr_handle *intr_handle)
{
	unsigned char command_high;
	int uio_cfg_fd;

	/* use UIO config file descriptor for uio_pci_generic */
	uio_cfg_fd = rte_intr_dev_fd_get(intr_handle);
	if (uio_cfg_fd < 0 || pread(uio_cfg_fd, &command_high, 1, 5) != 1) {
		RTE_LOG(ERR, EAL,
			"Error reading interrupts status for fd %d\n",
			uio_cfg_fd);
		return -1;
	}
	/* disable interrupts */
	command_high |= 0x4;
	if (pwrite(uio_cfg_fd, &command_high, 1, 5) != 1) {
		RTE_LOG(ERR, EAL,
			"Error disabling interrupts for fd %d\n",
			uio_cfg_fd);
		return -1;
	}

	return 0;
}
static int
uio_intx_intr_enable(const struct rte_intr_handle *intr_handle)
{
	unsigned char command_high;
	int uio_cfg_fd;

	/* use UIO config file descriptor for uio_pci_generic */
	uio_cfg_fd = rte_intr_dev_fd_get(intr_handle);
	if (uio_cfg_fd < 0 || pread(uio_cfg_fd, &command_high, 1, 5) != 1) {
		RTE_LOG(ERR, EAL,
			"Error reading interrupts status for fd %d\n",
			uio_cfg_fd);
		return -1;
	}
	/* enable interrupts */
	command_high &= ~0x4;
	if (pwrite(uio_cfg_fd, &command_high, 1, 5) != 1) {
		RTE_LOG(ERR, EAL,
			"Error enabling interrupts for fd %d\n",
			uio_cfg_fd);
		return -1;
	}

	return 0;
}
static int
uio_intr_disable(const struct rte_intr_handle *intr_handle)
{
	const int value = 0;

	if (rte_intr_fd_get(intr_handle) < 0 ||
	    write(rte_intr_fd_get(intr_handle), &value, sizeof(value)) < 0) {
		RTE_LOG(ERR, EAL, "Error disabling interrupts for fd %d (%s)\n",
			rte_intr_fd_get(intr_handle), strerror(errno));
		return -1;
	}
	return 0;
}
static int
uio_intr_enable(const struct rte_intr_handle *intr_handle)
{
	const int value = 1;

	if (rte_intr_fd_get(intr_handle) < 0 ||
	    write(rte_intr_fd_get(intr_handle), &value, sizeof(value)) < 0) {
		RTE_LOG(ERR, EAL, "Error enabling interrupts for fd %d (%s)\n",
			rte_intr_fd_get(intr_handle), strerror(errno));
		return -1;
	}
	return 0;
}
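
/*
 * Typical driver usage of the registration API below (illustrative only;
 * "lsc_handler" and "dev" are hypothetical names):
 *
 *	static void lsc_handler(void *arg) { struct my_dev *dev = arg; ... }
 *	...
 *	rte_intr_callback_register(dev->intr_handle, lsc_handler, dev);
 *
 * Several callbacks may share one fd; they are chained on a single
 * rte_intr_source entry and invoked in registration order.
 */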
int
rte_intr_callback_register(const struct rte_intr_handle *intr_handle,
			rte_intr_callback_fn cb, void *cb_arg)
{
	int ret, wake_thread;
	struct rte_intr_source *src;
	struct rte_intr_callback *callback;

	wake_thread = 0;

	/* first do parameter checking */
	if (rte_intr_fd_get(intr_handle) < 0 || cb == NULL) {
		RTE_LOG(ERR, EAL, "Registering with invalid input parameter\n");
		return -EINVAL;
	}

	/* allocate a new interrupt callback entity */
	callback = calloc(1, sizeof(*callback));
	if (callback == NULL) {
		RTE_LOG(ERR, EAL, "Can not allocate memory\n");
		return -ENOMEM;
	}
	callback->cb_fn = cb;
	callback->cb_arg = cb_arg;
	callback->pending_delete = 0;
	callback->ucb_fn = NULL;

	rte_spinlock_lock(&intr_lock);

	/* check if there is at least one callback registered for the fd */
	TAILQ_FOREACH(src, &intr_sources, next) {
		if (rte_intr_fd_get(src->intr_handle) == rte_intr_fd_get(intr_handle)) {
			/* we had no interrupts for this */
			if (TAILQ_EMPTY(&src->callbacks))
				wake_thread = 1;

			TAILQ_INSERT_TAIL(&(src->callbacks), callback, next);
			ret = 0;
			break;
		}
	}

	/* no existing callbacks for this - add new source */
	if (src == NULL) {
		src = calloc(1, sizeof(*src));
		if (src == NULL) {
			RTE_LOG(ERR, EAL, "Can not allocate memory\n");
			ret = -ENOMEM;
			free(callback);
			callback = NULL;
		} else {
			src->intr_handle = rte_intr_instance_dup(intr_handle);
			if (src->intr_handle == NULL) {
				RTE_LOG(ERR, EAL, "Can not create intr instance\n");
				ret = -ENOMEM;
				free(callback);
				callback = NULL;
				free(src);
				src = NULL;
			} else {
				TAILQ_INIT(&src->callbacks);
				TAILQ_INSERT_TAIL(&(src->callbacks), callback,
						  next);
				TAILQ_INSERT_TAIL(&intr_sources, src, next);
				wake_thread = 1;
				ret = 0;
			}
		}
	}

	rte_spinlock_unlock(&intr_lock);

	/*
	 * check if need to notify the pipe fd waited by epoll_wait to
	 * rebuild the wait list.
	 */
	if (wake_thread)
		if (write(intr_pipe.writefd, "1", 1) < 0)
			ret = -EPIPE;

	rte_eal_trace_intr_callback_register(intr_handle, cb, cb_arg, ret);
	return ret;
}
int
rte_intr_callback_unregister_pending(const struct rte_intr_handle *intr_handle,
				rte_intr_callback_fn cb_fn, void *cb_arg,
				rte_intr_unregister_callback_fn ucb_fn)
{
	int ret;
	struct rte_intr_source *src;
	struct rte_intr_callback *cb, *next;

	/* do parameter checking first */
	if (rte_intr_fd_get(intr_handle) < 0) {
		RTE_LOG(ERR, EAL, "Unregistering with invalid input parameter\n");
		return -EINVAL;
	}

	rte_spinlock_lock(&intr_lock);

	/* check if the interrupt source for the fd is existent */
	TAILQ_FOREACH(src, &intr_sources, next) {
		if (rte_intr_fd_get(src->intr_handle) == rte_intr_fd_get(intr_handle))
			break;
	}

	/* No interrupt source registered for the fd */
	if (src == NULL) {
		ret = -ENOENT;

	/* only usable if the source is active */
	} else if (src->active == 0) {
		ret = -EAGAIN;

	} else {
		ret = 0;

		/* walk through the callbacks and mark all that match. */
		for (cb = TAILQ_FIRST(&src->callbacks); cb != NULL; cb = next) {
			next = TAILQ_NEXT(cb, next);
			if (cb->cb_fn == cb_fn && (cb_arg == (void *)-1 ||
					cb->cb_arg == cb_arg)) {
				cb->pending_delete = 1;
				cb->ucb_fn = ucb_fn;
				ret++;
			}
		}
	}

	rte_spinlock_unlock(&intr_lock);

	return ret;
}
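
/*
 * Note: unlike rte_intr_callback_unregister(), the _pending variant above
 * never frees anything itself. It only flags matching callbacks while the
 * source is active (i.e. from within a callback); the interrupt thread
 * later removes them and invokes ucb_fn so the owner can release cb_arg.
 */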
int
rte_intr_callback_unregister(const struct rte_intr_handle *intr_handle,
			rte_intr_callback_fn cb_fn, void *cb_arg)
{
	int ret;
	struct rte_intr_source *src;
	struct rte_intr_callback *cb, *next;

	/* do parameter checking first */
	if (rte_intr_fd_get(intr_handle) < 0) {
		RTE_LOG(ERR, EAL, "Unregistering with invalid input parameter\n");
		return -EINVAL;
	}

	rte_spinlock_lock(&intr_lock);

	/* check if the interrupt source for the fd is existent */
	TAILQ_FOREACH(src, &intr_sources, next)
		if (rte_intr_fd_get(src->intr_handle) == rte_intr_fd_get(intr_handle))
			break;

	/* No interrupt source registered for the fd */
	if (src == NULL) {
		ret = -ENOENT;

	/* interrupt source has some active callbacks right now. */
	} else if (src->active != 0) {
		ret = -EAGAIN;

	/* ok to remove. */
	} else {
		ret = 0;

		/* walk through the callbacks and remove all that match. */
		for (cb = TAILQ_FIRST(&src->callbacks); cb != NULL; cb = next) {
			next = TAILQ_NEXT(cb, next);
			if (cb->cb_fn == cb_fn && (cb_arg == (void *)-1 ||
					cb->cb_arg == cb_arg)) {
				TAILQ_REMOVE(&src->callbacks, cb, next);
				free(cb);
				ret++;
			}
		}

		/* all callbacks for that source are removed. */
		if (TAILQ_EMPTY(&src->callbacks)) {
			TAILQ_REMOVE(&intr_sources, src, next);
			rte_intr_instance_free(src->intr_handle);
			free(src);
		}
	}

	rte_spinlock_unlock(&intr_lock);

	/* notify the pipe fd waited by epoll_wait to rebuild the wait list */
	if (ret >= 0 && write(intr_pipe.writefd, "1", 1) < 0) {
		ret = -EPIPE;
	}

	rte_eal_trace_intr_callback_unregister(intr_handle, cb_fn, cb_arg,
		ret);
	return ret;
}
int
rte_intr_callback_unregister_sync(const struct rte_intr_handle *intr_handle,
			rte_intr_callback_fn cb_fn, void *cb_arg)
{
	int ret = 0;

	while ((ret = rte_intr_callback_unregister(intr_handle, cb_fn, cb_arg)) == -EAGAIN)
		rte_pause();

	return ret;
}
int
rte_intr_enable(const struct rte_intr_handle *intr_handle)
{
	int rc = 0, uio_cfg_fd;

	if (intr_handle == NULL)
		return -1;

	if (rte_intr_type_get(intr_handle) == RTE_INTR_HANDLE_VDEV) {
		rc = 0;
		goto out;
	}

	uio_cfg_fd = rte_intr_dev_fd_get(intr_handle);
	if (rte_intr_fd_get(intr_handle) < 0 || uio_cfg_fd < 0) {
		rc = -1;
		goto out;
	}

	switch (rte_intr_type_get(intr_handle)) {
	/* write to the uio fd to enable the interrupt */
	case RTE_INTR_HANDLE_UIO:
		if (uio_intr_enable(intr_handle))
			rc = -1;
		break;
	case RTE_INTR_HANDLE_UIO_INTX:
		if (uio_intx_intr_enable(intr_handle))
			rc = -1;
		break;
	/* not used at this moment */
	case RTE_INTR_HANDLE_ALARM:
		rc = -1;
		break;
	case RTE_INTR_HANDLE_VFIO_MSIX:
		if (vfio_enable_msix(intr_handle))
			rc = -1;
		break;
	case RTE_INTR_HANDLE_VFIO_MSI:
		if (vfio_enable_msi(intr_handle))
			rc = -1;
		break;
	case RTE_INTR_HANDLE_VFIO_LEGACY:
		if (vfio_enable_intx(intr_handle))
			rc = -1;
		break;
#ifdef HAVE_VFIO_DEV_REQ_INTERFACE
	case RTE_INTR_HANDLE_VFIO_REQ:
		if (vfio_enable_req(intr_handle))
			rc = -1;
		break;
#endif
	/* not used at this moment */
	case RTE_INTR_HANDLE_DEV_EVENT:
		rc = -1;
		break;
	/* unknown handle type */
	default:
		RTE_LOG(ERR, EAL, "Unknown handle type of fd %d\n",
			rte_intr_fd_get(intr_handle));
		rc = -1;
		break;
	}
out:
	rte_eal_trace_intr_enable(intr_handle, rc);
	return rc;
}
/**
 * PMD generally calls this function at the end of its IRQ callback.
 * Internally, it unmasks the interrupt if possible.
 *
 * For INTx, unmasking is required as the interrupt is auto-masked prior to
 * invoking the callback.
 *
 * For MSI/MSI-X, unmasking is typically not needed as the interrupt is not
 * auto-masked. In fact, for interrupt handle types VFIO_MSIX and VFIO_MSI,
 * this function is a no-op.
 */
int
rte_intr_ack(const struct rte_intr_handle *intr_handle)
{
	int uio_cfg_fd;

	if (rte_intr_type_get(intr_handle) == RTE_INTR_HANDLE_VDEV)
		return 0;

	uio_cfg_fd = rte_intr_dev_fd_get(intr_handle);
	if (rte_intr_fd_get(intr_handle) < 0 || uio_cfg_fd < 0)
		return -1;

	switch (rte_intr_type_get(intr_handle)) {
	/* Both acking and enabling are same for UIO */
	case RTE_INTR_HANDLE_UIO:
		if (uio_intr_enable(intr_handle))
			return -1;
		break;
	case RTE_INTR_HANDLE_UIO_INTX:
		if (uio_intx_intr_enable(intr_handle))
			return -1;
		break;
	/* not used at this moment */
	case RTE_INTR_HANDLE_ALARM:
		return -1;
	/* VFIO MSI* is implicitly acked unlike INTx, nothing to do */
	case RTE_INTR_HANDLE_VFIO_MSIX:
	case RTE_INTR_HANDLE_VFIO_MSI:
		return 0;
	case RTE_INTR_HANDLE_VFIO_LEGACY:
		if (vfio_ack_intx(intr_handle))
			return -1;
		break;
#ifdef HAVE_VFIO_DEV_REQ_INTERFACE
	case RTE_INTR_HANDLE_VFIO_REQ:
		return -1;
#endif
	/* not used at this moment */
	case RTE_INTR_HANDLE_DEV_EVENT:
		return -1;
	/* unknown handle type */
	default:
		RTE_LOG(ERR, EAL, "Unknown handle type of fd %d\n",
			rte_intr_fd_get(intr_handle));
		return -1;
	}

	return 0;
}
int
rte_intr_disable(const struct rte_intr_handle *intr_handle)
{
	int rc = 0, uio_cfg_fd;

	if (intr_handle == NULL)
		return -1;

	if (rte_intr_type_get(intr_handle) == RTE_INTR_HANDLE_VDEV) {
		rc = 0;
		goto out;
	}

	uio_cfg_fd = rte_intr_dev_fd_get(intr_handle);
	if (rte_intr_fd_get(intr_handle) < 0 || uio_cfg_fd < 0) {
		rc = -1;
		goto out;
	}

	switch (rte_intr_type_get(intr_handle)) {
	/* write to the uio fd to disable the interrupt */
	case RTE_INTR_HANDLE_UIO:
		if (uio_intr_disable(intr_handle))
			rc = -1;
		break;
	case RTE_INTR_HANDLE_UIO_INTX:
		if (uio_intx_intr_disable(intr_handle))
			rc = -1;
		break;
	/* not used at this moment */
	case RTE_INTR_HANDLE_ALARM:
		rc = -1;
		break;
	case RTE_INTR_HANDLE_VFIO_MSIX:
		if (vfio_disable_msix(intr_handle))
			rc = -1;
		break;
	case RTE_INTR_HANDLE_VFIO_MSI:
		if (vfio_disable_msi(intr_handle))
			rc = -1;
		break;
	case RTE_INTR_HANDLE_VFIO_LEGACY:
		if (vfio_disable_intx(intr_handle))
			rc = -1;
		break;
#ifdef HAVE_VFIO_DEV_REQ_INTERFACE
	case RTE_INTR_HANDLE_VFIO_REQ:
		if (vfio_disable_req(intr_handle))
			rc = -1;
		break;
#endif
	/* not used at this moment */
	case RTE_INTR_HANDLE_DEV_EVENT:
		rc = -1;
		break;
	/* unknown handle type */
	default:
		RTE_LOG(ERR, EAL, "Unknown handle type of fd %d\n",
			rte_intr_fd_get(intr_handle));
		rc = -1;
		break;
	}
out:
	rte_eal_trace_intr_disable(intr_handle, rc);
	return rc;
}
static int
eal_intr_process_interrupts(struct epoll_event *events, int nfds)
{
	bool call = false;
	int n, bytes_read, rv;
	struct rte_intr_source *src;
	struct rte_intr_callback *cb, *next;
	union rte_intr_read_buffer buf;
	struct rte_intr_callback active_cb;

	for (n = 0; n < nfds; n++) {

		/*
		 * if the pipe fd is ready to read, return out to
		 * rebuild the wait list.
		 */
		if (events[n].data.fd == intr_pipe.readfd) {
			int r = read(intr_pipe.readfd, buf.charbuf,
					sizeof(buf.charbuf));
			RTE_SET_USED(r);
			return -1;
		}
		rte_spinlock_lock(&intr_lock);
		TAILQ_FOREACH(src, &intr_sources, next)
			if (rte_intr_fd_get(src->intr_handle) == events[n].data.fd)
				break;
		if (src == NULL) {
			rte_spinlock_unlock(&intr_lock);
			continue;
		}

		/* mark this interrupt source as active and release the lock. */
		src->active = 1;
		rte_spinlock_unlock(&intr_lock);

		/* set the length to be read for different handle types */
		switch (rte_intr_type_get(src->intr_handle)) {
		case RTE_INTR_HANDLE_UIO:
		case RTE_INTR_HANDLE_UIO_INTX:
			bytes_read = sizeof(buf.uio_intr_count);
			break;
		case RTE_INTR_HANDLE_ALARM:
			bytes_read = sizeof(buf.timerfd_num);
			break;
#ifdef HAVE_VFIO_DEV_REQ_INTERFACE
		case RTE_INTR_HANDLE_VFIO_REQ:
#endif
		case RTE_INTR_HANDLE_VFIO_MSIX:
		case RTE_INTR_HANDLE_VFIO_MSI:
		case RTE_INTR_HANDLE_VFIO_LEGACY:
			bytes_read = sizeof(buf.vfio_intr_count);
			break;
		case RTE_INTR_HANDLE_VDEV:
		case RTE_INTR_HANDLE_EXT:
			bytes_read = 0;
			call = true;
			break;
		case RTE_INTR_HANDLE_DEV_EVENT:
			bytes_read = 0;
			call = true;
			break;
		default:
			bytes_read = 1;
			break;
		}

		if (bytes_read > 0) {
			/**
			 * read out to clear the ready-to-be-read flag
			 * for epoll_wait.
			 */
			bytes_read = read(events[n].data.fd, &buf, bytes_read);
			if (bytes_read < 0) {
				if (errno == EINTR || errno == EWOULDBLOCK)
					continue;

				RTE_LOG(ERR, EAL, "Error reading from file "
					"descriptor %d: %s\n",
					events[n].data.fd,
					strerror(errno));
				/*
				 * The device is unplugged or buggy, remove
				 * it as an interrupt source and return to
				 * force the wait list to be rebuilt.
				 */
				rte_spinlock_lock(&intr_lock);
				TAILQ_REMOVE(&intr_sources, src, next);
				rte_spinlock_unlock(&intr_lock);

				for (cb = TAILQ_FIRST(&src->callbacks); cb;
							cb = next) {
					next = TAILQ_NEXT(cb, next);
					TAILQ_REMOVE(&src->callbacks, cb, next);
					free(cb);
				}
				rte_intr_instance_free(src->intr_handle);
				free(src);
				return -1;
			} else if (bytes_read == 0)
				RTE_LOG(ERR, EAL, "Read nothing from file "
					"descriptor %d\n", events[n].data.fd);
			else
				call = true;
		}

		/* grab a lock, again to call callbacks and update status. */
		rte_spinlock_lock(&intr_lock);

		if (call) {

			/* Finally, call all callbacks. */
			TAILQ_FOREACH(cb, &src->callbacks, next) {

				/* make a copy and unlock. */
				active_cb = *cb;
				rte_spinlock_unlock(&intr_lock);

				/* call the actual callback */
				active_cb.cb_fn(active_cb.cb_arg);

				/* get the lock back. */
				rte_spinlock_lock(&intr_lock);
			}
		}

		/* we are done with that interrupt source, release it. */
		src->active = 0;

		rv = 0;

		/* check if any callbacks are supposed to be removed */
		for (cb = TAILQ_FIRST(&src->callbacks); cb != NULL; cb = next) {
			next = TAILQ_NEXT(cb, next);
			if (cb->pending_delete) {
				TAILQ_REMOVE(&src->callbacks, cb, next);
				if (cb->ucb_fn)
					cb->ucb_fn(src->intr_handle, cb->cb_arg);
				free(cb);
				rv++;
			}
		}

		/* all callbacks for that source are removed. */
		if (TAILQ_EMPTY(&src->callbacks)) {
			TAILQ_REMOVE(&intr_sources, src, next);
			rte_intr_instance_free(src->intr_handle);
			free(src);
		}

		/* notify the pipe fd waited by epoll_wait to rebuild the wait list */
		if (rv > 0 && write(intr_pipe.writefd, "1", 1) < 0) {
			rte_spinlock_unlock(&intr_lock);
			return -EPIPE;
		}

		rte_spinlock_unlock(&intr_lock);
	}

	return 0;
}
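
/*
 * Locking note for the loop above: src->active serves as a busy marker so
 * that unregister calls made while a callback is running return -EAGAIN
 * instead of freeing memory under our feet. Each callback is copied out
 * and invoked with intr_lock released, since callbacks commonly re-enter
 * the interrupt API (e.g. rte_intr_ack or unregister_pending).
 */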
/**
 * It handles all the interrupts.
 *
 * @param pfd
 *  epoll file descriptor.
 * @param totalfds
 *  The number of file descriptors added in epoll.
 *
 * @return
 *  void
 */
static void
eal_intr_handle_interrupts(int pfd, unsigned totalfds)
{
	struct epoll_event events[totalfds];
	int nfds = 0;

	for (;;) {
		nfds = epoll_wait(pfd, events, totalfds,
			EAL_INTR_EPOLL_WAIT_FOREVER);
		/* epoll_wait fail */
		if (nfds < 0) {
			if (errno == EINTR)
				continue;
			RTE_LOG(ERR, EAL,
				"epoll_wait failed\n");
			return;
		}
		/* epoll_wait timeout, will never happen here */
		else if (nfds == 0)
			continue;
		/* epoll_wait has at least one fd ready to read */
		if (eal_intr_process_interrupts(events, nfds) < 0)
			return;
	}
}
/**
 * It builds/rebuilds up the epoll file descriptor with all the
 * file descriptors being waited on. Then handles the interrupts.
 *
 * @param arg
 *  pointer. (unused)
 *
 * @return
 *  never returns
 */
static __rte_noreturn void *
eal_intr_thread_main(__rte_unused void *arg)
{
	/* host thread, never break out */
	for (;;) {
		/* build up the epoll fd with all descriptors we are to
		 * wait on then pass it to the handle_interrupts function. */
		static struct epoll_event pipe_event = {
			.events = EPOLLIN | EPOLLPRI,
		};
		struct rte_intr_source *src;
		unsigned numfds = 0;

		/* create epoll fd */
		int pfd = epoll_create(1);
		if (pfd < 0)
			rte_panic("Cannot create epoll instance\n");

		pipe_event.data.fd = intr_pipe.readfd;
		/**
		 * add pipe fd into wait list, this pipe is used to
		 * rebuild the wait list.
		 */
		if (epoll_ctl(pfd, EPOLL_CTL_ADD, intr_pipe.readfd,
						&pipe_event) < 0) {
			rte_panic("Error adding pipe fd %d to epoll, %s\n",
					intr_pipe.readfd, strerror(errno));
		}
		numfds++;

		rte_spinlock_lock(&intr_lock);

		TAILQ_FOREACH(src, &intr_sources, next) {
			struct epoll_event ev;

			if (src->callbacks.tqh_first == NULL)
				continue; /* skip those with no callbacks */
			memset(&ev, 0, sizeof(ev));
			ev.events = EPOLLIN | EPOLLPRI | EPOLLRDHUP | EPOLLHUP;
			ev.data.fd = rte_intr_fd_get(src->intr_handle);

			/**
			 * add all the device file descriptors
			 * into the wait list.
			 */
			if (epoll_ctl(pfd, EPOLL_CTL_ADD,
					rte_intr_fd_get(src->intr_handle), &ev) < 0) {
				rte_panic("Error adding fd %d epoll_ctl, %s\n",
					rte_intr_fd_get(src->intr_handle),
					strerror(errno));
			}
			else
				numfds++;
		}
		rte_spinlock_unlock(&intr_lock);
		/* serve the interrupt */
		eal_intr_handle_interrupts(pfd, numfds);

		/**
		 * when we return, we need to rebuild the
		 * list of fds to monitor.
		 */
		close(pfd);
	}
}
int
rte_eal_intr_init(void)
{
	int ret = 0;

	/* init the global interrupt source head */
	TAILQ_INIT(&intr_sources);

	/**
	 * create a pipe which will be waited by epoll and notified to
	 * rebuild the wait list of epoll.
	 */
	if (pipe(intr_pipe.pipefd) < 0) {
		rte_errno = errno;
		return -1;
	}

	/* create the host thread to wait/handle the interrupt */
	ret = rte_ctrl_thread_create(&intr_thread, "eal-intr-thread", NULL,
			eal_intr_thread_main, NULL);
	if (ret != 0) {
		rte_errno = -ret;
		RTE_LOG(ERR, EAL,
			"Failed to create thread for interrupt handling\n");
	}

	return ret;
}
static void
eal_intr_proc_rxtx_intr(int fd, const struct rte_intr_handle *intr_handle)
{
	union rte_intr_read_buffer buf;
	int bytes_read = 0;
	int nbytes;

	switch (rte_intr_type_get(intr_handle)) {
	case RTE_INTR_HANDLE_UIO:
	case RTE_INTR_HANDLE_UIO_INTX:
		bytes_read = sizeof(buf.uio_intr_count);
		break;
	case RTE_INTR_HANDLE_VFIO_MSIX:
	case RTE_INTR_HANDLE_VFIO_MSI:
	case RTE_INTR_HANDLE_VFIO_LEGACY:
		bytes_read = sizeof(buf.vfio_intr_count);
		break;
	case RTE_INTR_HANDLE_VDEV:
		bytes_read = rte_intr_efd_counter_size_get(intr_handle);
		/* For vdev, number of bytes to read is set by driver */
		break;
	case RTE_INTR_HANDLE_EXT:
		return;
	default:
		bytes_read = 1;
		RTE_LOG(INFO, EAL, "unexpected intr type\n");
		break;
	}

	/**
	 * read out to clear the ready-to-be-read flag
	 * for epoll_wait.
	 */
	if (bytes_read == 0)
		return;
	do {
		nbytes = read(fd, &buf, bytes_read);
		if (nbytes < 0) {
			if (errno == EINTR || errno == EWOULDBLOCK ||
			    errno == EAGAIN)
				continue;
			RTE_LOG(ERR, EAL,
				"Error reading from fd %d: %s\n",
				fd, strerror(errno));
		} else if (nbytes == 0)
			RTE_LOG(ERR, EAL, "Read nothing from fd %d\n", fd);
		return;
	} while (1);
}
static int
eal_epoll_process_event(struct epoll_event *evs, unsigned int n,
			struct rte_epoll_event *events)
{
	unsigned int i, count = 0;
	struct rte_epoll_event *rev;
	uint32_t valid_status;

	for (i = 0; i < n; i++) {
		rev = evs[i].data.ptr;
		valid_status = RTE_EPOLL_VALID;
		/* ACQUIRE memory ordering here pairs with RELEASE
		 * ordering below acting as a lock to synchronize
		 * the event data updating.
		 */
		if (!rev || !__atomic_compare_exchange_n(&rev->status,
				    &valid_status, RTE_EPOLL_EXEC, 0,
				    __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
			continue;

		events[count].status = RTE_EPOLL_VALID;
		events[count].fd = rev->fd;
		events[count].epfd = rev->epfd;
		events[count].epdata.event = evs[i].events;
		events[count].epdata.data = rev->epdata.data;
		if (rev->epdata.cb_fun)
			rev->epdata.cb_fun(rev->fd,
					   rev->epdata.cb_arg);

		/* the status update should be observed after
		 * the other fields change.
		 */
		__atomic_store_n(&rev->status, RTE_EPOLL_VALID,
				__ATOMIC_RELEASE);
		count++;
	}
	return count;
}
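
/*
 * rev->status is a tiny state machine: INVALID (unused) -> VALID (armed)
 * -> EXEC (being delivered above) -> VALID again. The acquire CAS and the
 * release store bracket the copy-out like a lock, letting the delete path
 * (eal_epoll_data_safe_free) spin until no delivery is in flight.
 */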
static int
eal_init_tls_epfd(void)
{
	int pfd = epoll_create(255);

	if (pfd < 0) {
		RTE_LOG(ERR, EAL,
			"Cannot create epoll instance\n");
		return -1;
	}
	return pfd;
}

int
rte_intr_tls_epfd(void)
{
	if (RTE_PER_LCORE(_epfd) == -1)
		RTE_PER_LCORE(_epfd) = eal_init_tls_epfd();

	return RTE_PER_LCORE(_epfd);
}
static int
eal_epoll_wait(int epfd, struct rte_epoll_event *events,
	       int maxevents, int timeout, bool interruptible)
{
	struct epoll_event evs[maxevents];
	int rc;

	if (!events) {
		RTE_LOG(ERR, EAL, "rte_epoll_event can't be NULL\n");
		return -1;
	}

	/* using per thread epoll fd */
	if (epfd == RTE_EPOLL_PER_THREAD)
		epfd = rte_intr_tls_epfd();

	while (1) {
		rc = epoll_wait(epfd, evs, maxevents, timeout);
		if (likely(rc > 0)) {
			/* epoll_wait has at least one fd ready to read */
			rc = eal_epoll_process_event(evs, rc, events);
			break;
		} else if (rc < 0) {
			if (errno == EINTR) {
				if (interruptible)
					return -1;
				else
					continue;
			}
			/* epoll_wait fail */
			RTE_LOG(ERR, EAL, "epoll_wait failed: %s\n",
				strerror(errno));
			rc = -1;
			break;
		} else {
			/* rc == 0, epoll_wait timed out */
			break;
		}
	}

	return rc;
}
int
rte_epoll_wait(int epfd, struct rte_epoll_event *events,
	       int maxevents, int timeout)
{
	return eal_epoll_wait(epfd, events, maxevents, timeout, false);
}

int
rte_epoll_wait_interruptible(int epfd, struct rte_epoll_event *events,
			     int maxevents, int timeout)
{
	return eal_epoll_wait(epfd, events, maxevents, timeout, true);
}
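
/*
 * Illustrative wait-loop usage (hypothetical sizes and "handle" helper).
 * Passing RTE_EPOLL_PER_THREAD selects this thread's lazily created
 * epoll fd:
 *
 *	struct rte_epoll_event ev[8];
 *	int i, n = rte_epoll_wait(RTE_EPOLL_PER_THREAD, ev, 8, -1);
 *	for (i = 0; i < n; i++)
 *		handle(ev[i].fd, ev[i].epdata.data);
 */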
static void
eal_epoll_data_safe_free(struct rte_epoll_event *ev)
{
	uint32_t valid_status = RTE_EPOLL_VALID;

	while (!__atomic_compare_exchange_n(&ev->status, &valid_status,
		    RTE_EPOLL_INVALID, 0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) {
		while (__atomic_load_n(&ev->status,
				__ATOMIC_RELAXED) != RTE_EPOLL_VALID)
			rte_pause();
		valid_status = RTE_EPOLL_VALID;
	}
	memset(&ev->epdata, 0, sizeof(ev->epdata));
	ev->fd = -1;
	ev->epfd = -1;
}
int
rte_epoll_ctl(int epfd, int op, int fd,
	      struct rte_epoll_event *event)
{
	struct epoll_event ev;

	if (!event) {
		RTE_LOG(ERR, EAL, "rte_epoll_event can't be NULL\n");
		return -1;
	}

	/* using per thread epoll fd */
	if (epfd == RTE_EPOLL_PER_THREAD)
		epfd = rte_intr_tls_epfd();

	if (op == EPOLL_CTL_ADD) {
		__atomic_store_n(&event->status, RTE_EPOLL_VALID,
				__ATOMIC_RELAXED);
		event->fd = fd;  /* ignore fd in event */
		event->epfd = epfd;
		ev.data.ptr = (void *)event;
	}

	ev.events = event->epdata.event;
	if (epoll_ctl(epfd, op, fd, &ev) < 0) {
		RTE_LOG(ERR, EAL, "Error op %d fd %d epoll_ctl, %s\n",
			op, fd, strerror(errno));
		if (op == EPOLL_CTL_ADD)
			/* rollback status when CTL_ADD fail */
			__atomic_store_n(&event->status, RTE_EPOLL_INVALID,
					__ATOMIC_RELAXED);
		return -1;
	}

	if (op == EPOLL_CTL_DEL && __atomic_load_n(&event->status,
			__ATOMIC_RELAXED) != RTE_EPOLL_INVALID)
		eal_epoll_data_safe_free(event);

	return 0;
}
int
rte_intr_rx_ctl(struct rte_intr_handle *intr_handle, int epfd,
		int op, unsigned int vec, void *data)
{
	struct rte_epoll_event *rev;
	struct rte_epoll_data *epdata;
	int epfd_op;
	unsigned int efd_idx;
	int rc = 0;

	efd_idx = (vec >= RTE_INTR_VEC_RXTX_OFFSET) ?
		(vec - RTE_INTR_VEC_RXTX_OFFSET) : vec;

	if (intr_handle == NULL || rte_intr_nb_efd_get(intr_handle) == 0 ||
			efd_idx >= (unsigned int)rte_intr_nb_efd_get(intr_handle)) {
		RTE_LOG(ERR, EAL, "Wrong intr vector number.\n");
		return -EPERM;
	}

	switch (op) {
	case RTE_INTR_EVENT_ADD:
		epfd_op = EPOLL_CTL_ADD;
		rev = rte_intr_elist_index_get(intr_handle, efd_idx);
		if (__atomic_load_n(&rev->status,
				__ATOMIC_RELAXED) != RTE_EPOLL_INVALID) {
			RTE_LOG(INFO, EAL, "Event already been added.\n");
			return -EEXIST;
		}

		/* attach to intr vector fd */
		epdata = &rev->epdata;
		epdata->event = EPOLLIN | EPOLLPRI | EPOLLET;
		epdata->data = data;
		epdata->cb_fun = (rte_intr_event_cb_t)eal_intr_proc_rxtx_intr;
		epdata->cb_arg = (void *)intr_handle;
		rc = rte_epoll_ctl(epfd, epfd_op,
			rte_intr_efds_index_get(intr_handle, efd_idx), rev);
		if (!rc)
			RTE_LOG(DEBUG, EAL,
				"efd %d associated with vec %d added on epfd %d\n",
				rev->fd, vec, epfd);
		else
			rc = -EPERM;
		break;
	case RTE_INTR_EVENT_DEL:
		epfd_op = EPOLL_CTL_DEL;
		rev = rte_intr_elist_index_get(intr_handle, efd_idx);
		if (__atomic_load_n(&rev->status,
				__ATOMIC_RELAXED) == RTE_EPOLL_INVALID) {
			RTE_LOG(INFO, EAL, "Event does not exist.\n");
			return -EPERM;
		}

		rc = rte_epoll_ctl(rev->epfd, epfd_op, rev->fd, rev);
		if (rc)
			rc = -EPERM;
		break;
	default:
		RTE_LOG(ERR, EAL, "event op type mismatch\n");
		rc = -EPERM;
	}

	return rc;
}
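
/*
 * Sketch of the datapath Rx-interrupt flow this function enables
 * (illustrative only; "qid" and "ring" are hypothetical, error handling
 * omitted, and applications usually reach this via the ethdev wrappers):
 *
 *	rte_intr_efd_enable(intr_handle, nb_rx_queues);
 *	rte_intr_rx_ctl(intr_handle, RTE_EPOLL_PER_THREAD,
 *			RTE_INTR_EVENT_ADD,
 *			RTE_INTR_VEC_RXTX_OFFSET + qid, ring);
 *	... rte_epoll_wait(RTE_EPOLL_PER_THREAD, ev, n, timeout); ...
 *
 * Note that eal_intr_proc_rxtx_intr is installed as the event callback,
 * so the eventfd counter is drained automatically on each wakeup.
 */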
void
rte_intr_free_epoll_fd(struct rte_intr_handle *intr_handle)
{
	uint32_t i;
	struct rte_epoll_event *rev;

	for (i = 0; i < (uint32_t)rte_intr_nb_efd_get(intr_handle); i++) {
		rev = rte_intr_elist_index_get(intr_handle, i);
		if (__atomic_load_n(&rev->status,
				__ATOMIC_RELAXED) == RTE_EPOLL_INVALID)
			continue;
		if (rte_epoll_ctl(rev->epfd, EPOLL_CTL_DEL, rev->fd, rev)) {
			/* force free if the entry valid */
			eal_epoll_data_safe_free(rev);
		}
	}
}
int
rte_intr_efd_enable(struct rte_intr_handle *intr_handle, uint32_t nb_efd)
{
	uint32_t i;
	int fd;
	uint32_t n = RTE_MIN(nb_efd, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);

	assert(nb_efd != 0);

	if (rte_intr_type_get(intr_handle) == RTE_INTR_HANDLE_VFIO_MSIX) {
		for (i = 0; i < n; i++) {
			fd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
			if (fd < 0) {
				RTE_LOG(ERR, EAL,
					"can't setup eventfd, error %i (%s)\n",
					errno, strerror(errno));
				return -errno;
			}

			if (rte_intr_efds_index_set(intr_handle, i, fd))
				return -rte_errno;
		}

		if (rte_intr_nb_efd_set(intr_handle, n))
			return -rte_errno;

		if (rte_intr_max_intr_set(intr_handle, NB_OTHER_INTR + n))
			return -rte_errno;
	} else if (rte_intr_type_get(intr_handle) == RTE_INTR_HANDLE_VDEV) {
		/* only check, initialization would be done in vdev driver.*/
		if ((uint64_t)rte_intr_efd_counter_size_get(intr_handle) >
		    sizeof(union rte_intr_read_buffer)) {
			RTE_LOG(ERR, EAL, "the efd_counter_size is oversized\n");
			return -EINVAL;
		}
	} else {
		if (rte_intr_efds_index_set(intr_handle, 0, rte_intr_fd_get(intr_handle)))
			return -rte_errno;
		if (rte_intr_nb_efd_set(intr_handle, RTE_MIN(nb_efd, 1U)))
			return -rte_errno;
		if (rte_intr_max_intr_set(intr_handle, NB_OTHER_INTR))
			return -rte_errno;
	}

	return 0;
}
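
/*
 * Accounting note: for VFIO MSI-X the handle ends up with max_intr =
 * NB_OTHER_INTR + n, i.e. one non-queue vector (misc/LSC) plus n queue
 * eventfds, matching the vector layout programmed by vfio_enable_msix().
 * rte_intr_allow_others() below keys off exactly this difference.
 */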
void
rte_intr_efd_disable(struct rte_intr_handle *intr_handle)
{
	uint32_t i;

	rte_intr_free_epoll_fd(intr_handle);
	if (rte_intr_max_intr_get(intr_handle) > rte_intr_nb_efd_get(intr_handle)) {
		for (i = 0; i < (uint32_t)rte_intr_nb_efd_get(intr_handle); i++)
			close(rte_intr_efds_index_get(intr_handle, i));
	}
	rte_intr_nb_efd_set(intr_handle, 0);
	rte_intr_max_intr_set(intr_handle, 0);
}
int
rte_intr_dp_is_en(struct rte_intr_handle *intr_handle)
{
	return rte_intr_nb_efd_get(intr_handle) != 0;
}
int
rte_intr_allow_others(struct rte_intr_handle *intr_handle)
{
	if (!rte_intr_dp_is_en(intr_handle))
		return 1;
	else
		return !!(rte_intr_max_intr_get(intr_handle) -
			  rte_intr_nb_efd_get(intr_handle));
}
int
rte_intr_cap_multiple(struct rte_intr_handle *intr_handle)
{
	if (rte_intr_type_get(intr_handle) == RTE_INTR_HANDLE_VFIO_MSIX)
		return 1;

	if (rte_intr_type_get(intr_handle) == RTE_INTR_HANDLE_VDEV)
		return 1;

	return 0;
}
int rte_thread_is_intr(void)
{
	return pthread_equal(intr_thread, pthread_self());
}