/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <pthread.h>
#include <sys/queue.h>
#include <unistd.h>
#include <string.h>
#include <errno.h>
#include <sys/epoll.h>
#include <sys/signalfd.h>
#include <sys/ioctl.h>
#include <sys/eventfd.h>
#include <assert.h>
#include <stdbool.h>
#include <rte_common.h>
#include <rte_interrupts.h>
#include <rte_memory.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_debug.h>
#include <rte_log.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_pause.h>
#include <rte_vfio.h>
#include <rte_eal_trace.h>

#include "eal_private.h"
#include "eal_vfio.h"
#include "eal_thread.h"
#define EAL_INTR_EPOLL_WAIT_FOREVER (-1)
#define NB_OTHER_INTR               1
static RTE_DEFINE_PER_LCORE(int, _epfd) = -1; /**< epoll fd per thread */

/**
 * union for pipe fds
 */
union intr_pipefds {
	struct {
		int pipefd[2];
	};
	struct {
		int readfd;
		int writefd;
	};
};

/**
 * union buffer for reading on different devices
 */
union rte_intr_read_buffer {
	int uio_intr_count;       /* for uio device */
#ifdef VFIO_PRESENT
	uint64_t vfio_intr_count; /* for vfio device */
#endif
	uint64_t timerfd_num;     /* for timerfd */
	char charbuf[16];         /* for others */
};
TAILQ_HEAD(rte_intr_cb_list, rte_intr_callback);
TAILQ_HEAD(rte_intr_source_list, rte_intr_source);
struct rte_intr_callback {
	TAILQ_ENTRY(rte_intr_callback) next;
	rte_intr_callback_fn cb_fn;  /**< callback address */
	void *cb_arg;                /**< parameter for callback */
	uint8_t pending_delete;      /**< delete after callback is called */
	rte_intr_unregister_callback_fn ucb_fn; /**< fn to call before cb is deleted */
};
struct rte_intr_source {
	TAILQ_ENTRY(rte_intr_source) next;
	struct rte_intr_handle intr_handle; /**< interrupt handle */
	struct rte_intr_cb_list callbacks;  /**< user callbacks */
	uint32_t active;
};
/* global spinlock for interrupt data operation */
static rte_spinlock_t intr_lock = RTE_SPINLOCK_INITIALIZER;

/* union buffer for pipe read/write */
static union intr_pipefds intr_pipe;

/* interrupt sources list */
static struct rte_intr_source_list intr_sources;

/* interrupt handling thread */
static pthread_t intr_thread;
/* VFIO interrupts */
#ifdef VFIO_PRESENT

#define IRQ_SET_BUF_LEN  (sizeof(struct vfio_irq_set) + sizeof(int))
/* irq set buffer length for queue interrupts and LSC interrupt */
#define MSIX_IRQ_SET_BUF_LEN (sizeof(struct vfio_irq_set) + \
			      sizeof(int) * (RTE_MAX_RXTX_INTR_VEC_ID + 1))
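/*
 * Layout sketch (illustrative, not upstream documentation): the
 * VFIO_DEVICE_SET_IRQS ioctl takes a variable-length struct vfio_irq_set
 * whose trailing "data" member carries the payload, here an array of
 * eventfds:
 *
 *	char buf[IRQ_SET_BUF_LEN];
 *	struct vfio_irq_set *s = (struct vfio_irq_set *)buf;
 *	int *fds = (int *)&s->data;	// fds[0..count-1] follow the header
 *
 * MSIX_IRQ_SET_BUF_LEN therefore reserves one fd slot per Rx/Tx vector
 * plus one extra slot for the non-queue interrupt (see vfio_enable_msix()).
 */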
/* enable legacy (INTx) interrupts */
static int
vfio_enable_intx(const struct rte_intr_handle *intr_handle) {
	struct vfio_irq_set *irq_set;
	char irq_set_buf[IRQ_SET_BUF_LEN];
	int len, ret;
	int *fd_ptr;

	len = sizeof(irq_set_buf);

	/* enable INTx */
	irq_set = (struct vfio_irq_set *) irq_set_buf;
	irq_set->argsz = len;
	irq_set->count = 1;
	irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
	irq_set->index = VFIO_PCI_INTX_IRQ_INDEX;
	irq_set->start = 0;
	fd_ptr = (int *) &irq_set->data;
	*fd_ptr = intr_handle->fd;

	ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
	if (ret) {
		RTE_LOG(ERR, EAL, "Error enabling INTx interrupts for fd %d\n",
			intr_handle->fd);
		return -1;
	}

	/* unmask INTx after enabling */
	memset(irq_set, 0, len);
	len = sizeof(struct vfio_irq_set);
	irq_set->argsz = len;
	irq_set->count = 1;
	irq_set->flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_UNMASK;
	irq_set->index = VFIO_PCI_INTX_IRQ_INDEX;
	irq_set->start = 0;

	ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
	if (ret) {
		RTE_LOG(ERR, EAL, "Error unmasking INTx interrupts for fd %d\n",
			intr_handle->fd);
		return -1;
	}
	return 0;
}
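/*
 * Note: enabling INTx is a two-step sequence by design. VFIO delivers the
 * level-triggered INTx line auto-masked, so after associating the eventfd
 * (ACTION_TRIGGER) the line must be explicitly unmasked (ACTION_UNMASK)
 * before the first interrupt can be observed; vfio_ack_intx() below
 * performs the same unmask after every serviced event.
 */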
/* disable legacy (INTx) interrupts */
static int
vfio_disable_intx(const struct rte_intr_handle *intr_handle) {
	struct vfio_irq_set *irq_set;
	char irq_set_buf[IRQ_SET_BUF_LEN];
	int len, ret;

	len = sizeof(struct vfio_irq_set);

	/* mask interrupts before disabling */
	irq_set = (struct vfio_irq_set *) irq_set_buf;
	irq_set->argsz = len;
	irq_set->count = 1;
	irq_set->flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_MASK;
	irq_set->index = VFIO_PCI_INTX_IRQ_INDEX;
	irq_set->start = 0;

	ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
	if (ret) {
		RTE_LOG(ERR, EAL, "Error masking INTx interrupts for fd %d\n",
			intr_handle->fd);
		return -1;
	}

	/* disable INTx */
	memset(irq_set, 0, len);
	irq_set->argsz = len;
	irq_set->count = 0;
	irq_set->flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER;
	irq_set->index = VFIO_PCI_INTX_IRQ_INDEX;
	irq_set->start = 0;

	ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
	if (ret) {
		RTE_LOG(ERR, EAL,
			"Error disabling INTx interrupts for fd %d\n", intr_handle->fd);
		return -1;
	}
	return 0;
}
/* unmask/ack legacy (INTx) interrupts */
static int
vfio_ack_intx(const struct rte_intr_handle *intr_handle)
{
	struct vfio_irq_set irq_set;

	/* unmask INTx */
	memset(&irq_set, 0, sizeof(irq_set));
	irq_set.argsz = sizeof(irq_set);
	irq_set.count = 1;
	irq_set.flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_UNMASK;
	irq_set.index = VFIO_PCI_INTX_IRQ_INDEX;
	irq_set.start = 0;

	if (ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, &irq_set)) {
		RTE_LOG(ERR, EAL, "Error unmasking INTx interrupts for fd %d\n",
			intr_handle->fd);
		return -1;
	}
	return 0;
}
/* enable MSI interrupts */
static int
vfio_enable_msi(const struct rte_intr_handle *intr_handle) {
	int len, ret;
	char irq_set_buf[IRQ_SET_BUF_LEN];
	struct vfio_irq_set *irq_set;
	int *fd_ptr;

	len = sizeof(irq_set_buf);

	irq_set = (struct vfio_irq_set *) irq_set_buf;
	irq_set->argsz = len;
	irq_set->count = 1;
	irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
	irq_set->index = VFIO_PCI_MSI_IRQ_INDEX;
	irq_set->start = 0;
	fd_ptr = (int *) &irq_set->data;
	*fd_ptr = intr_handle->fd;

	ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
	if (ret) {
		RTE_LOG(ERR, EAL, "Error enabling MSI interrupts for fd %d\n",
			intr_handle->fd);
		return -1;
	}
	return 0;
}
/* disable MSI interrupts */
static int
vfio_disable_msi(const struct rte_intr_handle *intr_handle) {
	struct vfio_irq_set *irq_set;
	char irq_set_buf[IRQ_SET_BUF_LEN];
	int len, ret;

	len = sizeof(struct vfio_irq_set);

	irq_set = (struct vfio_irq_set *) irq_set_buf;
	irq_set->argsz = len;
	irq_set->count = 0;
	irq_set->flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER;
	irq_set->index = VFIO_PCI_MSI_IRQ_INDEX;
	irq_set->start = 0;

	ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
	if (ret)
		RTE_LOG(ERR, EAL,
			"Error disabling MSI interrupts for fd %d\n", intr_handle->fd);

	return ret;
}
/* enable MSI-X interrupts */
static int
vfio_enable_msix(const struct rte_intr_handle *intr_handle) {
	int len, ret;
	char irq_set_buf[MSIX_IRQ_SET_BUF_LEN];
	struct vfio_irq_set *irq_set;
	int *fd_ptr;

	len = sizeof(irq_set_buf);

	irq_set = (struct vfio_irq_set *) irq_set_buf;
	irq_set->argsz = len;
	/* 0 < irq_set->count < RTE_MAX_RXTX_INTR_VEC_ID + 1 */
	irq_set->count = intr_handle->max_intr ?
		(intr_handle->max_intr > RTE_MAX_RXTX_INTR_VEC_ID + 1 ?
		RTE_MAX_RXTX_INTR_VEC_ID + 1 : intr_handle->max_intr) : 1;
	irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
	irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
	irq_set->start = 0;
	fd_ptr = (int *) &irq_set->data;
	/* INTR vector offset 0 is reserved for the non-efd mapping */
	fd_ptr[RTE_INTR_VEC_ZERO_OFFSET] = intr_handle->fd;
	memcpy(&fd_ptr[RTE_INTR_VEC_RXTX_OFFSET], intr_handle->efds,
		sizeof(*intr_handle->efds) * intr_handle->nb_efd);

	ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
	if (ret) {
		RTE_LOG(ERR, EAL, "Error enabling MSI-X interrupts for fd %d\n",
			intr_handle->fd);
		return -1;
	}

	return 0;
}
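/*
 * Illustrative layout of the efd array programmed above (assuming
 * nb_efd == 2, hypothetical values): slot 0 carries the control/LSC
 * eventfd, the following slots carry one eventfd per Rx/Tx vector:
 *
 *	fd_ptr[RTE_INTR_VEC_ZERO_OFFSET]     = intr_handle->fd;
 *	fd_ptr[RTE_INTR_VEC_RXTX_OFFSET + 0] = intr_handle->efds[0];
 *	fd_ptr[RTE_INTR_VEC_RXTX_OFFSET + 1] = intr_handle->efds[1];
 *
 * so irq_set->count would be 3 (NB_OTHER_INTR + nb_efd).
 */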
/* disable MSI-X interrupts */
static int
vfio_disable_msix(const struct rte_intr_handle *intr_handle) {
	struct vfio_irq_set *irq_set;
	char irq_set_buf[MSIX_IRQ_SET_BUF_LEN];
	int len, ret;

	len = sizeof(struct vfio_irq_set);

	irq_set = (struct vfio_irq_set *) irq_set_buf;
	irq_set->argsz = len;
	irq_set->count = 0;
	irq_set->flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER;
	irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
	irq_set->start = 0;

	ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
	if (ret)
		RTE_LOG(ERR, EAL,
			"Error disabling MSI-X interrupts for fd %d\n", intr_handle->fd);

	return ret;
}
#ifdef HAVE_VFIO_DEV_REQ_INTERFACE
/* enable req notifier */
static int
vfio_enable_req(const struct rte_intr_handle *intr_handle)
{
	int len, ret;
	char irq_set_buf[IRQ_SET_BUF_LEN];
	struct vfio_irq_set *irq_set;
	int *fd_ptr;

	len = sizeof(irq_set_buf);

	irq_set = (struct vfio_irq_set *) irq_set_buf;
	irq_set->argsz = len;
	irq_set->count = 1;
	irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
			 VFIO_IRQ_SET_ACTION_TRIGGER;
	irq_set->index = VFIO_PCI_REQ_IRQ_INDEX;
	irq_set->start = 0;
	fd_ptr = (int *) &irq_set->data;
	*fd_ptr = intr_handle->fd;

	ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
	if (ret) {
		RTE_LOG(ERR, EAL, "Error enabling req interrupts for fd %d\n",
			intr_handle->fd);
		return -1;
	}

	return 0;
}
/* disable req notifier */
static int
vfio_disable_req(const struct rte_intr_handle *intr_handle)
{
	struct vfio_irq_set *irq_set;
	char irq_set_buf[IRQ_SET_BUF_LEN];
	int len, ret;

	len = sizeof(struct vfio_irq_set);

	irq_set = (struct vfio_irq_set *) irq_set_buf;
	irq_set->argsz = len;
	irq_set->count = 0;
	irq_set->flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER;
	irq_set->index = VFIO_PCI_REQ_IRQ_INDEX;
	irq_set->start = 0;

	ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
	if (ret)
		RTE_LOG(ERR, EAL, "Error disabling req interrupts for fd %d\n",
			intr_handle->fd);

	return ret;
}
#endif
#endif /* VFIO_PRESENT */
static int
uio_intx_intr_disable(const struct rte_intr_handle *intr_handle)
{
	unsigned char command_high;

	/* use UIO config file descriptor for uio_pci_generic */
	if (pread(intr_handle->uio_cfg_fd, &command_high, 1, 5) != 1) {
		RTE_LOG(ERR, EAL,
			"Error reading interrupts status for fd %d\n",
			intr_handle->uio_cfg_fd);
		return -1;
	}
	/* disable interrupts: set the INTx disable bit in the PCI command register */
	command_high |= 0x4;
	if (pwrite(intr_handle->uio_cfg_fd, &command_high, 1, 5) != 1) {
		RTE_LOG(ERR, EAL,
			"Error disabling interrupts for fd %d\n",
			intr_handle->uio_cfg_fd);
		return -1;
	}

	return 0;
}
static int
uio_intx_intr_enable(const struct rte_intr_handle *intr_handle)
{
	unsigned char command_high;

	/* use UIO config file descriptor for uio_pci_generic */
	if (pread(intr_handle->uio_cfg_fd, &command_high, 1, 5) != 1) {
		RTE_LOG(ERR, EAL,
			"Error reading interrupts status for fd %d\n",
			intr_handle->uio_cfg_fd);
		return -1;
	}
	/* enable interrupts: clear the INTx disable bit in the PCI command register */
	command_high &= ~0x4;
	if (pwrite(intr_handle->uio_cfg_fd, &command_high, 1, 5) != 1) {
		RTE_LOG(ERR, EAL,
			"Error enabling interrupts for fd %d\n",
			intr_handle->uio_cfg_fd);
		return -1;
	}

	return 0;
}
static int
uio_intr_disable(const struct rte_intr_handle *intr_handle)
{
	const int value = 0;

	if (write(intr_handle->fd, &value, sizeof(value)) < 0) {
		RTE_LOG(ERR, EAL,
			"Error disabling interrupts for fd %d (%s)\n",
			intr_handle->fd, strerror(errno));
		return -1;
	}
	return 0;
}
static int
uio_intr_enable(const struct rte_intr_handle *intr_handle)
{
	const int value = 1;

	if (write(intr_handle->fd, &value, sizeof(value)) < 0) {
		RTE_LOG(ERR, EAL,
			"Error enabling interrupts for fd %d (%s)\n",
			intr_handle->fd, strerror(errno));
		return -1;
	}
	return 0;
}
int
rte_intr_callback_register(const struct rte_intr_handle *intr_handle,
			rte_intr_callback_fn cb, void *cb_arg)
{
	int ret, wake_thread;
	struct rte_intr_source *src;
	struct rte_intr_callback *callback;

	wake_thread = 0;

	/* first do parameter checking */
	if (intr_handle == NULL || intr_handle->fd < 0 || cb == NULL) {
		RTE_LOG(ERR, EAL,
			"Registering with invalid input parameter\n");
		return -EINVAL;
	}

	/* allocate a new interrupt callback entity */
	callback = calloc(1, sizeof(*callback));
	if (callback == NULL) {
		RTE_LOG(ERR, EAL, "Can not allocate memory\n");
		return -ENOMEM;
	}
	callback->cb_fn = cb;
	callback->cb_arg = cb_arg;
	callback->pending_delete = 0;
	callback->ucb_fn = NULL;

	rte_spinlock_lock(&intr_lock);

	/* check if there is at least one callback registered for the fd */
	TAILQ_FOREACH(src, &intr_sources, next) {
		if (src->intr_handle.fd == intr_handle->fd) {
			/* we had no interrupts for this */
			if (TAILQ_EMPTY(&src->callbacks))
				wake_thread = 1;

			TAILQ_INSERT_TAIL(&(src->callbacks), callback, next);
			ret = 0;
			break;
		}
	}

	/* no existing callbacks for this - add new source */
	if (src == NULL) {
		src = calloc(1, sizeof(*src));
		if (src == NULL) {
			RTE_LOG(ERR, EAL, "Can not allocate memory\n");
			free(callback);
			ret = -ENOMEM;
		} else {
			src->intr_handle = *intr_handle;
			TAILQ_INIT(&src->callbacks);
			TAILQ_INSERT_TAIL(&(src->callbacks), callback, next);
			TAILQ_INSERT_TAIL(&intr_sources, src, next);
			wake_thread = 1;
			ret = 0;
		}
	}

	rte_spinlock_unlock(&intr_lock);

	/*
	 * check whether we need to notify the pipe fd waited on by
	 * epoll_wait to rebuild the wait list.
	 */
	if (wake_thread)
		if (write(intr_pipe.writefd, "1", 1) < 0)
			ret = -EPIPE;

	rte_eal_trace_intr_callback_register(intr_handle, cb, cb_arg, ret);
	return ret;
}
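/*
 * Illustrative usage (hypothetical names, not part of this file): a
 * driver registers a handler for its interrupt fd once at setup time
 * and re-arms the interrupt from inside the handler:
 *
 *	static void lsc_handler(void *cb_arg)
 *	{
 *		struct my_adapter *ad = cb_arg;	// hypothetical type
 *		// ... read and handle the event cause ...
 *		rte_intr_ack(&ad->intr_handle);	// re-arm INTx if needed
 *	}
 *
 *	rte_intr_callback_register(&ad->intr_handle, lsc_handler, ad);
 */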
int
rte_intr_callback_unregister_pending(const struct rte_intr_handle *intr_handle,
				rte_intr_callback_fn cb_fn, void *cb_arg,
				rte_intr_unregister_callback_fn ucb_fn)
{
	int ret;
	struct rte_intr_source *src;
	struct rte_intr_callback *cb, *next;

	/* do parameter checking first */
	if (intr_handle == NULL || intr_handle->fd < 0) {
		RTE_LOG(ERR, EAL,
			"Unregistering with invalid input parameter\n");
		return -EINVAL;
	}

	rte_spinlock_lock(&intr_lock);

	/* check if an interrupt source exists for the fd */
	TAILQ_FOREACH(src, &intr_sources, next)
		if (src->intr_handle.fd == intr_handle->fd)
			break;

	/* No interrupt source registered for the fd */
	if (src == NULL) {
		ret = -ENOENT;

	/* only usable if the source is active */
	} else if (src->active == 0) {
		ret = -EAGAIN;

	} else {
		ret = 0;

		/* walk through the callbacks and mark all that match. */
		for (cb = TAILQ_FIRST(&src->callbacks); cb != NULL; cb = next) {
			next = TAILQ_NEXT(cb, next);
			if (cb->cb_fn == cb_fn && (cb_arg == (void *)-1 ||
					cb->cb_arg == cb_arg)) {
				cb->pending_delete = 1;
				cb->ucb_fn = ucb_fn;
				ret++;
			}
		}
	}

	rte_spinlock_unlock(&intr_lock);

	return ret;
}
int
rte_intr_callback_unregister(const struct rte_intr_handle *intr_handle,
			rte_intr_callback_fn cb_fn, void *cb_arg)
{
	int ret;
	struct rte_intr_source *src;
	struct rte_intr_callback *cb, *next;

	/* do parameter checking first */
	if (intr_handle == NULL || intr_handle->fd < 0) {
		RTE_LOG(ERR, EAL,
			"Unregistering with invalid input parameter\n");
		return -EINVAL;
	}

	rte_spinlock_lock(&intr_lock);

	/* check if an interrupt source exists for the fd */
	TAILQ_FOREACH(src, &intr_sources, next)
		if (src->intr_handle.fd == intr_handle->fd)
			break;

	/* No interrupt source registered for the fd */
	if (src == NULL) {
		ret = -ENOENT;

	/* interrupt source has some active callbacks right now. */
	} else if (src->active != 0) {
		ret = -EAGAIN;

	/* ok to remove. */
	} else {
		ret = 0;

		/* walk through the callbacks and remove all that match. */
		for (cb = TAILQ_FIRST(&src->callbacks); cb != NULL; cb = next) {
			next = TAILQ_NEXT(cb, next);
			if (cb->cb_fn == cb_fn && (cb_arg == (void *)-1 ||
					cb->cb_arg == cb_arg)) {
				TAILQ_REMOVE(&src->callbacks, cb, next);
				free(cb);
				ret++;
			}
		}

		/* all callbacks for that source are removed. */
		if (TAILQ_EMPTY(&src->callbacks)) {
			TAILQ_REMOVE(&intr_sources, src, next);
			free(src);
		}
	}

	rte_spinlock_unlock(&intr_lock);

	/* notify the pipe fd waited on by epoll_wait to rebuild the wait list */
	if (ret >= 0 && write(intr_pipe.writefd, "1", 1) < 0) {
		ret = -EPIPE;
	}

	rte_eal_trace_intr_callback_unregister(intr_handle, cb_fn, cb_arg,
		ret);
	return ret;
}
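/*
 * Usage note (illustrative): cb_arg == (void *)-1 acts as a wildcard,
 * removing every callback registered with cb_fn on this handle
 * regardless of its argument:
 *
 *	rte_intr_callback_unregister(&ad->intr_handle, lsc_handler,
 *				     (void *)-1);
 *
 * -EAGAIN means the source is busy servicing its callbacks; in that
 * case (e.g. when called from a callback itself) use
 * rte_intr_callback_unregister_pending() above instead.
 */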
int
rte_intr_enable(const struct rte_intr_handle *intr_handle)
{
	int rc = 0;

	if (intr_handle && intr_handle->type == RTE_INTR_HANDLE_VDEV) {
		rc = 0;
		goto out;
	}

	if (!intr_handle || intr_handle->fd < 0 ||
			intr_handle->uio_cfg_fd < 0) {
		rc = -1;
		goto out;
	}

	switch (intr_handle->type) {
	/* write to the uio fd to enable the interrupt */
	case RTE_INTR_HANDLE_UIO:
		if (uio_intr_enable(intr_handle))
			rc = -1;
		break;
	case RTE_INTR_HANDLE_UIO_INTX:
		if (uio_intx_intr_enable(intr_handle))
			rc = -1;
		break;
	/* not used at this moment */
	case RTE_INTR_HANDLE_ALARM:
		rc = -1;
		break;
#ifdef VFIO_PRESENT
	case RTE_INTR_HANDLE_VFIO_MSIX:
		if (vfio_enable_msix(intr_handle))
			rc = -1;
		break;
	case RTE_INTR_HANDLE_VFIO_MSI:
		if (vfio_enable_msi(intr_handle))
			rc = -1;
		break;
	case RTE_INTR_HANDLE_VFIO_LEGACY:
		if (vfio_enable_intx(intr_handle))
			rc = -1;
		break;
#ifdef HAVE_VFIO_DEV_REQ_INTERFACE
	case RTE_INTR_HANDLE_VFIO_REQ:
		if (vfio_enable_req(intr_handle))
			rc = -1;
		break;
#endif
#endif
	/* not used at this moment */
	case RTE_INTR_HANDLE_DEV_EVENT:
		rc = -1;
		break;
	/* unknown handle type */
	default:
		RTE_LOG(ERR, EAL,
			"Unknown handle type of fd %d\n",
			intr_handle->fd);
		rc = -1;
		break;
	}
out:
	rte_eal_trace_intr_enable(intr_handle, rc);
	return rc;
}
/**
 * A PMD generally calls this function at the end of its IRQ callback.
 * Internally, it unmasks the interrupt if possible.
 *
 * For INTx, unmasking is required as the interrupt is auto-masked prior to
 * invoking the callback.
 *
 * For MSI/MSI-X, unmasking is typically not needed as the interrupt is not
 * auto-masked. In fact, for interrupt handle types VFIO_MSIX and VFIO_MSI,
 * this function is a no-op.
 */
int
rte_intr_ack(const struct rte_intr_handle *intr_handle)
{
	if (intr_handle && intr_handle->type == RTE_INTR_HANDLE_VDEV)
		return 0;

	if (!intr_handle || intr_handle->fd < 0 || intr_handle->uio_cfg_fd < 0)
		return -1;

	switch (intr_handle->type) {
	/* Both acking and enabling are same for UIO */
	case RTE_INTR_HANDLE_UIO:
		if (uio_intr_enable(intr_handle))
			return -1;
		break;
	case RTE_INTR_HANDLE_UIO_INTX:
		if (uio_intx_intr_enable(intr_handle))
			return -1;
		break;
	/* not used at this moment */
	case RTE_INTR_HANDLE_ALARM:
		return -1;
#ifdef VFIO_PRESENT
	/* VFIO MSI* is implicitly acked unlike INTx, nothing to do */
	case RTE_INTR_HANDLE_VFIO_MSIX:
	case RTE_INTR_HANDLE_VFIO_MSI:
		return 0;
	case RTE_INTR_HANDLE_VFIO_LEGACY:
		if (vfio_ack_intx(intr_handle))
			return -1;
		break;
#ifdef HAVE_VFIO_DEV_REQ_INTERFACE
	case RTE_INTR_HANDLE_VFIO_REQ:
		return -1;
#endif
#endif
	/* not used at this moment */
	case RTE_INTR_HANDLE_DEV_EVENT:
		return -1;
	/* unknown handle type */
	default:
		RTE_LOG(ERR, EAL, "Unknown handle type of fd %d\n",
			intr_handle->fd);
		return -1;
	}

	return 0;
}
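/*
 * Sequence sketch for level-triggered INTx under VFIO (illustrative):
 *
 *	1. the kernel masks the line and signals the eventfd;
 *	2. the callback runs and clears the device interrupt cause;
 *	3. rte_intr_ack() -> vfio_ack_intx() unmasks the line again.
 *
 * Skipping step 3 on an RTE_INTR_HANDLE_VFIO_LEGACY handle leaves the
 * line masked, so no further interrupts are delivered.
 */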
int
rte_intr_disable(const struct rte_intr_handle *intr_handle)
{
	int rc = 0;

	if (intr_handle && intr_handle->type == RTE_INTR_HANDLE_VDEV) {
		rc = 0;
		goto out;
	}

	if (!intr_handle || intr_handle->fd < 0 ||
			intr_handle->uio_cfg_fd < 0) {
		rc = -1;
		goto out;
	}

	switch (intr_handle->type) {
	/* write to the uio fd to disable the interrupt */
	case RTE_INTR_HANDLE_UIO:
		if (uio_intr_disable(intr_handle))
			rc = -1;
		break;
	case RTE_INTR_HANDLE_UIO_INTX:
		if (uio_intx_intr_disable(intr_handle))
			rc = -1;
		break;
	/* not used at this moment */
	case RTE_INTR_HANDLE_ALARM:
		rc = -1;
		break;
#ifdef VFIO_PRESENT
	case RTE_INTR_HANDLE_VFIO_MSIX:
		if (vfio_disable_msix(intr_handle))
			rc = -1;
		break;
	case RTE_INTR_HANDLE_VFIO_MSI:
		if (vfio_disable_msi(intr_handle))
			rc = -1;
		break;
	case RTE_INTR_HANDLE_VFIO_LEGACY:
		if (vfio_disable_intx(intr_handle))
			rc = -1;
		break;
#ifdef HAVE_VFIO_DEV_REQ_INTERFACE
	case RTE_INTR_HANDLE_VFIO_REQ:
		if (vfio_disable_req(intr_handle))
			rc = -1;
		break;
#endif
#endif
	/* not used at this moment */
	case RTE_INTR_HANDLE_DEV_EVENT:
		rc = -1;
		break;
	/* unknown handle type */
	default:
		RTE_LOG(ERR, EAL,
			"Unknown handle type of fd %d\n",
			intr_handle->fd);
		rc = -1;
		break;
	}
out:
	rte_eal_trace_intr_disable(intr_handle, rc);
	return rc;
}
static int
eal_intr_process_interrupts(struct epoll_event *events, int nfds)
{
	bool call = false;
	int n, bytes_read, rv;
	struct rte_intr_source *src;
	struct rte_intr_callback *cb, *next;
	union rte_intr_read_buffer buf;
	struct rte_intr_callback active_cb;

	for (n = 0; n < nfds; n++) {
		/**
		 * if the pipe fd is ready to read, return out to
		 * rebuild the wait list.
		 */
		if (events[n].data.fd == intr_pipe.readfd) {
			int r = read(intr_pipe.readfd, buf.charbuf,
					sizeof(buf.charbuf));
			RTE_SET_USED(r);
			return -1;
		}
		rte_spinlock_lock(&intr_lock);
		TAILQ_FOREACH(src, &intr_sources, next)
			if (src->intr_handle.fd ==
					events[n].data.fd)
				break;
		if (src == NULL) {
			rte_spinlock_unlock(&intr_lock);
			continue;
		}

		/* mark this interrupt source as active and release the lock. */
		src->active = 1;
		rte_spinlock_unlock(&intr_lock);

		/* set the length to be read for different handle types */
		switch (src->intr_handle.type) {
		case RTE_INTR_HANDLE_UIO:
		case RTE_INTR_HANDLE_UIO_INTX:
			bytes_read = sizeof(buf.uio_intr_count);
			break;
		case RTE_INTR_HANDLE_ALARM:
			bytes_read = sizeof(buf.timerfd_num);
			break;
#ifdef VFIO_PRESENT
		case RTE_INTR_HANDLE_VFIO_MSIX:
		case RTE_INTR_HANDLE_VFIO_MSI:
		case RTE_INTR_HANDLE_VFIO_LEGACY:
			bytes_read = sizeof(buf.vfio_intr_count);
			break;
#ifdef HAVE_VFIO_DEV_REQ_INTERFACE
		case RTE_INTR_HANDLE_VFIO_REQ:
			bytes_read = 0;
			call = true;
			break;
#endif
#endif
		case RTE_INTR_HANDLE_VDEV:
		case RTE_INTR_HANDLE_EXT:
			bytes_read = 0;
			call = true;
			break;
		case RTE_INTR_HANDLE_DEV_EVENT:
			bytes_read = 0;
			call = true;
			break;
		default:
			bytes_read = 1;
			break;
		}

		if (bytes_read > 0) {
			/**
			 * read out to clear the ready-to-be-read flag
			 * for epoll_wait.
			 */
			bytes_read = read(events[n].data.fd, &buf, bytes_read);
			if (bytes_read < 0) {
				if (errno == EINTR || errno == EWOULDBLOCK)
					continue;

				RTE_LOG(ERR, EAL, "Error reading from file "
					"descriptor %d: %s\n",
					events[n].data.fd,
					strerror(errno));
				/*
				 * The device is unplugged or buggy, remove
				 * it as an interrupt source and return to
				 * force the wait list to be rebuilt.
				 */
				rte_spinlock_lock(&intr_lock);
				TAILQ_REMOVE(&intr_sources, src, next);
				rte_spinlock_unlock(&intr_lock);

				for (cb = TAILQ_FIRST(&src->callbacks); cb;
							cb = next) {
					next = TAILQ_NEXT(cb, next);
					TAILQ_REMOVE(&src->callbacks, cb, next);
					free(cb);
				}
				free(src);
				return -1;
			} else if (bytes_read == 0)
				RTE_LOG(ERR, EAL, "Read nothing from file "
					"descriptor %d\n", events[n].data.fd);
			else
				call = true;
		}

		/* grab the lock again to call callbacks and update status. */
		rte_spinlock_lock(&intr_lock);

		if (call) {
			/* Finally, call all callbacks. */
			TAILQ_FOREACH(cb, &src->callbacks, next) {
				/* make a copy and unlock. */
				active_cb = *cb;
				rte_spinlock_unlock(&intr_lock);

				/* call the actual callback */
				active_cb.cb_fn(active_cb.cb_arg);

				/* get the lock back. */
				rte_spinlock_lock(&intr_lock);
			}
		}

		/* we are done with that interrupt source, release it. */
		src->active = 0;

		rv = 0;

		/* check if any callbacks are supposed to be removed */
		for (cb = TAILQ_FIRST(&src->callbacks); cb != NULL; cb = next) {
			next = TAILQ_NEXT(cb, next);
			if (cb->pending_delete) {
				TAILQ_REMOVE(&src->callbacks, cb, next);
				if (cb->ucb_fn)
					cb->ucb_fn(&src->intr_handle, cb->cb_arg);
				free(cb);
				rv++;
			}
		}

		/* all callbacks for that source are removed. */
		if (TAILQ_EMPTY(&src->callbacks)) {
			TAILQ_REMOVE(&intr_sources, src, next);
			free(src);
		}

		/* notify the pipe fd waited on by epoll_wait to rebuild the wait list */
		if (rv > 0 && write(intr_pipe.writefd, "1", 1) < 0) {
			rte_spinlock_unlock(&intr_lock);
			return -EPIPE;
		}

		rte_spinlock_unlock(&intr_lock);
	}

	return 0;
}
/**
 * It handles all the interrupts.
 *
 * @param pfd
 *  epoll file descriptor.
 * @param totalfds
 *  The number of file descriptors added in epoll.
 *
 * @return
 *  void
 */
static void
eal_intr_handle_interrupts(int pfd, unsigned totalfds)
{
	struct epoll_event events[totalfds];
	int nfds = 0;

	for (;;) {
		nfds = epoll_wait(pfd, events, totalfds,
			EAL_INTR_EPOLL_WAIT_FOREVER);
		/* epoll_wait fail */
		if (nfds < 0) {
			if (errno == EINTR)
				continue;
			RTE_LOG(ERR, EAL,
				"epoll_wait returns with fail\n");
			return;
		}
		/* epoll_wait timeout, will never happen here */
		else if (nfds == 0)
			continue;
		/* epoll_wait has at least one fd ready to read */
		if (eal_intr_process_interrupts(events, nfds) < 0)
			return;
	}
}
/**
 * It builds/rebuilds up the epoll file descriptor with all the
 * file descriptors being waited on. Then handles the interrupts.
 *
 * @param arg
 *  pointer. (unused)
 *
 * @return
 *  never return;
 */
static __rte_noreturn void *
eal_intr_thread_main(__rte_unused void *arg)
{
	/* host thread, never break out */
	for (;;) {
		/* build up the epoll fd with all descriptors we are to
		 * wait on then pass it to the handle_interrupts function
		 */
		static struct epoll_event pipe_event = {
			.events = EPOLLIN | EPOLLPRI,
		};
		struct rte_intr_source *src;
		unsigned numfds = 0;

		/* create epoll fd */
		int pfd = epoll_create(1);
		if (pfd < 0)
			rte_panic("Cannot create epoll instance\n");

		pipe_event.data.fd = intr_pipe.readfd;
		/**
		 * add pipe fd into wait list, this pipe is used to
		 * rebuild the wait list.
		 */
		if (epoll_ctl(pfd, EPOLL_CTL_ADD, intr_pipe.readfd,
						&pipe_event) < 0) {
			rte_panic("Error adding fd to %d epoll_ctl, %s\n",
					intr_pipe.readfd, strerror(errno));
		}
		numfds++;

		rte_spinlock_lock(&intr_lock);

		TAILQ_FOREACH(src, &intr_sources, next) {
			struct epoll_event ev;

			if (src->callbacks.tqh_first == NULL)
				continue; /* skip those with no callbacks */
			memset(&ev, 0, sizeof(ev));
			ev.events = EPOLLIN | EPOLLPRI | EPOLLRDHUP | EPOLLHUP;
			ev.data.fd = src->intr_handle.fd;

			/**
			 * add all the uio device file descriptors
			 * into the wait list.
			 */
			if (epoll_ctl(pfd, EPOLL_CTL_ADD,
					src->intr_handle.fd, &ev) < 0) {
				rte_panic("Error adding fd %d epoll_ctl, %s\n",
					src->intr_handle.fd, strerror(errno));
			}
			else
				numfds++;
		}
		rte_spinlock_unlock(&intr_lock);
		/* serve the interrupt */
		eal_intr_handle_interrupts(pfd, numfds);

		/**
		 * when we return, we need to rebuild the
		 * list of fds to monitor.
		 */
		close(pfd);
	}
}
int
rte_eal_intr_init(void)
{
	int ret = 0;

	/* init the global interrupt source head */
	TAILQ_INIT(&intr_sources);

	/**
	 * create a pipe which will be waited on by epoll and written to
	 * whenever the wait list of epoll must be rebuilt.
	 */
	if (pipe(intr_pipe.pipefd) < 0) {
		rte_errno = errno;
		return -1;
	}

	/* create the host thread to wait/handle the interrupt */
	ret = rte_ctrl_thread_create(&intr_thread, "eal-intr-thread", NULL,
			eal_intr_thread_main, NULL);
	if (ret != 0) {
		rte_errno = -ret;
		RTE_LOG(ERR, EAL,
			"Failed to create thread for interrupt handling\n");
	}

	return ret;
}
static void
eal_intr_proc_rxtx_intr(int fd, const struct rte_intr_handle *intr_handle)
{
	union rte_intr_read_buffer buf;
	int bytes_read = 0;
	int nbytes;

	switch (intr_handle->type) {
	case RTE_INTR_HANDLE_UIO:
	case RTE_INTR_HANDLE_UIO_INTX:
		bytes_read = sizeof(buf.uio_intr_count);
		break;
#ifdef VFIO_PRESENT
	case RTE_INTR_HANDLE_VFIO_MSIX:
	case RTE_INTR_HANDLE_VFIO_MSI:
	case RTE_INTR_HANDLE_VFIO_LEGACY:
		bytes_read = sizeof(buf.vfio_intr_count);
		break;
#endif
	case RTE_INTR_HANDLE_VDEV:
		bytes_read = intr_handle->efd_counter_size;
		/* For vdev, number of bytes to read is set by driver */
		break;
	case RTE_INTR_HANDLE_EXT:
		return;
	default:
		bytes_read = 1;
		RTE_LOG(INFO, EAL, "unexpected intr type\n");
		break;
	}

	/**
	 * read out to clear the ready-to-be-read flag
	 * for epoll_wait.
	 */
	if (bytes_read == 0)
		return;
	do {
		nbytes = read(fd, &buf, bytes_read);
		if (nbytes < 0) {
			if (errno == EINTR || errno == EWOULDBLOCK ||
			    errno == EAGAIN)
				continue;
			RTE_LOG(ERR, EAL,
				"Error reading from fd %d: %s\n",
				fd, strerror(errno));
		} else if (nbytes == 0)
			RTE_LOG(ERR, EAL, "Read nothing from fd %d\n", fd);
		return;
	} while (1);
}
static int
eal_epoll_process_event(struct epoll_event *evs, unsigned int n,
			struct rte_epoll_event *events)
{
	unsigned int i, count = 0;
	struct rte_epoll_event *rev;

	for (i = 0; i < n; i++) {
		rev = evs[i].data.ptr;
		if (!rev || !rte_atomic32_cmpset(&rev->status, RTE_EPOLL_VALID,
						 RTE_EPOLL_EXEC))
			continue;

		events[count].status        = RTE_EPOLL_VALID;
		events[count].fd            = rev->fd;
		events[count].epfd          = rev->epfd;
		events[count].epdata.event  = rev->epdata.event;
		events[count].epdata.data   = rev->epdata.data;
		if (rev->epdata.cb_fun)
			rev->epdata.cb_fun(rev->fd,
					   rev->epdata.cb_arg);

		rte_compiler_barrier();
		rev->status = RTE_EPOLL_VALID;
		count++;
	}
	return count;
}
static int
eal_init_tls_epfd(void)
{
	int pfd = epoll_create(255);

	if (pfd < 0) {
		RTE_LOG(ERR, EAL,
			"Cannot create epoll instance\n");
		return -1;
	}
	return pfd;
}

int
rte_intr_tls_epfd(void)
{
	if (RTE_PER_LCORE(_epfd) == -1)
		RTE_PER_LCORE(_epfd) = eal_init_tls_epfd();

	return RTE_PER_LCORE(_epfd);
}
int
rte_epoll_wait(int epfd, struct rte_epoll_event *events,
	       int maxevents, int timeout)
{
	struct epoll_event evs[maxevents];
	int rc;

	if (!events) {
		RTE_LOG(ERR, EAL, "rte_epoll_event can't be NULL\n");
		return -1;
	}

	/* using per thread epoll fd */
	if (epfd == RTE_EPOLL_PER_THREAD)
		epfd = rte_intr_tls_epfd();

	while (1) {
		rc = epoll_wait(epfd, evs, maxevents, timeout);
		if (likely(rc > 0)) {
			/* epoll_wait has at least one fd ready to read */
			rc = eal_epoll_process_event(evs, rc, events);
			break;
		} else if (rc < 0) {
			if (errno == EINTR)
				continue;
			/* epoll_wait fail */
			RTE_LOG(ERR, EAL, "epoll_wait returns with fail %s\n",
				strerror(errno));
			rc = -1;
			break;
		} else {
			/* rc == 0, epoll_wait timed out */
			break;
		}
	}

	return rc;
}
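/*
 * Illustrative polling loop (hypothetical epdata.data encoding): wait on
 * the per-thread epoll instance for up to 8 events with a 100 ms timeout:
 *
 *	struct rte_epoll_event ev[8];
 *	int i, n;
 *
 *	n = rte_epoll_wait(RTE_EPOLL_PER_THREAD, ev, 8, 100);
 *	for (i = 0; i < n; i++) {
 *		int queue_id = (int)(uintptr_t)ev[i].epdata.data;
 *		// ... poll the Rx queue whose vector fired ...
 *	}
 */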
static void
eal_epoll_data_safe_free(struct rte_epoll_event *ev)
{
	while (!rte_atomic32_cmpset(&ev->status, RTE_EPOLL_VALID,
				    RTE_EPOLL_INVALID))
		while (ev->status != RTE_EPOLL_VALID)
			rte_pause();
	memset(&ev->epdata, 0, sizeof(ev->epdata));
	ev->fd = -1;
	ev->epfd = -1;
}
int
rte_epoll_ctl(int epfd, int op, int fd,
	      struct rte_epoll_event *event)
{
	struct epoll_event ev;

	if (!event) {
		RTE_LOG(ERR, EAL, "rte_epoll_event can't be NULL\n");
		return -1;
	}

	/* using per thread epoll fd */
	if (epfd == RTE_EPOLL_PER_THREAD)
		epfd = rte_intr_tls_epfd();

	if (op == EPOLL_CTL_ADD) {
		event->status = RTE_EPOLL_VALID;
		event->fd = fd;  /* ignore fd in event */
		event->epfd = epfd;
		ev.data.ptr = (void *)event;
	}

	ev.events = event->epdata.event;
	if (epoll_ctl(epfd, op, fd, &ev) < 0) {
		RTE_LOG(ERR, EAL, "Error op %d fd %d epoll_ctl, %s\n",
			op, fd, strerror(errno));
		if (op == EPOLL_CTL_ADD)
			/* rollback status when CTL_ADD fails */
			event->status = RTE_EPOLL_INVALID;
		return -1;
	}

	if (op == EPOLL_CTL_DEL && event->status != RTE_EPOLL_INVALID)
		eal_epoll_data_safe_free(event);

	return 0;
}
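/*
 * Illustrative use (hypothetical "efd"): register an eventfd with the
 * calling thread's epoll instance; the rte_epoll_event must stay valid
 * for as long as it is registered:
 *
 *	static struct rte_epoll_event ev;	// must outlive registration
 *
 *	ev.epdata.event = EPOLLIN | EPOLLET;
 *	ev.epdata.data = NULL;
 *	rte_epoll_ctl(RTE_EPOLL_PER_THREAD, EPOLL_CTL_ADD, efd, &ev);
 */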
int
rte_intr_rx_ctl(struct rte_intr_handle *intr_handle, int epfd,
		int op, unsigned int vec, void *data)
{
	struct rte_epoll_event *rev;
	struct rte_epoll_data *epdata;
	int epfd_op;
	unsigned int efd_idx;
	int rc = 0;

	efd_idx = (vec >= RTE_INTR_VEC_RXTX_OFFSET) ?
		(vec - RTE_INTR_VEC_RXTX_OFFSET) : vec;

	if (!intr_handle || intr_handle->nb_efd == 0 ||
	    efd_idx >= intr_handle->nb_efd) {
		RTE_LOG(ERR, EAL, "Wrong intr vector number.\n");
		return -EPERM;
	}

	switch (op) {
	case RTE_INTR_EVENT_ADD:
		epfd_op = EPOLL_CTL_ADD;
		rev = &intr_handle->elist[efd_idx];
		if (rev->status != RTE_EPOLL_INVALID) {
			RTE_LOG(INFO, EAL, "Event already been added.\n");
			return -EEXIST;
		}

		/* attach to intr vector fd */
		epdata = &rev->epdata;
		epdata->event = EPOLLIN | EPOLLPRI | EPOLLET;
		epdata->data = data;
		epdata->cb_fun = (rte_intr_event_cb_t)eal_intr_proc_rxtx_intr;
		epdata->cb_arg = (void *)intr_handle;
		rc = rte_epoll_ctl(epfd, epfd_op,
				   intr_handle->efds[efd_idx], rev);
		if (!rc)
			RTE_LOG(DEBUG, EAL,
				"efd %d associated with vec %d added on epfd %d"
				"\n", rev->fd, vec, epfd);
		else
			rc = -EPERM;
		break;
	case RTE_INTR_EVENT_DEL:
		epfd_op = EPOLL_CTL_DEL;
		rev = &intr_handle->elist[efd_idx];
		if (rev->status == RTE_EPOLL_INVALID) {
			RTE_LOG(INFO, EAL, "Event does not exist.\n");
			return -EPERM;
		}

		rc = rte_epoll_ctl(rev->epfd, epfd_op, rev->fd, rev);
		if (rc)
			rc = -EPERM;
		break;
	default:
		RTE_LOG(ERR, EAL, "event op type mismatch\n");
		rc = -EPERM;
	}

	return rc;
}
void
rte_intr_free_epoll_fd(struct rte_intr_handle *intr_handle)
{
	uint32_t i;
	struct rte_epoll_event *rev;

	for (i = 0; i < intr_handle->nb_efd; i++) {
		rev = &intr_handle->elist[i];
		if (rev->status == RTE_EPOLL_INVALID)
			continue;
		if (rte_epoll_ctl(rev->epfd, EPOLL_CTL_DEL, rev->fd, rev)) {
			/* force free if the entry is still valid */
			eal_epoll_data_safe_free(rev);
			rev->status = RTE_EPOLL_INVALID;
		}
	}
}
int
rte_intr_efd_enable(struct rte_intr_handle *intr_handle, uint32_t nb_efd)
{
	uint32_t i;
	int fd;
	uint32_t n = RTE_MIN(nb_efd, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);

	assert(nb_efd != 0);

	if (intr_handle->type == RTE_INTR_HANDLE_VFIO_MSIX) {
		for (i = 0; i < n; i++) {
			fd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
			if (fd < 0) {
				RTE_LOG(ERR, EAL,
					"can't setup eventfd, error %i (%s)\n",
					errno, strerror(errno));
				return -errno;
			}
			intr_handle->efds[i] = fd;
		}
		intr_handle->nb_efd   = n;
		intr_handle->max_intr = NB_OTHER_INTR + n;
	} else if (intr_handle->type == RTE_INTR_HANDLE_VDEV) {
		/* only check; initialization is done in the vdev driver. */
		if (intr_handle->efd_counter_size >
		    sizeof(union rte_intr_read_buffer)) {
			RTE_LOG(ERR, EAL, "the efd_counter_size is oversized\n");
			return -EINVAL;
		}
	} else {
		intr_handle->efds[0]  = intr_handle->fd;
		intr_handle->nb_efd   = RTE_MIN(nb_efd, 1U);
		intr_handle->max_intr = NB_OTHER_INTR;
	}

	return 0;
}
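/*
 * End-to-end sketch (hypothetical handle "ih"): a driver with two Rx
 * queues allocates per-queue eventfds, maps them into epoll, and then
 * programs the device vectors:
 *
 *	rte_intr_efd_enable(ih, 2);		// allocate 2 eventfds
 *	for (q = 0; q < 2; q++)
 *		rte_intr_rx_ctl(ih, RTE_EPOLL_PER_THREAD, RTE_INTR_EVENT_ADD,
 *				q + RTE_INTR_VEC_RXTX_OFFSET,
 *				(void *)(uintptr_t)q);
 *	rte_intr_enable(ih);			// e.g. vfio_enable_msix()
 */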
void
rte_intr_efd_disable(struct rte_intr_handle *intr_handle)
{
	uint32_t i;

	rte_intr_free_epoll_fd(intr_handle);
	if (intr_handle->max_intr > intr_handle->nb_efd) {
		for (i = 0; i < intr_handle->nb_efd; i++)
			close(intr_handle->efds[i]);
	}
	intr_handle->nb_efd = 0;
	intr_handle->max_intr = 0;
}
int
rte_intr_dp_is_en(struct rte_intr_handle *intr_handle)
{
	return !!intr_handle->nb_efd;
}
int
rte_intr_allow_others(struct rte_intr_handle *intr_handle)
{
	if (!rte_intr_dp_is_en(intr_handle))
		return 1;
	else
		return !!(intr_handle->max_intr - intr_handle->nb_efd);
}
int
rte_intr_cap_multiple(struct rte_intr_handle *intr_handle)
{
	if (intr_handle->type == RTE_INTR_HANDLE_VFIO_MSIX)
		return 1;

	if (intr_handle->type == RTE_INTR_HANDLE_VDEV)
		return 1;

	return 0;
}
int rte_thread_is_intr(void)
{
	return pthread_equal(intr_thread, pthread_self());
}
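/*
 * Usage note (illustrative): code that may run either from an application
 * thread or from the interrupt thread itself can use rte_thread_is_intr()
 * to pick the unregister variant that cannot deadlock:
 *
 *	if (rte_thread_is_intr())
 *		rte_intr_callback_unregister_pending(&ih, cb, arg, ucb);
 *	else
 *		rte_intr_callback_unregister(&ih, cb, arg);
 */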