/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <sys/epoll.h>
#include <sys/signalfd.h>
#include <sys/ioctl.h>
#include <sys/eventfd.h>

#include <rte_common.h>
#include <rte_interrupts.h>
#include <rte_memory.h>
#include <rte_launch.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_debug.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_pause.h>
#include <rte_eal_trace.h>

#include "eal_private.h"
#include "eal_thread.h"

#define EAL_INTR_EPOLL_WAIT_FOREVER (-1)
#define NB_OTHER_INTR 1
static RTE_DEFINE_PER_LCORE(int, _epfd) = -1; /**< epoll fd per thread */

/**
 * union buffer for reading on different devices
 */
union rte_intr_read_buffer {
	int uio_intr_count;       /* for uio device */
	uint64_t vfio_intr_count; /* for vfio device */
	uint64_t timerfd_num;     /* for timerfd */
	char charbuf[16];         /* for others */
};

TAILQ_HEAD(rte_intr_cb_list, rte_intr_callback);
TAILQ_HEAD(rte_intr_source_list, rte_intr_source);
struct rte_intr_callback {
	TAILQ_ENTRY(rte_intr_callback) next;
	rte_intr_callback_fn cb_fn;  /**< callback address */
	void *cb_arg;                /**< parameter for callback */
	uint8_t pending_delete;      /**< delete after callback is called */
	rte_intr_unregister_callback_fn ucb_fn; /**< fn to call before cb is deleted */
};

struct rte_intr_source {
	TAILQ_ENTRY(rte_intr_source) next;
	struct rte_intr_handle intr_handle; /**< interrupt handle */
	struct rte_intr_cb_list callbacks;  /**< user callbacks */
	uint32_t active;                    /**< source is being serviced */
};
/* global spinlock for interrupt data operation */
static rte_spinlock_t intr_lock = RTE_SPINLOCK_INITIALIZER;

/* union buffer for pipe read/write */
static union intr_pipefds intr_pipe;

/* interrupt sources list */
static struct rte_intr_source_list intr_sources;

/* interrupt handling thread */
static pthread_t intr_thread;

/* VFIO interrupts */

#define IRQ_SET_BUF_LEN (sizeof(struct vfio_irq_set) + sizeof(int))
/* irq set buffer length for queue interrupts and LSC interrupt */
#define MSIX_IRQ_SET_BUF_LEN (sizeof(struct vfio_irq_set) + \
			      sizeof(int) * (RTE_MAX_RXTX_INTR_VEC_ID + 1))
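
/*
 * Illustrative sketch (not part of the EAL itself): VFIO_DEVICE_SET_IRQS
 * takes a variable-length struct vfio_irq_set whose trailing 'data' member
 * carries the payload selected by 'flags'. With VFIO_IRQ_SET_DATA_EVENTFD
 * the payload is an array of eventfds, which is why the buffers above
 * reserve sizeof(struct vfio_irq_set) plus one int per vector:
 *
 *	char buf[MSIX_IRQ_SET_BUF_LEN];
 *	struct vfio_irq_set *irq_set = (struct vfio_irq_set *)buf;
 *	int *fds = (int *)&irq_set->data;
 *
 *	fds[0] = misc_eventfd;   // vector 0: LSC and other interrupts
 *	fds[1] = rxq0_eventfd;   // vectors 1..n: Rx queue interrupts
 *
 * 'misc_eventfd' and 'rxq0_eventfd' are hypothetical descriptors used only
 * for illustration.
 */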
/* enable legacy (INTx) interrupts */
static int
vfio_enable_intx(const struct rte_intr_handle *intr_handle) {
	struct vfio_irq_set *irq_set;
	char irq_set_buf[IRQ_SET_BUF_LEN];
	int len, ret;
	int *fd_ptr;

	len = sizeof(irq_set_buf);

	/* enable INTx */
	irq_set = (struct vfio_irq_set *) irq_set_buf;
	irq_set->argsz = len;
	irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
	irq_set->index = VFIO_PCI_INTX_IRQ_INDEX;
	fd_ptr = (int *) &irq_set->data;
	*fd_ptr = intr_handle->fd;

	ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
	if (ret) {
		RTE_LOG(ERR, EAL, "Error enabling INTx interrupts for fd %d\n",
			intr_handle->fd);
		return -1;
	}

	/* unmask INTx after enabling */
	memset(irq_set, 0, len);
	len = sizeof(struct vfio_irq_set);
	irq_set->argsz = len;
	irq_set->flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_UNMASK;
	irq_set->index = VFIO_PCI_INTX_IRQ_INDEX;

	ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
	if (ret) {
		RTE_LOG(ERR, EAL, "Error unmasking INTx interrupts for fd %d\n",
			intr_handle->fd);
		return -1;
	}
	return 0;
}
/* disable legacy (INTx) interrupts */
static int
vfio_disable_intx(const struct rte_intr_handle *intr_handle) {
	struct vfio_irq_set *irq_set;
	char irq_set_buf[IRQ_SET_BUF_LEN];
	int len, ret;

	len = sizeof(struct vfio_irq_set);

	/* mask interrupts before disabling */
	irq_set = (struct vfio_irq_set *) irq_set_buf;
	irq_set->argsz = len;
	irq_set->flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_MASK;
	irq_set->index = VFIO_PCI_INTX_IRQ_INDEX;

	ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
	if (ret) {
		RTE_LOG(ERR, EAL, "Error masking INTx interrupts for fd %d\n",
			intr_handle->fd);
		return -1;
	}

	/* disable INTx */
	memset(irq_set, 0, len);
	irq_set->argsz = len;
	irq_set->flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER;
	irq_set->index = VFIO_PCI_INTX_IRQ_INDEX;

	ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
	if (ret) {
		RTE_LOG(ERR, EAL,
			"Error disabling INTx interrupts for fd %d\n", intr_handle->fd);
		return -1;
	}
	return 0;
}
/* unmask/ack legacy (INTx) interrupts */
static int
vfio_ack_intx(const struct rte_intr_handle *intr_handle)
{
	struct vfio_irq_set irq_set;

	/* unmask INTx */
	memset(&irq_set, 0, sizeof(irq_set));
	irq_set.argsz = sizeof(irq_set);
	irq_set.flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_UNMASK;
	irq_set.index = VFIO_PCI_INTX_IRQ_INDEX;

	if (ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, &irq_set)) {
		RTE_LOG(ERR, EAL, "Error unmasking INTx interrupts for fd %d\n",
			intr_handle->fd);
		return -1;
	}
	return 0;
}
/* enable MSI interrupts */
static int
vfio_enable_msi(const struct rte_intr_handle *intr_handle) {
	char irq_set_buf[IRQ_SET_BUF_LEN];
	struct vfio_irq_set *irq_set;
	int len, ret;
	int *fd_ptr;

	len = sizeof(irq_set_buf);

	irq_set = (struct vfio_irq_set *) irq_set_buf;
	irq_set->argsz = len;
	irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
	irq_set->index = VFIO_PCI_MSI_IRQ_INDEX;
	fd_ptr = (int *) &irq_set->data;
	*fd_ptr = intr_handle->fd;

	ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
	if (ret) {
		RTE_LOG(ERR, EAL, "Error enabling MSI interrupts for fd %d\n",
			intr_handle->fd);
		return -1;
	}
	return 0;
}
/* disable MSI interrupts */
static int
vfio_disable_msi(const struct rte_intr_handle *intr_handle) {
	struct vfio_irq_set *irq_set;
	char irq_set_buf[IRQ_SET_BUF_LEN];
	int len, ret;

	len = sizeof(struct vfio_irq_set);

	irq_set = (struct vfio_irq_set *) irq_set_buf;
	irq_set->argsz = len;
	irq_set->flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER;
	irq_set->index = VFIO_PCI_MSI_IRQ_INDEX;

	ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
	if (ret)
		RTE_LOG(ERR, EAL,
			"Error disabling MSI interrupts for fd %d\n", intr_handle->fd);

	return ret;
}
/* enable MSI-X interrupts */
static int
vfio_enable_msix(const struct rte_intr_handle *intr_handle) {
	char irq_set_buf[MSIX_IRQ_SET_BUF_LEN];
	struct vfio_irq_set *irq_set;
	int len, ret;
	int *fd_ptr;

	len = sizeof(irq_set_buf);

	irq_set = (struct vfio_irq_set *) irq_set_buf;
	irq_set->argsz = len;
	/* 0 < irq_set->count <= RTE_MAX_RXTX_INTR_VEC_ID + 1 */
	irq_set->count = intr_handle->max_intr ?
		(intr_handle->max_intr > RTE_MAX_RXTX_INTR_VEC_ID + 1 ?
		 RTE_MAX_RXTX_INTR_VEC_ID + 1 : intr_handle->max_intr) : 1;
	irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
	irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
	fd_ptr = (int *) &irq_set->data;
	/* INTR vector offset 0 is reserved for non-efd mapping */
	fd_ptr[RTE_INTR_VEC_ZERO_OFFSET] = intr_handle->fd;
	memcpy(&fd_ptr[RTE_INTR_VEC_RXTX_OFFSET], intr_handle->efds,
		sizeof(*intr_handle->efds) * intr_handle->nb_efd);

	ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
	if (ret) {
		RTE_LOG(ERR, EAL, "Error enabling MSI-X interrupts for fd %d\n",
			intr_handle->fd);
		return -1;
	}
	return 0;
}
/* disable MSI-X interrupts */
static int
vfio_disable_msix(const struct rte_intr_handle *intr_handle) {
	struct vfio_irq_set *irq_set;
	char irq_set_buf[MSIX_IRQ_SET_BUF_LEN];
	int len, ret;

	len = sizeof(struct vfio_irq_set);

	irq_set = (struct vfio_irq_set *) irq_set_buf;
	irq_set->argsz = len;
	irq_set->flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER;
	irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;

	ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
	if (ret)
		RTE_LOG(ERR, EAL,
			"Error disabling MSI-X interrupts for fd %d\n", intr_handle->fd);

	return ret;
}
#ifdef HAVE_VFIO_DEV_REQ_INTERFACE
/* enable req notifier */
static int
vfio_enable_req(const struct rte_intr_handle *intr_handle)
{
	char irq_set_buf[IRQ_SET_BUF_LEN];
	struct vfio_irq_set *irq_set;
	int len, ret;
	int *fd_ptr;

	len = sizeof(irq_set_buf);

	irq_set = (struct vfio_irq_set *) irq_set_buf;
	irq_set->argsz = len;
	irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
			 VFIO_IRQ_SET_ACTION_TRIGGER;
	irq_set->index = VFIO_PCI_REQ_IRQ_INDEX;
	fd_ptr = (int *) &irq_set->data;
	*fd_ptr = intr_handle->fd;

	ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
	if (ret) {
		RTE_LOG(ERR, EAL, "Error enabling req interrupts for fd %d\n",
			intr_handle->fd);
		return -1;
	}
	return 0;
}
/* disable req notifier */
static int
vfio_disable_req(const struct rte_intr_handle *intr_handle)
{
	struct vfio_irq_set *irq_set;
	char irq_set_buf[IRQ_SET_BUF_LEN];
	int len, ret;

	len = sizeof(struct vfio_irq_set);

	irq_set = (struct vfio_irq_set *) irq_set_buf;
	irq_set->argsz = len;
	irq_set->flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER;
	irq_set->index = VFIO_PCI_REQ_IRQ_INDEX;

	ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
	if (ret)
		RTE_LOG(ERR, EAL, "Error disabling req interrupts for fd %d\n",
			intr_handle->fd);

	return ret;
}
#endif
static int
uio_intx_intr_disable(const struct rte_intr_handle *intr_handle)
{
	unsigned char command_high;

	/* use UIO config file descriptor for uio_pci_generic */
	if (pread(intr_handle->uio_cfg_fd, &command_high, 1, 5) != 1) {
		RTE_LOG(ERR, EAL,
			"Error reading interrupts status for fd %d\n",
			intr_handle->uio_cfg_fd);
		return -1;
	}
	/* disable interrupts */
	command_high |= 0x4;
	if (pwrite(intr_handle->uio_cfg_fd, &command_high, 1, 5) != 1) {
		RTE_LOG(ERR, EAL,
			"Error disabling interrupts for fd %d\n",
			intr_handle->uio_cfg_fd);
		return -1;
	}

	return 0;
}
static int
uio_intx_intr_enable(const struct rte_intr_handle *intr_handle)
{
	unsigned char command_high;

	/* use UIO config file descriptor for uio_pci_generic */
	if (pread(intr_handle->uio_cfg_fd, &command_high, 1, 5) != 1) {
		RTE_LOG(ERR, EAL,
			"Error reading interrupts status for fd %d\n",
			intr_handle->uio_cfg_fd);
		return -1;
	}
	/* enable interrupts */
	command_high &= ~0x4;
	if (pwrite(intr_handle->uio_cfg_fd, &command_high, 1, 5) != 1) {
		RTE_LOG(ERR, EAL,
			"Error enabling interrupts for fd %d\n",
			intr_handle->uio_cfg_fd);
		return -1;
	}

	return 0;
}
static int
uio_intr_disable(const struct rte_intr_handle *intr_handle)
{
	const int value = 0;

	if (write(intr_handle->fd, &value, sizeof(value)) < 0) {
		RTE_LOG(ERR, EAL,
			"Error disabling interrupts for fd %d (%s)\n",
			intr_handle->fd, strerror(errno));
		return -1;
	}
	return 0;
}
static int
uio_intr_enable(const struct rte_intr_handle *intr_handle)
{
	const int value = 1;

	if (write(intr_handle->fd, &value, sizeof(value)) < 0) {
		RTE_LOG(ERR, EAL,
			"Error enabling interrupts for fd %d (%s)\n",
			intr_handle->fd, strerror(errno));
		return -1;
	}
	return 0;
}
int
rte_intr_callback_register(const struct rte_intr_handle *intr_handle,
			rte_intr_callback_fn cb, void *cb_arg)
{
	int ret = 0, wake_thread = 0;
	struct rte_intr_source *src;
	struct rte_intr_callback *callback;

	/* first do parameter checking */
	if (intr_handle == NULL || intr_handle->fd < 0 || cb == NULL) {
		RTE_LOG(ERR, EAL,
			"Registering with invalid input parameter\n");
		return -EINVAL;
	}

	/* allocate a new interrupt callback entity */
	callback = calloc(1, sizeof(*callback));
	if (callback == NULL) {
		RTE_LOG(ERR, EAL, "Cannot allocate memory\n");
		return -ENOMEM;
	}
	callback->cb_fn = cb;
	callback->cb_arg = cb_arg;
	callback->pending_delete = 0;
	callback->ucb_fn = NULL;

	rte_spinlock_lock(&intr_lock);

	/* check if there is at least one callback registered for the fd */
	TAILQ_FOREACH(src, &intr_sources, next) {
		if (src->intr_handle.fd == intr_handle->fd) {
			/* we had no interrupts for this */
			if (TAILQ_EMPTY(&src->callbacks))
				wake_thread = 1;
			TAILQ_INSERT_TAIL(&(src->callbacks), callback, next);
			break;
		}
	}

	/* no existing callbacks for this - add new source */
	if (src == NULL) {
		src = calloc(1, sizeof(*src));
		if (src == NULL) {
			RTE_LOG(ERR, EAL, "Cannot allocate memory\n");
			free(callback);
			ret = -ENOMEM;
		} else {
			src->intr_handle = *intr_handle;
			TAILQ_INIT(&src->callbacks);
			TAILQ_INSERT_TAIL(&(src->callbacks), callback, next);
			TAILQ_INSERT_TAIL(&intr_sources, src, next);
			wake_thread = 1;
		}
	}

	rte_spinlock_unlock(&intr_lock);

	/**
	 * check if need to notify the pipe fd waited by epoll_wait to
	 * rebuild the wait list.
	 */
	if (wake_thread)
		if (write(intr_pipe.writefd, "1", 1) < 0)
			ret = -EPIPE;

	rte_eal_trace_intr_callback_register(intr_handle, cb, cb_arg, ret);
	return ret;
}
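
/*
 * Usage sketch (hedged, not part of this file): a driver typically registers
 * a link-status-change handler once per device. 'my_lsc_handler' and 'dev'
 * are hypothetical names used only for illustration.
 *
 *	static void my_lsc_handler(void *arg)
 *	{
 *		struct rte_eth_dev *dev = arg;
 *		// handle the event, then re-arm the interrupt
 *		rte_intr_ack(dev->intr_handle);
 *	}
 *
 *	ret = rte_intr_callback_register(dev->intr_handle,
 *					 my_lsc_handler, dev);
 *
 * On success the helper thread's wait list is rebuilt via the pipe write
 * above, so the new fd is picked up without restarting the thread.
 */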
int
rte_intr_callback_unregister_pending(const struct rte_intr_handle *intr_handle,
			rte_intr_callback_fn cb_fn, void *cb_arg,
			rte_intr_unregister_callback_fn ucb_fn)
{
	int ret;
	struct rte_intr_source *src;
	struct rte_intr_callback *cb, *next;

	/* do parameter checking first */
	if (intr_handle == NULL || intr_handle->fd < 0) {
		RTE_LOG(ERR, EAL,
			"Unregistering with invalid input parameter\n");
		return -EINVAL;
	}

	rte_spinlock_lock(&intr_lock);

	/* check if the interrupt source for the fd exists */
	TAILQ_FOREACH(src, &intr_sources, next)
		if (src->intr_handle.fd == intr_handle->fd)
			break;

	/* No interrupt source registered for the fd */
	if (src == NULL) {
		ret = -ENOENT;
	/* only usable if the source is active */
	} else if (src->active == 0) {
		ret = -EAGAIN;
	} else {
		ret = 0;
		/* walk through the callbacks and mark all that match. */
		for (cb = TAILQ_FIRST(&src->callbacks); cb != NULL; cb = next) {
			next = TAILQ_NEXT(cb, next);
			if (cb->cb_fn == cb_fn && (cb_arg == (void *)-1 ||
					cb->cb_arg == cb_arg)) {
				cb->pending_delete = 1;
				cb->ucb_fn = ucb_fn;
				ret++;
			}
		}
	}

	rte_spinlock_unlock(&intr_lock);

	return ret;
}
int
rte_intr_callback_unregister(const struct rte_intr_handle *intr_handle,
			rte_intr_callback_fn cb_fn, void *cb_arg)
{
	int ret;
	struct rte_intr_source *src;
	struct rte_intr_callback *cb, *next;

	/* do parameter checking first */
	if (intr_handle == NULL || intr_handle->fd < 0) {
		RTE_LOG(ERR, EAL,
			"Unregistering with invalid input parameter\n");
		return -EINVAL;
	}

	rte_spinlock_lock(&intr_lock);

	/* check if the interrupt source for the fd exists */
	TAILQ_FOREACH(src, &intr_sources, next)
		if (src->intr_handle.fd == intr_handle->fd)
			break;

	/* No interrupt source registered for the fd */
	if (src == NULL) {
		ret = -ENOENT;
	/* interrupt source has some active callbacks right now. */
	} else if (src->active != 0) {
		ret = -EAGAIN;
	} else {
		ret = 0;
		/* walk through the callbacks and remove all that match. */
		for (cb = TAILQ_FIRST(&src->callbacks); cb != NULL; cb = next) {
			next = TAILQ_NEXT(cb, next);
			if (cb->cb_fn == cb_fn && (cb_arg == (void *)-1 ||
					cb->cb_arg == cb_arg)) {
				TAILQ_REMOVE(&src->callbacks, cb, next);
				free(cb);
				ret++;
			}
		}

		/* all callbacks for that source are removed. */
		if (TAILQ_EMPTY(&src->callbacks)) {
			TAILQ_REMOVE(&intr_sources, src, next);
			free(src);
		}
	}

	rte_spinlock_unlock(&intr_lock);

	/* notify the pipe fd waited by epoll_wait to rebuild the wait list */
	if (ret >= 0 && write(intr_pipe.writefd, "1", 1) < 0)
		ret = -EPIPE;

	rte_eal_trace_intr_callback_unregister(intr_handle, cb_fn, cb_arg,
		ret);
	return ret;
}
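
/*
 * Usage sketch (hedged): passing (void *)-1 as cb_arg acts as a wildcard,
 * removing every callback registered with cb_fn regardless of its argument;
 * a non-negative return is the number of callbacks removed. 'my_lsc_handler'
 * and 'dev' are the hypothetical names from the sketch above.
 *
 *	ret = rte_intr_callback_unregister(dev->intr_handle,
 *					   my_lsc_handler, (void *)-1);
 *	if (ret == -EAGAIN) {
 *		// the source is busy in the handler thread: retry later,
 *		// or use rte_intr_callback_unregister_pending() to have
 *		// the handler thread free the callback itself
 *	}
 */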
int
rte_intr_enable(const struct rte_intr_handle *intr_handle)
{
	int rc = 0;

	if (intr_handle == NULL)
		return -1;

	if (intr_handle->type == RTE_INTR_HANDLE_VDEV) {
		rc = 0;
		goto out;
	}

	if (intr_handle->fd < 0 || intr_handle->uio_cfg_fd < 0) {
		rc = -1;
		goto out;
	}

	switch (intr_handle->type) {
	/* write to the uio fd to enable the interrupt */
	case RTE_INTR_HANDLE_UIO:
		if (uio_intr_enable(intr_handle))
			rc = -1;
		break;
	case RTE_INTR_HANDLE_UIO_INTX:
		if (uio_intx_intr_enable(intr_handle))
			rc = -1;
		break;
	/* not used at this moment */
	case RTE_INTR_HANDLE_ALARM:
		rc = -1;
		break;
	case RTE_INTR_HANDLE_VFIO_MSIX:
		if (vfio_enable_msix(intr_handle))
			rc = -1;
		break;
	case RTE_INTR_HANDLE_VFIO_MSI:
		if (vfio_enable_msi(intr_handle))
			rc = -1;
		break;
	case RTE_INTR_HANDLE_VFIO_LEGACY:
		if (vfio_enable_intx(intr_handle))
			rc = -1;
		break;
#ifdef HAVE_VFIO_DEV_REQ_INTERFACE
	case RTE_INTR_HANDLE_VFIO_REQ:
		if (vfio_enable_req(intr_handle))
			rc = -1;
		break;
#endif
	/* not used at this moment */
	case RTE_INTR_HANDLE_DEV_EVENT:
		rc = -1;
		break;
	/* unknown handle type */
	default:
		RTE_LOG(ERR, EAL,
			"Unknown handle type of fd %d\n",
			intr_handle->fd);
		rc = -1;
		break;
	}
out:
	rte_eal_trace_intr_enable(intr_handle, rc);
	return rc;
}
/**
 * PMD generally calls this function at the end of its IRQ callback.
 * Internally, it unmasks the interrupt if possible.
 *
 * For INTx, unmasking is required as the interrupt is auto-masked prior to
 * invoking the callback.
 *
 * For MSI/MSI-X, unmasking is typically not needed as the interrupt is not
 * auto-masked. In fact, for interrupt handle types VFIO_MSIX and VFIO_MSI,
 * this function is a no-op.
 */
int
rte_intr_ack(const struct rte_intr_handle *intr_handle)
{
	if (intr_handle && intr_handle->type == RTE_INTR_HANDLE_VDEV)
		return 0;

	if (!intr_handle || intr_handle->fd < 0 || intr_handle->uio_cfg_fd < 0)
		return -1;

	switch (intr_handle->type) {
	/* Both acking and enabling are the same for UIO */
	case RTE_INTR_HANDLE_UIO:
		if (uio_intr_enable(intr_handle))
			return -1;
		break;
	case RTE_INTR_HANDLE_UIO_INTX:
		if (uio_intx_intr_enable(intr_handle))
			return -1;
		break;
	/* not used at this moment */
	case RTE_INTR_HANDLE_ALARM:
		return -1;
	/* VFIO MSI* is implicitly acked unlike INTx, nothing to do */
	case RTE_INTR_HANDLE_VFIO_MSIX:
	case RTE_INTR_HANDLE_VFIO_MSI:
		return 0;
	case RTE_INTR_HANDLE_VFIO_LEGACY:
		if (vfio_ack_intx(intr_handle))
			return -1;
		break;
#ifdef HAVE_VFIO_DEV_REQ_INTERFACE
	case RTE_INTR_HANDLE_VFIO_REQ:
		return -1;
#endif
	/* not used at this moment */
	case RTE_INTR_HANDLE_DEV_EVENT:
		return -1;
	/* unknown handle type */
	default:
		RTE_LOG(ERR, EAL, "Unknown handle type of fd %d\n",
			intr_handle->fd);
		return -1;
	}

	return 0;
}
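
/*
 * Usage sketch (hedged): with a level-triggered INTx device, the usual
 * sequence inside an IRQ callback is to quiesce the device's interrupt
 * cause and then call rte_intr_ack() to unmask; skipping the ack would
 * leave the line masked and no further interrupts would be delivered.
 *
 *	static void my_irq_cb(void *arg)     // hypothetical callback
 *	{
 *		struct my_dev *d = arg;      // hypothetical device type
 *		my_dev_clear_cause(d);       // device-specific, illustrative
 *		rte_intr_ack(&d->intr_handle);
 *	}
 *
 * For VFIO_MSIX/VFIO_MSI handles the call returns 0 without issuing any
 * ioctl, so it is safe to call unconditionally.
 */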
int
rte_intr_disable(const struct rte_intr_handle *intr_handle)
{
	int rc = 0;

	if (intr_handle == NULL)
		return -1;

	if (intr_handle->type == RTE_INTR_HANDLE_VDEV) {
		rc = 0;
		goto out;
	}

	if (intr_handle->fd < 0 || intr_handle->uio_cfg_fd < 0) {
		rc = -1;
		goto out;
	}

	switch (intr_handle->type) {
	/* write to the uio fd to disable the interrupt */
	case RTE_INTR_HANDLE_UIO:
		if (uio_intr_disable(intr_handle))
			rc = -1;
		break;
	case RTE_INTR_HANDLE_UIO_INTX:
		if (uio_intx_intr_disable(intr_handle))
			rc = -1;
		break;
	/* not used at this moment */
	case RTE_INTR_HANDLE_ALARM:
		rc = -1;
		break;
	case RTE_INTR_HANDLE_VFIO_MSIX:
		if (vfio_disable_msix(intr_handle))
			rc = -1;
		break;
	case RTE_INTR_HANDLE_VFIO_MSI:
		if (vfio_disable_msi(intr_handle))
			rc = -1;
		break;
	case RTE_INTR_HANDLE_VFIO_LEGACY:
		if (vfio_disable_intx(intr_handle))
			rc = -1;
		break;
#ifdef HAVE_VFIO_DEV_REQ_INTERFACE
	case RTE_INTR_HANDLE_VFIO_REQ:
		if (vfio_disable_req(intr_handle))
			rc = -1;
		break;
#endif
	/* not used at this moment */
	case RTE_INTR_HANDLE_DEV_EVENT:
		rc = -1;
		break;
	/* unknown handle type */
	default:
		RTE_LOG(ERR, EAL,
			"Unknown handle type of fd %d\n",
			intr_handle->fd);
		rc = -1;
		break;
	}
out:
	rte_eal_trace_intr_disable(intr_handle, rc);
	return rc;
}
static int
eal_intr_process_interrupts(struct epoll_event *events, int nfds)
{
	bool call;
	int n, bytes_read, rv;
	struct rte_intr_source *src;
	struct rte_intr_callback *cb, *next;
	union rte_intr_read_buffer buf;
	struct rte_intr_callback active_cb;

	for (n = 0; n < nfds; n++) {
		call = false;

		/**
		 * if the pipe fd is ready to read, return out to
		 * rebuild the wait list.
		 */
		if (events[n].data.fd == intr_pipe.readfd) {
			int r = read(intr_pipe.readfd, buf.charbuf,
					sizeof(buf.charbuf));
			RTE_SET_USED(r);
			return -1;
		}

		rte_spinlock_lock(&intr_lock);

		/* look up the interrupt source for this fd */
		TAILQ_FOREACH(src, &intr_sources, next)
			if (src->intr_handle.fd == events[n].data.fd)
				break;
		if (src == NULL) {
			rte_spinlock_unlock(&intr_lock);
			continue;
		}

		/* mark this interrupt source as active and release the lock. */
		src->active = 1;
		rte_spinlock_unlock(&intr_lock);

		/* set the length to be read for different handle types */
		switch (src->intr_handle.type) {
		case RTE_INTR_HANDLE_UIO:
		case RTE_INTR_HANDLE_UIO_INTX:
			bytes_read = sizeof(buf.uio_intr_count);
			break;
		case RTE_INTR_HANDLE_ALARM:
			bytes_read = sizeof(buf.timerfd_num);
			break;
		case RTE_INTR_HANDLE_VFIO_MSIX:
		case RTE_INTR_HANDLE_VFIO_MSI:
		case RTE_INTR_HANDLE_VFIO_LEGACY:
			bytes_read = sizeof(buf.vfio_intr_count);
			break;
#ifdef HAVE_VFIO_DEV_REQ_INTERFACE
		case RTE_INTR_HANDLE_VFIO_REQ:
			bytes_read = 0;
			call = true;
			break;
#endif
		case RTE_INTR_HANDLE_VDEV:
		case RTE_INTR_HANDLE_EXT:
			bytes_read = 0;
			call = true;
			break;
		case RTE_INTR_HANDLE_DEV_EVENT:
			bytes_read = 0;
			call = true;
			break;
		default:
			bytes_read = 1;
			break;
		}

		if (bytes_read > 0) {
			/**
			 * read out to clear the ready-to-be-read flag
			 * for epoll_wait.
			 */
			bytes_read = read(events[n].data.fd, &buf, bytes_read);
			if (bytes_read < 0) {
				if (errno == EINTR || errno == EWOULDBLOCK)
					continue;

				RTE_LOG(ERR, EAL, "Error reading from file "
					"descriptor %d: %s\n",
					events[n].data.fd,
					strerror(errno));
				/*
				 * The device is unplugged or buggy, remove
				 * it as an interrupt source and return to
				 * force the wait list to be rebuilt.
				 */
				rte_spinlock_lock(&intr_lock);
				TAILQ_REMOVE(&intr_sources, src, next);
				rte_spinlock_unlock(&intr_lock);

				for (cb = TAILQ_FIRST(&src->callbacks); cb;
							cb = next) {
					next = TAILQ_NEXT(cb, next);
					TAILQ_REMOVE(&src->callbacks, cb, next);
					free(cb);
				}
				free(src);
				return -1;
			} else if (bytes_read == 0)
				RTE_LOG(ERR, EAL, "Read nothing from file "
					"descriptor %d\n", events[n].data.fd);
			else
				call = true;
		}

		/* grab the lock again to call callbacks and update status. */
		rte_spinlock_lock(&intr_lock);

		if (call) {
			/* Finally, call all callbacks. */
			TAILQ_FOREACH(cb, &src->callbacks, next) {

				/* make a copy and unlock. */
				active_cb = *cb;
				rte_spinlock_unlock(&intr_lock);

				/* call the actual callback */
				active_cb.cb_fn(active_cb.cb_arg);

				/* get the lock back. */
				rte_spinlock_lock(&intr_lock);
			}
		}

		/* we are done with that interrupt source, release it. */
		src->active = 0;

		rv = 0;

		/* check if any callbacks are supposed to be removed */
		for (cb = TAILQ_FIRST(&src->callbacks); cb != NULL; cb = next) {
			next = TAILQ_NEXT(cb, next);
			if (cb->pending_delete) {
				TAILQ_REMOVE(&src->callbacks, cb, next);
				if (cb->ucb_fn)
					cb->ucb_fn(&src->intr_handle,
						cb->cb_arg);
				free(cb);
				rv++;
			}
		}

		/* all callbacks for that source are removed. */
		if (TAILQ_EMPTY(&src->callbacks)) {
			TAILQ_REMOVE(&intr_sources, src, next);
			free(src);
		}

		/* notify the pipe fd waited by epoll_wait to rebuild the wait list */
		if (rv > 0 && write(intr_pipe.writefd, "1", 1) < 0) {
			rte_spinlock_unlock(&intr_lock);
			return -EPIPE;
		}

		rte_spinlock_unlock(&intr_lock);
	}

	return 0;
}
/**
 * It handles all the interrupts.
 *
 * @param pfd
 *  epoll file descriptor.
 * @param totalfds
 *  The number of file descriptors added in epoll.
 *
 * @return
 *  void
 */
static void
eal_intr_handle_interrupts(int pfd, unsigned int totalfds)
{
	struct epoll_event events[totalfds];
	int nfds = 0;

	for (;;) {
		nfds = epoll_wait(pfd, events, totalfds,
			EAL_INTR_EPOLL_WAIT_FOREVER);
		/* epoll_wait failure */
		if (nfds < 0) {
			if (errno == EINTR)
				continue;
			RTE_LOG(ERR, EAL,
				"epoll_wait failed: %s\n", strerror(errno));
			return;
		}
		/* epoll_wait timeout, will never happen here */
		else if (nfds == 0)
			continue;
		/* epoll_wait has at least one fd ready to read */
		if (eal_intr_process_interrupts(events, nfds) < 0)
			return;
	}
}
/**
 * It builds/rebuilds the epoll file descriptor with all the file
 * descriptors being waited on, then handles the interrupts.
 *
 * @param arg
 *  unused.
 *
 * @return
 *  never returns.
 */
static __rte_noreturn void *
eal_intr_thread_main(__rte_unused void *arg)
{
	/* host thread, never break out */
	for (;;) {
		/* build up the epoll fd with all descriptors we are to
		 * wait on then pass it to the handle_interrupts function
		 */
		static struct epoll_event pipe_event = {
			.events = EPOLLIN | EPOLLPRI,
		};
		struct rte_intr_source *src;
		unsigned int numfds = 0;

		/* create epoll fd */
		int pfd = epoll_create(1);
		if (pfd < 0)
			rte_panic("Cannot create epoll instance\n");

		pipe_event.data.fd = intr_pipe.readfd;
		/**
		 * add pipe fd into wait list, this pipe is used to
		 * rebuild the wait list.
		 */
		if (epoll_ctl(pfd, EPOLL_CTL_ADD, intr_pipe.readfd,
				&pipe_event) < 0) {
			rte_panic("Error adding fd %d to epoll, %s\n",
				intr_pipe.readfd, strerror(errno));
		}
		numfds++;

		rte_spinlock_lock(&intr_lock);

		TAILQ_FOREACH(src, &intr_sources, next) {
			struct epoll_event ev;

			if (src->callbacks.tqh_first == NULL)
				continue; /* skip those with no callbacks */
			memset(&ev, 0, sizeof(ev));
			ev.events = EPOLLIN | EPOLLPRI | EPOLLRDHUP | EPOLLHUP;
			ev.data.fd = src->intr_handle.fd;

			/**
			 * add all the device file descriptors
			 * into the wait list.
			 */
			if (epoll_ctl(pfd, EPOLL_CTL_ADD,
					src->intr_handle.fd, &ev) < 0) {
				rte_panic("Error adding fd %d to epoll, %s\n",
					src->intr_handle.fd, strerror(errno));
			} else
				numfds++;
		}
		rte_spinlock_unlock(&intr_lock);
		/* serve the interrupt */
		eal_intr_handle_interrupts(pfd, numfds);

		/**
		 * when we return, we need to rebuild the
		 * list of fds to monitor.
		 */
		close(pfd);
	}
}
int
rte_eal_intr_init(void)
{
	int ret = 0;

	/* init the global interrupt source head */
	TAILQ_INIT(&intr_sources);

	/**
	 * create a pipe which will be waited by epoll and notified to
	 * rebuild the wait list of epoll.
	 */
	if (pipe(intr_pipe.pipefd) < 0) {
		rte_errno = errno;
		return -1;
	}

	/* create the host thread to wait/handle the interrupt */
	ret = rte_ctrl_thread_create(&intr_thread, "eal-intr-thread", NULL,
			eal_intr_thread_main, NULL);
	if (ret != 0) {
		rte_errno = -ret;
		RTE_LOG(ERR, EAL,
			"Failed to create thread for interrupt handling\n");
	}

	return ret;
}
static void
eal_intr_proc_rxtx_intr(int fd, const struct rte_intr_handle *intr_handle)
{
	union rte_intr_read_buffer buf;
	int bytes_read = 0;
	int nbytes;

	switch (intr_handle->type) {
	case RTE_INTR_HANDLE_UIO:
	case RTE_INTR_HANDLE_UIO_INTX:
		bytes_read = sizeof(buf.uio_intr_count);
		break;
	case RTE_INTR_HANDLE_VFIO_MSIX:
	case RTE_INTR_HANDLE_VFIO_MSI:
	case RTE_INTR_HANDLE_VFIO_LEGACY:
		bytes_read = sizeof(buf.vfio_intr_count);
		break;
	case RTE_INTR_HANDLE_VDEV:
		bytes_read = intr_handle->efd_counter_size;
		/* For vdev, number of bytes to read is set by driver */
		break;
	case RTE_INTR_HANDLE_EXT:
		return;
	default:
		bytes_read = 1;
		RTE_LOG(INFO, EAL, "unexpected intr type\n");
		break;
	}

	/**
	 * read out to clear the ready-to-be-read flag
	 * for epoll_wait.
	 */
	if (bytes_read == 0)
		return;
	do {
		nbytes = read(fd, &buf, bytes_read);
		if (nbytes < 0) {
			if (errno == EINTR || errno == EWOULDBLOCK ||
			    errno == EAGAIN)
				continue;
			RTE_LOG(ERR, EAL,
				"Error reading from fd %d: %s\n",
				fd, strerror(errno));
		} else if (nbytes == 0)
			RTE_LOG(ERR, EAL, "Read nothing from fd %d\n", fd);
		return;
	} while (1);
}
static int
eal_epoll_process_event(struct epoll_event *evs, unsigned int n,
			struct rte_epoll_event *events)
{
	unsigned int i, count = 0;
	struct rte_epoll_event *rev;
	uint32_t valid_status;

	for (i = 0; i < n; i++) {
		rev = evs[i].data.ptr;
		valid_status = RTE_EPOLL_VALID;
		/* ACQUIRE memory ordering here pairs with RELEASE
		 * ordering below acting as a lock to synchronize
		 * the event data updating.
		 */
		if (!rev || !__atomic_compare_exchange_n(&rev->status,
				&valid_status, RTE_EPOLL_EXEC, 0,
				__ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
			continue;

		events[count].status = RTE_EPOLL_VALID;
		events[count].fd = rev->fd;
		events[count].epfd = rev->epfd;
		events[count].epdata.event = evs[i].events;
		events[count].epdata.data = rev->epdata.data;
		if (rev->epdata.cb_fun)
			rev->epdata.cb_fun(rev->fd,
					rev->epdata.cb_arg);

		/* the status update should be observed after
		 * the other fields change.
		 */
		__atomic_store_n(&rev->status, RTE_EPOLL_VALID,
				__ATOMIC_RELEASE);
		count++;
	}
	return count;
}
static int
eal_init_tls_epfd(void)
{
	int pfd = epoll_create(255);

	if (pfd < 0) {
		RTE_LOG(ERR, EAL,
			"Cannot create epoll instance\n");
		return -1;
	}
	return pfd;
}

int
rte_intr_tls_epfd(void)
{
	if (RTE_PER_LCORE(_epfd) == -1)
		RTE_PER_LCORE(_epfd) = eal_init_tls_epfd();

	return RTE_PER_LCORE(_epfd);
}
static int
eal_epoll_wait(int epfd, struct rte_epoll_event *events,
	       int maxevents, int timeout, bool interruptible)
{
	struct epoll_event evs[maxevents];
	int rc;

	if (!events) {
		RTE_LOG(ERR, EAL, "rte_epoll_event can't be NULL\n");
		return -1;
	}

	/* using per thread epoll fd */
	if (epfd == RTE_EPOLL_PER_THREAD)
		epfd = rte_intr_tls_epfd();

	while (1) {
		rc = epoll_wait(epfd, evs, maxevents, timeout);
		if (likely(rc > 0)) {
			/* epoll_wait has at least one fd ready to read */
			rc = eal_epoll_process_event(evs, rc, events);
			break;
		} else if (rc < 0) {
			if (errno == EINTR) {
				if (interruptible)
					return -1;
				else
					continue;
			}
			/* epoll_wait failure */
			RTE_LOG(ERR, EAL, "epoll_wait failed: %s\n",
				strerror(errno));
			rc = -1;
			break;
		} else {
			/* rc == 0, epoll_wait timed out */
			break;
		}
	}

	return rc;
}

int
rte_epoll_wait(int epfd, struct rte_epoll_event *events,
	       int maxevents, int timeout)
{
	return eal_epoll_wait(epfd, events, maxevents, timeout, false);
}

int
rte_epoll_wait_interruptible(int epfd, struct rte_epoll_event *events,
			     int maxevents, int timeout)
{
	return eal_epoll_wait(epfd, events, maxevents, timeout, true);
}
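
/*
 * Usage sketch (hedged): draining events from the per-thread epoll
 * instance. RTE_EPOLL_PER_THREAD selects the lazily created fd from
 * rte_intr_tls_epfd(); a timeout of -1 waits forever. 'MAX_EVENTS' is
 * an arbitrary illustrative bound.
 *
 *	#define MAX_EVENTS 8
 *	struct rte_epoll_event ev[MAX_EVENTS];
 *	int i, n;
 *
 *	n = rte_epoll_wait(RTE_EPOLL_PER_THREAD, ev, MAX_EVENTS, -1);
 *	for (i = 0; i < n; i++) {
 *		// ev[i].fd is ready; ev[i].epdata.data carries the user
 *		// pointer supplied at registration time
 *	}
 */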
static void
eal_epoll_data_safe_free(struct rte_epoll_event *ev)
{
	uint32_t valid_status = RTE_EPOLL_VALID;

	while (!__atomic_compare_exchange_n(&ev->status, &valid_status,
		    RTE_EPOLL_INVALID, 0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) {
		while (__atomic_load_n(&ev->status,
				__ATOMIC_RELAXED) != RTE_EPOLL_VALID)
			rte_pause();
		valid_status = RTE_EPOLL_VALID;
	}
	memset(&ev->epdata, 0, sizeof(ev->epdata));
	ev->fd = -1;
	ev->epfd = -1;
}
int
rte_epoll_ctl(int epfd, int op, int fd,
	      struct rte_epoll_event *event)
{
	struct epoll_event ev;

	if (!event) {
		RTE_LOG(ERR, EAL, "rte_epoll_event can't be NULL\n");
		return -1;
	}

	/* using per thread epoll fd */
	if (epfd == RTE_EPOLL_PER_THREAD)
		epfd = rte_intr_tls_epfd();

	if (op == EPOLL_CTL_ADD) {
		__atomic_store_n(&event->status, RTE_EPOLL_VALID,
				__ATOMIC_RELAXED);
		event->fd = fd;  /* ignore fd in event */
		event->epfd = epfd;
		ev.data.ptr = (void *)event;
	}

	ev.events = event->epdata.event;
	if (epoll_ctl(epfd, op, fd, &ev) < 0) {
		RTE_LOG(ERR, EAL, "Error op %d fd %d epoll_ctl, %s\n",
			op, fd, strerror(errno));
		if (op == EPOLL_CTL_ADD)
			/* rollback status when CTL_ADD fails */
			__atomic_store_n(&event->status, RTE_EPOLL_INVALID,
					__ATOMIC_RELAXED);
		return -1;
	}

	if (op == EPOLL_CTL_DEL && __atomic_load_n(&event->status,
			__ATOMIC_RELAXED) != RTE_EPOLL_INVALID)
		eal_epoll_data_safe_free(event);

	return 0;
}
int
rte_intr_rx_ctl(struct rte_intr_handle *intr_handle, int epfd,
		int op, unsigned int vec, void *data)
{
	struct rte_epoll_event *rev;
	struct rte_epoll_data *epdata;
	int epfd_op;
	unsigned int efd_idx;
	int rc = 0;

	efd_idx = (vec >= RTE_INTR_VEC_RXTX_OFFSET) ?
		(vec - RTE_INTR_VEC_RXTX_OFFSET) : vec;

	if (!intr_handle || intr_handle->nb_efd == 0 ||
	    efd_idx >= intr_handle->nb_efd) {
		RTE_LOG(ERR, EAL, "Wrong intr vector number.\n");
		return -EPERM;
	}

	switch (op) {
	case RTE_INTR_EVENT_ADD:
		epfd_op = EPOLL_CTL_ADD;
		rev = &intr_handle->elist[efd_idx];
		if (__atomic_load_n(&rev->status,
				__ATOMIC_RELAXED) != RTE_EPOLL_INVALID) {
			RTE_LOG(INFO, EAL, "Event has already been added.\n");
			return -EEXIST;
		}

		/* attach to intr vector fd */
		epdata = &rev->epdata;
		epdata->event = EPOLLIN | EPOLLPRI | EPOLLET;
		epdata->data = data;
		epdata->cb_fun = (rte_intr_event_cb_t)eal_intr_proc_rxtx_intr;
		epdata->cb_arg = (void *)intr_handle;
		rc = rte_epoll_ctl(epfd, epfd_op,
				intr_handle->efds[efd_idx], rev);
		if (!rc)
			RTE_LOG(DEBUG, EAL,
				"efd %d associated with vec %d added on epfd %d\n",
				rev->fd, vec, epfd);
		else
			rc = -EPERM;
		break;
	case RTE_INTR_EVENT_DEL:
		epfd_op = EPOLL_CTL_DEL;
		rev = &intr_handle->elist[efd_idx];
		if (__atomic_load_n(&rev->status,
				__ATOMIC_RELAXED) == RTE_EPOLL_INVALID) {
			RTE_LOG(INFO, EAL, "Event does not exist.\n");
			return -EPERM;
		}

		rc = rte_epoll_ctl(rev->epfd, epfd_op, rev->fd, rev);
		if (rc)
			rc = -EPERM;
		break;
	default:
		RTE_LOG(ERR, EAL, "event op type mismatch\n");
		rc = -EPERM;
	}

	return rc;
}
void
rte_intr_free_epoll_fd(struct rte_intr_handle *intr_handle)
{
	uint32_t i;
	struct rte_epoll_event *rev;

	for (i = 0; i < intr_handle->nb_efd; i++) {
		rev = &intr_handle->elist[i];
		if (__atomic_load_n(&rev->status,
				__ATOMIC_RELAXED) == RTE_EPOLL_INVALID)
			continue;
		if (rte_epoll_ctl(rev->epfd, EPOLL_CTL_DEL, rev->fd, rev)) {
			/* force free if the entry is still valid */
			eal_epoll_data_safe_free(rev);
		}
	}
}
int
rte_intr_efd_enable(struct rte_intr_handle *intr_handle, uint32_t nb_efd)
{
	uint32_t i;
	int fd;
	uint32_t n = RTE_MIN(nb_efd, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);

	assert(nb_efd != 0);

	if (intr_handle->type == RTE_INTR_HANDLE_VFIO_MSIX) {
		for (i = 0; i < n; i++) {
			fd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
			if (fd < 0) {
				RTE_LOG(ERR, EAL,
					"cannot set up eventfd, error %i (%s)\n",
					errno, strerror(errno));
				return -errno;
			}
			intr_handle->efds[i] = fd;
		}
		intr_handle->nb_efd = n;
		intr_handle->max_intr = NB_OTHER_INTR + n;
	} else if (intr_handle->type == RTE_INTR_HANDLE_VDEV) {
		/* only check; initialization is done in the vdev driver. */
		if (intr_handle->efd_counter_size >
		    sizeof(union rte_intr_read_buffer)) {
			RTE_LOG(ERR, EAL, "the efd_counter_size is oversized\n");
			return -EINVAL;
		}
	} else {
		intr_handle->efds[0] = intr_handle->fd;
		intr_handle->nb_efd = RTE_MIN(nb_efd, 1U);
		intr_handle->max_intr = NB_OTHER_INTR;
	}

	return 0;
}
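
/*
 * Usage sketch (hedged): a PMD wiring per-queue Rx interrupts. The queue
 * count and 'dev' are illustrative names; error handling is omitted for
 * brevity.
 *
 *	uint32_t q, nb_rxq = 4;                 // hypothetical queue count
 *
 *	rte_intr_efd_enable(dev->intr_handle, nb_rxq);
 *	for (q = 0; q < nb_rxq; q++)
 *		rte_intr_rx_ctl(dev->intr_handle, RTE_EPOLL_PER_THREAD,
 *				RTE_INTR_EVENT_ADD,
 *				q + RTE_INTR_VEC_RXTX_OFFSET, NULL);
 *
 * A later rte_epoll_wait() on the same thread then reports which queue
 * vectors fired; rte_intr_efd_disable() undoes this setup.
 */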
void
rte_intr_efd_disable(struct rte_intr_handle *intr_handle)
{
	uint32_t i;

	rte_intr_free_epoll_fd(intr_handle);
	if (intr_handle->max_intr > intr_handle->nb_efd) {
		for (i = 0; i < intr_handle->nb_efd; i++)
			close(intr_handle->efds[i]);
	}
	intr_handle->nb_efd = 0;
	intr_handle->max_intr = 0;
}
int
rte_intr_dp_is_en(struct rte_intr_handle *intr_handle)
{
	return !!intr_handle->nb_efd;
}

int
rte_intr_allow_others(struct rte_intr_handle *intr_handle)
{
	if (!rte_intr_dp_is_en(intr_handle))
		return 1;
	else
		return !!(intr_handle->max_intr - intr_handle->nb_efd);
}

int
rte_intr_cap_multiple(struct rte_intr_handle *intr_handle)
{
	if (intr_handle->type == RTE_INTR_HANDLE_VFIO_MSIX)
		return 1;

	if (intr_handle->type == RTE_INTR_HANDLE_VDEV)
		return 1;

	return 0;
}

int rte_thread_is_intr(void)
{
	return pthread_equal(intr_thread, pthread_self());
}