/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <pthread.h>
#include <sys/queue.h>
#include <unistd.h>
#include <string.h>
#include <errno.h>
#include <assert.h>
#include <stdbool.h>
#include <sys/epoll.h>
#include <sys/signalfd.h>
#include <sys/ioctl.h>
#include <sys/eventfd.h>

#include <rte_common.h>
#include <rte_interrupts.h>
#include <rte_memory.h>
#include <rte_launch.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_debug.h>
#include <rte_log.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_pause.h>
#include <rte_vfio.h>
#include <rte_eal_trace.h>

#include "eal_private.h"
#include "eal_vfio.h"
#include "eal_thread.h"
#define EAL_INTR_EPOLL_WAIT_FOREVER (-1)
#define NB_OTHER_INTR               1

static RTE_DEFINE_PER_LCORE(int, _epfd) = -1; /**< epoll fd per thread */
/**
 * union buffer for reading on different devices
 */
union rte_intr_read_buffer {
	int uio_intr_count;		/* for uio device */
	uint64_t vfio_intr_count;	/* for vfio device */
	uint64_t timerfd_num;		/* for timerfd */
	char charbuf[16];		/* for others */
};
TAILQ_HEAD(rte_intr_cb_list, rte_intr_callback);
TAILQ_HEAD(rte_intr_source_list, rte_intr_source);

struct rte_intr_callback {
	TAILQ_ENTRY(rte_intr_callback) next;
	rte_intr_callback_fn cb_fn;	/**< callback address */
	void *cb_arg;			/**< parameter for callback */
	uint8_t pending_delete;		/**< delete after callback is called */
	rte_intr_unregister_callback_fn ucb_fn; /**< fn to call before cb is deleted */
};

struct rte_intr_source {
	TAILQ_ENTRY(rte_intr_source) next;
	struct rte_intr_handle *intr_handle; /**< interrupt handle */
	struct rte_intr_cb_list callbacks;   /**< user callbacks */
	uint32_t active;	/**< nonzero while callbacks are running */
};
/* global spinlock for interrupt data operation */
static rte_spinlock_t intr_lock = RTE_SPINLOCK_INITIALIZER;

/* union for pipe fds: pipefd[2] aliases {readfd, writefd} */
union intr_pipefds {
	struct {
		int pipefd[2];
	};
	struct {
		int readfd;
		int writefd;
	};
};

/* pipe used to wake the interrupt thread and rebuild its wait list */
static union intr_pipefds intr_pipe;

/* interrupt sources list */
static struct rte_intr_source_list intr_sources;

/* interrupt handling thread */
static pthread_t intr_thread;
/* VFIO interrupts */

/* irq set buffer length for legacy (INTx) and MSI interrupts */
#define IRQ_SET_BUF_LEN  (sizeof(struct vfio_irq_set) + sizeof(int))
/* irq set buffer length for queue interrupts and LSC interrupt */
#define MSIX_IRQ_SET_BUF_LEN (sizeof(struct vfio_irq_set) + \
			      sizeof(int) * (RTE_MAX_RXTX_INTR_VEC_ID + 1))
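/*
 * Layout note: struct vfio_irq_set ends in a flexible array member
 * (data[]), so the two buffer sizes above reserve the header plus the
 * eventfd payload in one stack allocation. A minimal sketch of the
 * request the kernel sees when two vectors are armed with eventfds
 * (illustrative only; efd0/efd1 are placeholders, not code used here):
 *
 *	char buf[sizeof(struct vfio_irq_set) + 2 * sizeof(int)];
 *	struct vfio_irq_set *s = (struct vfio_irq_set *)buf;
 *	int *fds = (int *)&s->data;
 *
 *	s->argsz = sizeof(buf);
 *	s->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
 *	s->index = VFIO_PCI_MSIX_IRQ_INDEX;
 *	s->start = 0;		// first vector to program
 *	s->count = 2;		// number of vectors
 *	fds[0] = efd0;
 *	fds[1] = efd1;
 *	ioctl(vfio_dev_fd, VFIO_DEVICE_SET_IRQS, s);
 */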
/* enable legacy (INTx) interrupts */
static int
vfio_enable_intx(const struct rte_intr_handle *intr_handle) {
	struct vfio_irq_set *irq_set;
	char irq_set_buf[IRQ_SET_BUF_LEN];
	int len, ret, vfio_dev_fd;
	int *fd_ptr;

	len = sizeof(irq_set_buf);

	/* enable INTx */
	irq_set = (struct vfio_irq_set *) irq_set_buf;
	irq_set->argsz = len;
	irq_set->count = 1;
	irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
	irq_set->index = VFIO_PCI_INTX_IRQ_INDEX;
	irq_set->start = 0;
	fd_ptr = (int *) &irq_set->data;
	*fd_ptr = rte_intr_fd_get(intr_handle);

	vfio_dev_fd = rte_intr_dev_fd_get(intr_handle);
	ret = ioctl(vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
	if (ret) {
		RTE_LOG(ERR, EAL, "Error enabling INTx interrupts for fd %d\n",
			rte_intr_fd_get(intr_handle));
		return -1;
	}

	/* unmask INTx after enabling */
	memset(irq_set, 0, len);
	len = sizeof(struct vfio_irq_set);
	irq_set->argsz = len;
	irq_set->count = 1;
	irq_set->flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_UNMASK;
	irq_set->index = VFIO_PCI_INTX_IRQ_INDEX;
	irq_set->start = 0;

	ret = ioctl(vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
	if (ret) {
		RTE_LOG(ERR, EAL, "Error unmasking INTx interrupts for fd %d\n",
			rte_intr_fd_get(intr_handle));
		return -1;
	}
	return 0;
}
/* disable legacy (INTx) interrupts */
static int
vfio_disable_intx(const struct rte_intr_handle *intr_handle) {
	struct vfio_irq_set *irq_set;
	char irq_set_buf[IRQ_SET_BUF_LEN];
	int len, ret, vfio_dev_fd;

	len = sizeof(struct vfio_irq_set);

	/* mask interrupts before disabling */
	irq_set = (struct vfio_irq_set *) irq_set_buf;
	irq_set->argsz = len;
	irq_set->count = 1;
	irq_set->flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_MASK;
	irq_set->index = VFIO_PCI_INTX_IRQ_INDEX;
	irq_set->start = 0;

	vfio_dev_fd = rte_intr_dev_fd_get(intr_handle);
	ret = ioctl(vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
	if (ret) {
		RTE_LOG(ERR, EAL, "Error masking INTx interrupts for fd %d\n",
			rte_intr_fd_get(intr_handle));
		return -1;
	}

	/* disable INTx: a TRIGGER request with count 0 tears the vector down */
	memset(irq_set, 0, len);
	irq_set->argsz = len;
	irq_set->count = 0;
	irq_set->flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER;
	irq_set->index = VFIO_PCI_INTX_IRQ_INDEX;
	irq_set->start = 0;

	ret = ioctl(vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
	if (ret) {
		RTE_LOG(ERR, EAL, "Error disabling INTx interrupts for fd %d\n",
			rte_intr_fd_get(intr_handle));
		return -1;
	}
	return 0;
}
/* unmask/ack legacy (INTx) interrupts */
static int
vfio_ack_intx(const struct rte_intr_handle *intr_handle)
{
	struct vfio_irq_set irq_set;
	int vfio_dev_fd;

	/* unmask INTx */
	memset(&irq_set, 0, sizeof(irq_set));
	irq_set.argsz = sizeof(irq_set);
	irq_set.count = 1;
	irq_set.flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_UNMASK;
	irq_set.index = VFIO_PCI_INTX_IRQ_INDEX;
	irq_set.start = 0;

	vfio_dev_fd = rte_intr_dev_fd_get(intr_handle);
	if (ioctl(vfio_dev_fd, VFIO_DEVICE_SET_IRQS, &irq_set)) {
		RTE_LOG(ERR, EAL, "Error unmasking INTx interrupts for fd %d\n",
			rte_intr_fd_get(intr_handle));
		return -1;
	}
	return 0;
}
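/*
 * Note: unlike the trigger ioctls above, the unmask request carries
 * DATA_NONE and therefore no eventfd payload, so a plain struct
 * vfio_irq_set on the stack is large enough. INTx is level triggered
 * and is auto-masked before the callback runs (see the rte_intr_ack()
 * comment below), which is why each interrupt must be acked/unmasked
 * here before the next one can be delivered.
 */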
/* enable MSI interrupts */
static int
vfio_enable_msi(const struct rte_intr_handle *intr_handle) {
	int len, ret;
	char irq_set_buf[IRQ_SET_BUF_LEN];
	struct vfio_irq_set *irq_set;
	int *fd_ptr, vfio_dev_fd;

	len = sizeof(irq_set_buf);

	irq_set = (struct vfio_irq_set *) irq_set_buf;
	irq_set->argsz = len;
	irq_set->count = 1;
	irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
	irq_set->index = VFIO_PCI_MSI_IRQ_INDEX;
	irq_set->start = 0;
	fd_ptr = (int *) &irq_set->data;
	*fd_ptr = rte_intr_fd_get(intr_handle);

	vfio_dev_fd = rte_intr_dev_fd_get(intr_handle);
	ret = ioctl(vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
	if (ret) {
		RTE_LOG(ERR, EAL, "Error enabling MSI interrupts for fd %d\n",
			rte_intr_fd_get(intr_handle));
		return -1;
	}
	return 0;
}
/* disable MSI interrupts */
static int
vfio_disable_msi(const struct rte_intr_handle *intr_handle) {
	struct vfio_irq_set *irq_set;
	char irq_set_buf[IRQ_SET_BUF_LEN];
	int len, ret, vfio_dev_fd;

	len = sizeof(struct vfio_irq_set);

	irq_set = (struct vfio_irq_set *) irq_set_buf;
	irq_set->argsz = len;
	irq_set->count = 0;
	irq_set->flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER;
	irq_set->index = VFIO_PCI_MSI_IRQ_INDEX;
	irq_set->start = 0;

	vfio_dev_fd = rte_intr_dev_fd_get(intr_handle);
	ret = ioctl(vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
	if (ret)
		RTE_LOG(ERR, EAL, "Error disabling MSI interrupts for fd %d\n",
			rte_intr_fd_get(intr_handle));

	return ret;
}
/* enable MSI-X interrupts */
static int
vfio_enable_msix(const struct rte_intr_handle *intr_handle) {
	int len, ret;
	char irq_set_buf[MSIX_IRQ_SET_BUF_LEN];
	struct vfio_irq_set *irq_set;
	int *fd_ptr, vfio_dev_fd, i;

	len = sizeof(irq_set_buf);

	irq_set = (struct vfio_irq_set *) irq_set_buf;
	irq_set->argsz = len;
	/* 0 < irq_set->count < RTE_MAX_RXTX_INTR_VEC_ID + 1 */
	irq_set->count = rte_intr_max_intr_get(intr_handle) ?
		(rte_intr_max_intr_get(intr_handle) >
		 RTE_MAX_RXTX_INTR_VEC_ID + 1 ? RTE_MAX_RXTX_INTR_VEC_ID + 1 :
		 rte_intr_max_intr_get(intr_handle)) : 1;

	irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
	irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
	irq_set->start = 0;
	fd_ptr = (int *) &irq_set->data;
	/* vector offset 0 is reserved for the non-efd (misc/LSC) mapping */
	fd_ptr[RTE_INTR_VEC_ZERO_OFFSET] = rte_intr_fd_get(intr_handle);
	for (i = 0; i < rte_intr_nb_efd_get(intr_handle); i++) {
		fd_ptr[RTE_INTR_VEC_RXTX_OFFSET + i] =
			rte_intr_efds_index_get(intr_handle, i);
	}

	vfio_dev_fd = rte_intr_dev_fd_get(intr_handle);
	ret = ioctl(vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
	if (ret) {
		RTE_LOG(ERR, EAL, "Error enabling MSI-X interrupts for fd %d\n",
			rte_intr_fd_get(intr_handle));
		return -1;
	}
	return 0;
}
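/*
 * Vector layout sketch (derived from the code above): slot 0 of the
 * eventfd array carries the device's miscellaneous (e.g. link status)
 * interrupt, and the per-queue eventfds start at
 * RTE_INTR_VEC_RXTX_OFFSET. A handle with N queue eventfds therefore
 * programs N + 1 MSI-X vectors:
 *
 *	fd_ptr[RTE_INTR_VEC_ZERO_OFFSET]	misc/LSC eventfd
 *	fd_ptr[RTE_INTR_VEC_RXTX_OFFSET + i]	eventfd of queue i
 */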
/* disable MSI-X interrupts */
static int
vfio_disable_msix(const struct rte_intr_handle *intr_handle) {
	struct vfio_irq_set *irq_set;
	char irq_set_buf[MSIX_IRQ_SET_BUF_LEN];
	int len, ret, vfio_dev_fd;

	len = sizeof(struct vfio_irq_set);

	irq_set = (struct vfio_irq_set *) irq_set_buf;
	irq_set->argsz = len;
	irq_set->count = 0;
	irq_set->flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER;
	irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
	irq_set->start = 0;

	vfio_dev_fd = rte_intr_dev_fd_get(intr_handle);
	ret = ioctl(vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
	if (ret)
		RTE_LOG(ERR, EAL, "Error disabling MSI-X interrupts for fd %d\n",
			rte_intr_fd_get(intr_handle));

	return ret;
}
#ifdef HAVE_VFIO_DEV_REQ_INTERFACE
/* enable req notifier */
static int
vfio_enable_req(const struct rte_intr_handle *intr_handle)
{
	int len, ret;
	char irq_set_buf[IRQ_SET_BUF_LEN];
	struct vfio_irq_set *irq_set;
	int *fd_ptr, vfio_dev_fd;

	len = sizeof(irq_set_buf);

	irq_set = (struct vfio_irq_set *) irq_set_buf;
	irq_set->argsz = len;
	irq_set->count = 1;
	irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
			 VFIO_IRQ_SET_ACTION_TRIGGER;
	irq_set->index = VFIO_PCI_REQ_IRQ_INDEX;
	irq_set->start = 0;
	fd_ptr = (int *) &irq_set->data;
	*fd_ptr = rte_intr_fd_get(intr_handle);

	vfio_dev_fd = rte_intr_dev_fd_get(intr_handle);
	ret = ioctl(vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
	if (ret) {
		RTE_LOG(ERR, EAL, "Error enabling req interrupts for fd %d\n",
			rte_intr_fd_get(intr_handle));
		return -1;
	}
	return 0;
}

/* disable req notifier */
static int
vfio_disable_req(const struct rte_intr_handle *intr_handle)
{
	struct vfio_irq_set *irq_set;
	char irq_set_buf[IRQ_SET_BUF_LEN];
	int len, ret, vfio_dev_fd;

	len = sizeof(struct vfio_irq_set);

	irq_set = (struct vfio_irq_set *) irq_set_buf;
	irq_set->argsz = len;
	irq_set->count = 0;
	irq_set->flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER;
	irq_set->index = VFIO_PCI_REQ_IRQ_INDEX;
	irq_set->start = 0;

	vfio_dev_fd = rte_intr_dev_fd_get(intr_handle);
	ret = ioctl(vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
	if (ret)
		RTE_LOG(ERR, EAL, "Error disabling req interrupts for fd %d\n",
			rte_intr_fd_get(intr_handle));

	return ret;
}
#endif
static int
uio_intx_intr_disable(const struct rte_intr_handle *intr_handle)
{
	unsigned char command_high;
	int uio_cfg_fd;

	/* use UIO config file descriptor for uio_pci_generic */
	uio_cfg_fd = rte_intr_dev_fd_get(intr_handle);
	if (pread(uio_cfg_fd, &command_high, 1, 5) != 1) {
		RTE_LOG(ERR, EAL,
			"Error reading interrupts status for fd %d\n",
			uio_cfg_fd);
		return -1;
	}
	/* disable interrupts */
	command_high |= 0x4;
	if (pwrite(uio_cfg_fd, &command_high, 1, 5) != 1) {
		RTE_LOG(ERR, EAL,
			"Error disabling interrupts for fd %d\n",
			uio_cfg_fd);
		return -1;
	}

	return 0;
}
static int
uio_intx_intr_enable(const struct rte_intr_handle *intr_handle)
{
	unsigned char command_high;
	int uio_cfg_fd;

	/* use UIO config file descriptor for uio_pci_generic */
	uio_cfg_fd = rte_intr_dev_fd_get(intr_handle);
	if (pread(uio_cfg_fd, &command_high, 1, 5) != 1) {
		RTE_LOG(ERR, EAL,
			"Error reading interrupts status for fd %d\n",
			uio_cfg_fd);
		return -1;
	}
	/* enable interrupts */
	command_high &= ~0x4;
	if (pwrite(uio_cfg_fd, &command_high, 1, 5) != 1) {
		RTE_LOG(ERR, EAL,
			"Error enabling interrupts for fd %d\n",
			uio_cfg_fd);
		return -1;
	}

	return 0;
}
static int
uio_intr_disable(const struct rte_intr_handle *intr_handle)
{
	const int value = 0;

	if (write(rte_intr_fd_get(intr_handle), &value, sizeof(value)) < 0) {
		RTE_LOG(ERR, EAL, "Error disabling interrupts for fd %d (%s)\n",
			rte_intr_fd_get(intr_handle), strerror(errno));
		return -1;
	}
	return 0;
}

static int
uio_intr_enable(const struct rte_intr_handle *intr_handle)
{
	const int value = 1;

	if (write(rte_intr_fd_get(intr_handle), &value, sizeof(value)) < 0) {
		RTE_LOG(ERR, EAL, "Error enabling interrupts for fd %d (%s)\n",
			rte_intr_fd_get(intr_handle), strerror(errno));
		return -1;
	}
	return 0;
}
int
rte_intr_callback_register(const struct rte_intr_handle *intr_handle,
			rte_intr_callback_fn cb, void *cb_arg)
{
	int ret, wake_thread;
	struct rte_intr_source *src;
	struct rte_intr_callback *callback;

	wake_thread = 0;

	/* first do parameter checking */
	if (rte_intr_fd_get(intr_handle) < 0 || cb == NULL) {
		RTE_LOG(ERR, EAL, "Registering with invalid input parameter\n");
		return -EINVAL;
	}

	/* allocate a new interrupt callback entity */
	callback = calloc(1, sizeof(*callback));
	if (callback == NULL) {
		RTE_LOG(ERR, EAL, "Can not allocate memory\n");
		return -ENOMEM;
	}
	callback->cb_fn = cb;
	callback->cb_arg = cb_arg;
	callback->pending_delete = 0;
	callback->ucb_fn = NULL;

	rte_spinlock_lock(&intr_lock);

	/* check if there is at least one callback registered for the fd */
	TAILQ_FOREACH(src, &intr_sources, next) {
		if (rte_intr_fd_get(src->intr_handle) == rte_intr_fd_get(intr_handle)) {
			/* we had no interrupts for this */
			if (TAILQ_EMPTY(&src->callbacks))
				wake_thread = 1;
			TAILQ_INSERT_TAIL(&(src->callbacks), callback, next);
			ret = 0;
			break;
		}
	}

	/* no existing callbacks for this - add new source */
	if (src == NULL) {
		src = calloc(1, sizeof(*src));
		if (src == NULL) {
			RTE_LOG(ERR, EAL, "Can not allocate memory\n");
			ret = -ENOMEM;
			free(callback);
			callback = NULL;
		} else {
			src->intr_handle = rte_intr_instance_dup(intr_handle);
			if (src->intr_handle == NULL) {
				RTE_LOG(ERR, EAL, "Can not create intr instance\n");
				ret = -ENOMEM;
				free(callback);
				callback = NULL;
				free(src);
				src = NULL;
			} else {
				TAILQ_INIT(&src->callbacks);
				TAILQ_INSERT_TAIL(&(src->callbacks), callback,
						  next);
				TAILQ_INSERT_TAIL(&intr_sources, src, next);
				wake_thread = 1;
				ret = 0;
			}
		}
	}

	rte_spinlock_unlock(&intr_lock);

	/**
	 * check if need to notify the pipe fd waited by epoll_wait to
	 * rebuild the wait list.
	 */
	if (wake_thread)
		if (write(intr_pipe.writefd, "1", 1) < 0)
			ret = -EPIPE;

	rte_eal_trace_intr_callback_register(intr_handle, cb, cb_arg, ret);
	return ret;
}
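/*
 * Usage sketch (hypothetical driver code, not part of this file; the
 * my_dev type and lsc_handler name are placeholders):
 *
 *	static void lsc_handler(void *arg)
 *	{
 *		struct my_dev *dev = arg;
 *		// runs in the eal-intr-thread context
 *	}
 *
 *	rte_intr_callback_register(dev->intr_handle, lsc_handler, dev);
 *	rte_intr_enable(dev->intr_handle);
 */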
int
rte_intr_callback_unregister_pending(const struct rte_intr_handle *intr_handle,
				rte_intr_callback_fn cb_fn, void *cb_arg,
				rte_intr_unregister_callback_fn ucb_fn)
{
	int ret;
	struct rte_intr_source *src;
	struct rte_intr_callback *cb, *next;

	/* do parameter checking first */
	if (rte_intr_fd_get(intr_handle) < 0) {
		RTE_LOG(ERR, EAL, "Unregistering with invalid input parameter\n");
		return -EINVAL;
	}

	rte_spinlock_lock(&intr_lock);

	/* check if the interrupt source for the fd exists */
	TAILQ_FOREACH(src, &intr_sources, next) {
		if (rte_intr_fd_get(src->intr_handle) == rte_intr_fd_get(intr_handle))
			break;
	}

	/* No interrupt source registered for the fd */
	if (src == NULL) {
		ret = -ENOENT;

	/* only usable if the source is active */
	} else if (src->active == 0) {
		ret = -EAGAIN;

	} else {
		ret = 0;

		/* walk through the callbacks and mark all that match. */
		for (cb = TAILQ_FIRST(&src->callbacks); cb != NULL; cb = next) {
			next = TAILQ_NEXT(cb, next);
			if (cb->cb_fn == cb_fn && (cb_arg == (void *)-1 ||
					cb->cb_arg == cb_arg)) {
				cb->pending_delete = 1;
				cb->ucb_fn = ucb_fn;
				ret++;
			}
		}
	}

	rte_spinlock_unlock(&intr_lock);

	return ret;
}
int
rte_intr_callback_unregister(const struct rte_intr_handle *intr_handle,
			rte_intr_callback_fn cb_fn, void *cb_arg)
{
	int ret;
	struct rte_intr_source *src;
	struct rte_intr_callback *cb, *next;

	/* do parameter checking first */
	if (rte_intr_fd_get(intr_handle) < 0) {
		RTE_LOG(ERR, EAL, "Unregistering with invalid input parameter\n");
		return -EINVAL;
	}

	rte_spinlock_lock(&intr_lock);

	/* check if the interrupt source for the fd exists */
	TAILQ_FOREACH(src, &intr_sources, next)
		if (rte_intr_fd_get(src->intr_handle) == rte_intr_fd_get(intr_handle))
			break;

	/* No interrupt source registered for the fd */
	if (src == NULL) {
		ret = -ENOENT;

	/* interrupt source has some active callbacks right now. */
	} else if (src->active != 0) {
		ret = -EAGAIN;

	/* ok to remove. */
	} else {
		ret = 0;

		/* walk through the callbacks and remove all that match. */
		for (cb = TAILQ_FIRST(&src->callbacks); cb != NULL; cb = next) {
			next = TAILQ_NEXT(cb, next);
			if (cb->cb_fn == cb_fn && (cb_arg == (void *)-1 ||
					cb->cb_arg == cb_arg)) {
				TAILQ_REMOVE(&src->callbacks, cb, next);
				free(cb);
				ret++;
			}
		}

		/* all callbacks for that source are removed. */
		if (TAILQ_EMPTY(&src->callbacks)) {
			TAILQ_REMOVE(&intr_sources, src, next);
			rte_intr_instance_free(src->intr_handle);
			free(src);
		}
	}

	rte_spinlock_unlock(&intr_lock);

	/* notify the pipe fd waited by epoll_wait to rebuild the wait list */
	if (ret >= 0 && write(intr_pipe.writefd, "1", 1) < 0) {
		ret = -EPIPE;
	}

	rte_eal_trace_intr_callback_unregister(intr_handle, cb_fn, cb_arg,
		ret);
	return ret;
}
int
rte_intr_callback_unregister_sync(const struct rte_intr_handle *intr_handle,
			rte_intr_callback_fn cb_fn, void *cb_arg)
{
	int ret = 0;

	while ((ret = rte_intr_callback_unregister(intr_handle, cb_fn, cb_arg)) == -EAGAIN)
		rte_pause();

	return ret;
}
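/*
 * Note: rte_intr_callback_unregister() returns -EAGAIN while the source
 * is marked active, so the _sync variant above spins until the interrupt
 * thread has finished running the source's callbacks. Consequently it
 * must not be called from within one of those callbacks, or it would
 * spin forever waiting for its own completion.
 */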
int
rte_intr_enable(const struct rte_intr_handle *intr_handle)
{
	int rc = 0, uio_cfg_fd;

	if (intr_handle == NULL)
		return -1;

	if (rte_intr_type_get(intr_handle) == RTE_INTR_HANDLE_VDEV) {
		rc = 0;
		goto out;
	}

	uio_cfg_fd = rte_intr_dev_fd_get(intr_handle);
	if (rte_intr_fd_get(intr_handle) < 0 || uio_cfg_fd < 0) {
		rc = -1;
		goto out;
	}

	switch (rte_intr_type_get(intr_handle)) {
	/* write to the uio fd to enable the interrupt */
	case RTE_INTR_HANDLE_UIO:
		if (uio_intr_enable(intr_handle))
			rc = -1;
		break;
	case RTE_INTR_HANDLE_UIO_INTX:
		if (uio_intx_intr_enable(intr_handle))
			rc = -1;
		break;
	/* not used at this moment */
	case RTE_INTR_HANDLE_ALARM:
		rc = -1;
		break;
	case RTE_INTR_HANDLE_VFIO_MSIX:
		if (vfio_enable_msix(intr_handle))
			rc = -1;
		break;
	case RTE_INTR_HANDLE_VFIO_MSI:
		if (vfio_enable_msi(intr_handle))
			rc = -1;
		break;
	case RTE_INTR_HANDLE_VFIO_LEGACY:
		if (vfio_enable_intx(intr_handle))
			rc = -1;
		break;
#ifdef HAVE_VFIO_DEV_REQ_INTERFACE
	case RTE_INTR_HANDLE_VFIO_REQ:
		if (vfio_enable_req(intr_handle))
			rc = -1;
		break;
#endif
	/* not used at this moment */
	case RTE_INTR_HANDLE_DEV_EVENT:
		rc = -1;
		break;
	/* unknown handle type */
	default:
		RTE_LOG(ERR, EAL, "Unknown handle type of fd %d\n",
			rte_intr_fd_get(intr_handle));
		rc = -1;
		break;
	}
out:
	rte_eal_trace_intr_enable(intr_handle, rc);
	return rc;
}
/**
 * PMDs generally call this function at the end of their IRQ callback.
 * Internally, it unmasks the interrupt if possible.
 *
 * For INTx, unmasking is required as the interrupt is auto-masked prior to
 * invoking the callback.
 *
 * For MSI/MSI-X, unmasking is typically not needed as the interrupt is not
 * auto-masked. In fact, for interrupt handle types VFIO_MSIX and VFIO_MSI,
 * this function is a no-op.
 */
int
rte_intr_ack(const struct rte_intr_handle *intr_handle)
{
	int uio_cfg_fd;

	if (rte_intr_type_get(intr_handle) == RTE_INTR_HANDLE_VDEV)
		return 0;

	uio_cfg_fd = rte_intr_dev_fd_get(intr_handle);
	if (rte_intr_fd_get(intr_handle) < 0 || uio_cfg_fd < 0)
		return -1;

	switch (rte_intr_type_get(intr_handle)) {
	/* Both acking and enabling are same for UIO */
	case RTE_INTR_HANDLE_UIO:
		if (uio_intr_enable(intr_handle))
			return -1;
		break;
	case RTE_INTR_HANDLE_UIO_INTX:
		if (uio_intx_intr_enable(intr_handle))
			return -1;
		break;
	/* not used at this moment */
	case RTE_INTR_HANDLE_ALARM:
		return -1;
	/* VFIO MSI* is implicitly acked unlike INTx, nothing to do */
	case RTE_INTR_HANDLE_VFIO_MSIX:
	case RTE_INTR_HANDLE_VFIO_MSI:
		return 0;
	case RTE_INTR_HANDLE_VFIO_LEGACY:
		if (vfio_ack_intx(intr_handle))
			return -1;
		break;
#ifdef HAVE_VFIO_DEV_REQ_INTERFACE
	case RTE_INTR_HANDLE_VFIO_REQ:
		return -1;
#endif
	/* not used at this moment */
	case RTE_INTR_HANDLE_DEV_EVENT:
		return -1;
	/* unknown handle type */
	default:
		RTE_LOG(ERR, EAL, "Unknown handle type of fd %d\n",
			rte_intr_fd_get(intr_handle));
		return -1;
	}

	return 0;
}
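/*
 * Typical callback shape implied by the comment above (hypothetical
 * driver code; my_dev and my_irq_cb are placeholders):
 *
 *	static void my_irq_cb(void *arg)
 *	{
 *		struct my_dev *dev = arg;
 *		// ... read and clear the device event ...
 *		rte_intr_ack(dev->intr_handle);	// re-arm INTx; no-op for MSI/MSI-X
 *	}
 */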
int
rte_intr_disable(const struct rte_intr_handle *intr_handle)
{
	int rc = 0, uio_cfg_fd;

	if (intr_handle == NULL)
		return -1;

	if (rte_intr_type_get(intr_handle) == RTE_INTR_HANDLE_VDEV) {
		rc = 0;
		goto out;
	}

	uio_cfg_fd = rte_intr_dev_fd_get(intr_handle);
	if (rte_intr_fd_get(intr_handle) < 0 || uio_cfg_fd < 0) {
		rc = -1;
		goto out;
	}

	switch (rte_intr_type_get(intr_handle)) {
	/* write to the uio fd to disable the interrupt */
	case RTE_INTR_HANDLE_UIO:
		if (uio_intr_disable(intr_handle))
			rc = -1;
		break;
	case RTE_INTR_HANDLE_UIO_INTX:
		if (uio_intx_intr_disable(intr_handle))
			rc = -1;
		break;
	/* not used at this moment */
	case RTE_INTR_HANDLE_ALARM:
		rc = -1;
		break;
	case RTE_INTR_HANDLE_VFIO_MSIX:
		if (vfio_disable_msix(intr_handle))
			rc = -1;
		break;
	case RTE_INTR_HANDLE_VFIO_MSI:
		if (vfio_disable_msi(intr_handle))
			rc = -1;
		break;
	case RTE_INTR_HANDLE_VFIO_LEGACY:
		if (vfio_disable_intx(intr_handle))
			rc = -1;
		break;
#ifdef HAVE_VFIO_DEV_REQ_INTERFACE
	case RTE_INTR_HANDLE_VFIO_REQ:
		if (vfio_disable_req(intr_handle))
			rc = -1;
		break;
#endif
	/* not used at this moment */
	case RTE_INTR_HANDLE_DEV_EVENT:
		rc = -1;
		break;
	/* unknown handle type */
	default:
		RTE_LOG(ERR, EAL, "Unknown handle type of fd %d\n",
			rte_intr_fd_get(intr_handle));
		rc = -1;
		break;
	}
out:
	rte_eal_trace_intr_disable(intr_handle, rc);
	return rc;
}
static int
eal_intr_process_interrupts(struct epoll_event *events, int nfds)
{
	bool call = false;
	int n, bytes_read, rv;
	struct rte_intr_source *src;
	struct rte_intr_callback *cb, *next;
	union rte_intr_read_buffer buf;
	struct rte_intr_callback active_cb;

	for (n = 0; n < nfds; n++) {
		/**
		 * if the pipe fd is ready to read, return out to
		 * rebuild the wait list.
		 */
		if (events[n].data.fd == intr_pipe.readfd){
			int r = read(intr_pipe.readfd, buf.charbuf,
					sizeof(buf.charbuf));
			RTE_SET_USED(r);
			return -1;
		}
		rte_spinlock_lock(&intr_lock);
		TAILQ_FOREACH(src, &intr_sources, next)
			if (rte_intr_fd_get(src->intr_handle) == events[n].data.fd)
				break;
		if (src == NULL) {
			rte_spinlock_unlock(&intr_lock);
			continue;
		}

		/* mark this interrupt source as active and release the lock. */
		src->active = 1;
		rte_spinlock_unlock(&intr_lock);

		/* set the length to be read for different handle types */
		switch (rte_intr_type_get(src->intr_handle)) {
		case RTE_INTR_HANDLE_UIO:
		case RTE_INTR_HANDLE_UIO_INTX:
			bytes_read = sizeof(buf.uio_intr_count);
			break;
		case RTE_INTR_HANDLE_ALARM:
			bytes_read = sizeof(buf.timerfd_num);
			break;
		case RTE_INTR_HANDLE_VFIO_MSIX:
		case RTE_INTR_HANDLE_VFIO_MSI:
		case RTE_INTR_HANDLE_VFIO_LEGACY:
			bytes_read = sizeof(buf.vfio_intr_count);
			break;
#ifdef HAVE_VFIO_DEV_REQ_INTERFACE
		case RTE_INTR_HANDLE_VFIO_REQ:
			bytes_read = 0;
			call = true;
			break;
#endif
		case RTE_INTR_HANDLE_VDEV:
		case RTE_INTR_HANDLE_EXT:
			bytes_read = 0;
			call = true;
			break;
		case RTE_INTR_HANDLE_DEV_EVENT:
			bytes_read = 0;
			call = true;
			break;
		default:
			bytes_read = 1;
			break;
		}

		if (bytes_read > 0) {
			/**
			 * read out to clear the ready-to-be-read flag
			 * for epoll_wait.
			 */
			bytes_read = read(events[n].data.fd, &buf, bytes_read);
			if (bytes_read < 0) {
				if (errno == EINTR || errno == EWOULDBLOCK)
					continue;

				RTE_LOG(ERR, EAL, "Error reading from file "
					"descriptor %d: %s\n",
					events[n].data.fd,
					strerror(errno));
				/*
				 * The device is unplugged or buggy, remove
				 * it as an interrupt source and return to
				 * force the wait list to be rebuilt.
				 */
				rte_spinlock_lock(&intr_lock);
				TAILQ_REMOVE(&intr_sources, src, next);
				rte_spinlock_unlock(&intr_lock);

				for (cb = TAILQ_FIRST(&src->callbacks); cb;
							cb = next) {
					next = TAILQ_NEXT(cb, next);
					TAILQ_REMOVE(&src->callbacks, cb, next);
					free(cb);
				}
				rte_intr_instance_free(src->intr_handle);
				free(src);
				return -1;
			} else if (bytes_read == 0)
				RTE_LOG(ERR, EAL, "Read nothing from file "
					"descriptor %d\n", events[n].data.fd);
			else
				call = true;
		}

		/* grab a lock, again to call callbacks and update status. */
		rte_spinlock_lock(&intr_lock);

		if (call) {
			/* Finally, call all callbacks. */
			TAILQ_FOREACH(cb, &src->callbacks, next) {

				/* make a copy and unlock. */
				active_cb = *cb;
				rte_spinlock_unlock(&intr_lock);

				/* call the actual callback */
				active_cb.cb_fn(active_cb.cb_arg);

				/* get the lock back. */
				rte_spinlock_lock(&intr_lock);
			}
		}

		/* we're done with that interrupt source, release it. */
		src->active = 0;

		rv = 0;

		/* check if any callbacks are supposed to be removed */
		for (cb = TAILQ_FIRST(&src->callbacks); cb != NULL; cb = next) {
			next = TAILQ_NEXT(cb, next);
			if (cb->pending_delete) {
				TAILQ_REMOVE(&src->callbacks, cb, next);
				if (cb->ucb_fn)
					cb->ucb_fn(src->intr_handle, cb->cb_arg);
				free(cb);
				rv++;
			}
		}

		/* all callbacks for that source are removed. */
		if (TAILQ_EMPTY(&src->callbacks)) {
			TAILQ_REMOVE(&intr_sources, src, next);
			rte_intr_instance_free(src->intr_handle);
			free(src);
		}

		/* notify the pipe fd waited by epoll_wait to rebuild the wait list */
		if (rv > 0 && write(intr_pipe.writefd, "1", 1) < 0) {
			rte_spinlock_unlock(&intr_lock);
			return -EPIPE;
		}

		rte_spinlock_unlock(&intr_lock);
	}

	return 0;
}
/**
 * It handles all the interrupts.
 *
 * @param pfd
 *  epoll file descriptor.
 * @param totalfds
 *  The number of file descriptors added in epoll.
 *
 * @return
 *  void
 */
static void
eal_intr_handle_interrupts(int pfd, unsigned totalfds)
{
	struct epoll_event events[totalfds];
	int nfds = 0;

	for (;;) {
		nfds = epoll_wait(pfd, events, totalfds,
			EAL_INTR_EPOLL_WAIT_FOREVER);
		/* epoll_wait fail */
		if (nfds < 0) {
			if (errno == EINTR)
				continue;
			RTE_LOG(ERR, EAL,
				"epoll_wait returns with fail\n");
			return;
		}
		/* epoll_wait timeout, will never happen here */
		else if (nfds == 0)
			continue;
		/* epoll_wait has at least one fd ready to read */
		if (eal_intr_process_interrupts(events, nfds) < 0)
			return;
	}
}
/**
 * It builds/rebuilds up the epoll file descriptor with all the
 * file descriptors being waited on. Then handles the interrupts.
 *
 * @param arg
 *  pointer. (unused)
 *
 * @return
 *  never return;
 */
static __rte_noreturn void *
eal_intr_thread_main(__rte_unused void *arg)
{
	/* host thread, never break out */
	for (;;) {
		/* build up the epoll fd with all descriptors we are to
		 * wait on then pass it to the handle_interrupts function
		 */
		static struct epoll_event pipe_event = {
			.events = EPOLLIN | EPOLLPRI,
		};
		struct rte_intr_source *src;
		unsigned numfds = 0;

		/* create epoll fd */
		int pfd = epoll_create(1);
		if (pfd < 0)
			rte_panic("Cannot create epoll instance\n");

		pipe_event.data.fd = intr_pipe.readfd;
		/**
		 * add pipe fd into wait list, this pipe is used to
		 * rebuild the wait list.
		 */
		if (epoll_ctl(pfd, EPOLL_CTL_ADD, intr_pipe.readfd,
						&pipe_event) < 0) {
			rte_panic("Error adding fd to %d epoll_ctl, %s\n",
					intr_pipe.readfd, strerror(errno));
		}
		numfds++;

		rte_spinlock_lock(&intr_lock);

		TAILQ_FOREACH(src, &intr_sources, next) {
			struct epoll_event ev;

			if (src->callbacks.tqh_first == NULL)
				continue; /* skip those with no callbacks */
			memset(&ev, 0, sizeof(ev));
			ev.events = EPOLLIN | EPOLLPRI | EPOLLRDHUP | EPOLLHUP;
			ev.data.fd = rte_intr_fd_get(src->intr_handle);

			/**
			 * add all the uio device file descriptors
			 * into the wait list.
			 */
			if (epoll_ctl(pfd, EPOLL_CTL_ADD,
					rte_intr_fd_get(src->intr_handle), &ev) < 0) {
				rte_panic("Error adding fd %d epoll_ctl, %s\n",
					rte_intr_fd_get(src->intr_handle),
					strerror(errno));
			}
			else
				numfds++;
		}
		rte_spinlock_unlock(&intr_lock);
		/* serve the interrupt */
		eal_intr_handle_interrupts(pfd, numfds);

		/**
		 * when we return, we need to rebuild the
		 * list of fds to monitor.
		 */
		close(pfd);
	}
}
int
rte_eal_intr_init(void)
{
	int ret = 0;

	/* init the global interrupt source head */
	TAILQ_INIT(&intr_sources);

	/**
	 * create a pipe which will be waited by epoll and notified to
	 * rebuild the wait list of epoll.
	 */
	if (pipe(intr_pipe.pipefd) < 0) {
		rte_errno = errno;
		return -1;
	}

	/* create the host thread to wait/handle the interrupt */
	ret = rte_ctrl_thread_create(&intr_thread, "eal-intr-thread", NULL,
			eal_intr_thread_main, NULL);
	if (ret != 0) {
		rte_errno = -ret;
		RTE_LOG(ERR, EAL,
			"Failed to create thread for interrupt handling\n");
	}

	return ret;
}
static void
eal_intr_proc_rxtx_intr(int fd, const struct rte_intr_handle *intr_handle)
{
	union rte_intr_read_buffer buf;
	int bytes_read = 0;
	int nbytes;

	switch (rte_intr_type_get(intr_handle)) {
	case RTE_INTR_HANDLE_UIO:
	case RTE_INTR_HANDLE_UIO_INTX:
		bytes_read = sizeof(buf.uio_intr_count);
		break;
	case RTE_INTR_HANDLE_VFIO_MSIX:
	case RTE_INTR_HANDLE_VFIO_MSI:
	case RTE_INTR_HANDLE_VFIO_LEGACY:
		bytes_read = sizeof(buf.vfio_intr_count);
		break;
	case RTE_INTR_HANDLE_VDEV:
		bytes_read = rte_intr_efd_counter_size_get(intr_handle);
		/* For vdev, number of bytes to read is set by driver */
		break;
	case RTE_INTR_HANDLE_EXT:
		return;
	default:
		bytes_read = 1;
		RTE_LOG(INFO, EAL, "unexpected intr type\n");
		break;
	}

	/**
	 * read out to clear the ready-to-be-read flag
	 * for epoll_wait.
	 */
	if (bytes_read == 0)
		return;
	do {
		nbytes = read(fd, &buf, bytes_read);
		if (nbytes < 0) {
			if (errno == EINTR || errno == EWOULDBLOCK ||
			    errno == EAGAIN)
				continue;
			RTE_LOG(ERR, EAL,
				"Error reading from fd %d: %s\n",
				fd, strerror(errno));
		} else if (nbytes == 0)
			RTE_LOG(ERR, EAL, "Read nothing from fd %d\n", fd);
		return;
	} while (1);
}
static int
eal_epoll_process_event(struct epoll_event *evs, unsigned int n,
			struct rte_epoll_event *events)
{
	unsigned int i, count = 0;
	struct rte_epoll_event *rev;
	uint32_t valid_status;

	for (i = 0; i < n; i++) {
		rev = evs[i].data.ptr;
		valid_status = RTE_EPOLL_VALID;
		/* ACQUIRE memory ordering here pairs with RELEASE
		 * ordering below acting as a lock to synchronize
		 * the event data updating.
		 */
		if (!rev || !__atomic_compare_exchange_n(&rev->status,
				    &valid_status, RTE_EPOLL_EXEC, 0,
				    __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
			continue;

		events[count].status = RTE_EPOLL_VALID;
		events[count].fd = rev->fd;
		events[count].epfd = rev->epfd;
		events[count].epdata.event = evs[i].events;
		events[count].epdata.data = rev->epdata.data;
		if (rev->epdata.cb_fun)
			rev->epdata.cb_fun(rev->fd,
					   rev->epdata.cb_arg);

		/* the status update should be observed after
		 * the other fields change.
		 */
		__atomic_store_n(&rev->status, RTE_EPOLL_VALID,
				__ATOMIC_RELEASE);
		count++;
	}
	return count;
}
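/*
 * Status state machine used above and in eal_epoll_data_safe_free():
 * an event is RTE_EPOLL_VALID while armed, briefly RTE_EPOLL_EXEC while
 * this function copies it out and runs its callback, and
 * RTE_EPOLL_INVALID once freed. The CAS/store pair makes the copy-out
 * atomic with respect to a concurrent rte_epoll_ctl(EPOLL_CTL_DEL) on
 * another thread.
 */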
static int
eal_init_tls_epfd(void)
{
	int pfd = epoll_create(255);

	if (pfd < 0) {
		RTE_LOG(ERR, EAL,
			"Cannot create epoll instance\n");
		return -1;
	}
	return pfd;
}

int
rte_intr_tls_epfd(void)
{
	if (RTE_PER_LCORE(_epfd) == -1)
		RTE_PER_LCORE(_epfd) = eal_init_tls_epfd();

	return RTE_PER_LCORE(_epfd);
}
static int
eal_epoll_wait(int epfd, struct rte_epoll_event *events,
	       int maxevents, int timeout, bool interruptible)
{
	struct epoll_event evs[maxevents];
	int rc;

	if (!events) {
		RTE_LOG(ERR, EAL, "rte_epoll_event can't be NULL\n");
		return -1;
	}

	/* using per thread epoll fd */
	if (epfd == RTE_EPOLL_PER_THREAD)
		epfd = rte_intr_tls_epfd();

	while (1) {
		rc = epoll_wait(epfd, evs, maxevents, timeout);
		if (likely(rc > 0)) {
			/* epoll_wait has at least one fd ready to read */
			rc = eal_epoll_process_event(evs, rc, events);
			break;
		} else if (rc < 0) {
			if (errno == EINTR) {
				if (interruptible)
					return -1;
				else
					continue;
			}
			/* epoll_wait fail */
			RTE_LOG(ERR, EAL, "epoll_wait returns with fail %s\n",
				strerror(errno));
			rc = -1;
			break;
		} else {
			/* rc == 0, epoll_wait timed out */
			break;
		}
	}

	return rc;
}
int
rte_epoll_wait(int epfd, struct rte_epoll_event *events,
	       int maxevents, int timeout)
{
	return eal_epoll_wait(epfd, events, maxevents, timeout, false);
}

int
rte_epoll_wait_interruptible(int epfd, struct rte_epoll_event *events,
			     int maxevents, int timeout)
{
	return eal_epoll_wait(epfd, events, maxevents, timeout, true);
}
static void
eal_epoll_data_safe_free(struct rte_epoll_event *ev)
{
	uint32_t valid_status = RTE_EPOLL_VALID;

	while (!__atomic_compare_exchange_n(&ev->status, &valid_status,
		    RTE_EPOLL_INVALID, 0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) {
		while (__atomic_load_n(&ev->status,
				__ATOMIC_RELAXED) != RTE_EPOLL_VALID)
			rte_pause();
		valid_status = RTE_EPOLL_VALID;
	}
	memset(&ev->epdata, 0, sizeof(ev->epdata));
	ev->fd = -1;
	ev->epfd = -1;
}
int
rte_epoll_ctl(int epfd, int op, int fd,
	      struct rte_epoll_event *event)
{
	struct epoll_event ev;

	if (!event) {
		RTE_LOG(ERR, EAL, "rte_epoll_event can't be NULL\n");
		return -1;
	}

	/* using per thread epoll fd */
	if (epfd == RTE_EPOLL_PER_THREAD)
		epfd = rte_intr_tls_epfd();

	if (op == EPOLL_CTL_ADD) {
		__atomic_store_n(&event->status, RTE_EPOLL_VALID,
				__ATOMIC_RELAXED);
		event->fd = fd;  /* ignore fd in event */
		event->epfd = epfd;
		ev.data.ptr = (void *)event;
	}

	ev.events = event->epdata.event;
	if (epoll_ctl(epfd, op, fd, &ev) < 0) {
		RTE_LOG(ERR, EAL, "Error op %d fd %d epoll_ctl, %s\n",
			op, fd, strerror(errno));
		if (op == EPOLL_CTL_ADD)
			/* rollback status when CTL_ADD fails */
			__atomic_store_n(&event->status, RTE_EPOLL_INVALID,
					__ATOMIC_RELAXED);
		return -1;
	}

	if (op == EPOLL_CTL_DEL && __atomic_load_n(&event->status,
			__ATOMIC_RELAXED) != RTE_EPOLL_INVALID)
		eal_epoll_data_safe_free(event);

	return 0;
}
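/*
 * Usage sketch (illustrative only; efd is assumed to be an existing
 * eventfd or other pollable descriptor):
 *
 *	static struct rte_epoll_event ev;
 *
 *	ev.epdata.event = EPOLLIN;
 *	ev.epdata.data = NULL;
 *	rte_epoll_ctl(RTE_EPOLL_PER_THREAD, EPOLL_CTL_ADD, efd, &ev);
 *
 *	struct rte_epoll_event out[1];
 *	int n = rte_epoll_wait(RTE_EPOLL_PER_THREAD, out, 1, -1);
 */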
int
rte_intr_rx_ctl(struct rte_intr_handle *intr_handle, int epfd,
		int op, unsigned int vec, void *data)
{
	struct rte_epoll_event *rev;
	struct rte_epoll_data *epdata;
	int epfd_op;
	unsigned int efd_idx;
	int rc = 0;

	efd_idx = (vec >= RTE_INTR_VEC_RXTX_OFFSET) ?
		(vec - RTE_INTR_VEC_RXTX_OFFSET) : vec;

	if (intr_handle == NULL || rte_intr_nb_efd_get(intr_handle) == 0 ||
			efd_idx >= (unsigned int)rte_intr_nb_efd_get(intr_handle)) {
		RTE_LOG(ERR, EAL, "Wrong intr vector number.\n");
		return -EPERM;
	}

	switch (op) {
	case RTE_INTR_EVENT_ADD:
		epfd_op = EPOLL_CTL_ADD;
		rev = rte_intr_elist_index_get(intr_handle, efd_idx);
		if (__atomic_load_n(&rev->status,
				__ATOMIC_RELAXED) != RTE_EPOLL_INVALID) {
			RTE_LOG(INFO, EAL, "Event already added.\n");
			return -EEXIST;
		}

		/* attach to intr vector fd */
		epdata = &rev->epdata;
		epdata->event = EPOLLIN | EPOLLPRI | EPOLLET;
		epdata->data = data;
		epdata->cb_fun = (rte_intr_event_cb_t)eal_intr_proc_rxtx_intr;
		epdata->cb_arg = (void *)intr_handle;
		rc = rte_epoll_ctl(epfd, epfd_op,
			rte_intr_efds_index_get(intr_handle, efd_idx), rev);
		if (!rc)
			RTE_LOG(DEBUG, EAL,
				"efd %d associated with vec %d added on epfd %d"
				"\n", rev->fd, vec, epfd);
		else
			rc = -EPERM;
		break;
	case RTE_INTR_EVENT_DEL:
		epfd_op = EPOLL_CTL_DEL;
		rev = rte_intr_elist_index_get(intr_handle, efd_idx);
		if (__atomic_load_n(&rev->status,
				__ATOMIC_RELAXED) == RTE_EPOLL_INVALID) {
			RTE_LOG(INFO, EAL, "Event does not exist.\n");
			return -EPERM;
		}

		rc = rte_epoll_ctl(rev->epfd, epfd_op, rev->fd, rev);
		if (rc)
			rc = -EPERM;
		break;
	default:
		RTE_LOG(ERR, EAL, "event op type mismatch\n");
		rc = -EPERM;
	}

	return rc;
}
void
rte_intr_free_epoll_fd(struct rte_intr_handle *intr_handle)
{
	uint32_t i;
	struct rte_epoll_event *rev;

	for (i = 0; i < (uint32_t)rte_intr_nb_efd_get(intr_handle); i++) {
		rev = rte_intr_elist_index_get(intr_handle, i);
		if (__atomic_load_n(&rev->status,
				__ATOMIC_RELAXED) == RTE_EPOLL_INVALID)
			continue;
		if (rte_epoll_ctl(rev->epfd, EPOLL_CTL_DEL, rev->fd, rev)) {
			/* force free if the entry is still valid */
			eal_epoll_data_safe_free(rev);
		}
	}
}
int
rte_intr_efd_enable(struct rte_intr_handle *intr_handle, uint32_t nb_efd)
{
	uint32_t i;
	int fd;
	uint32_t n = RTE_MIN(nb_efd, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);

	assert(nb_efd != 0);

	if (rte_intr_type_get(intr_handle) == RTE_INTR_HANDLE_VFIO_MSIX) {
		for (i = 0; i < n; i++) {
			fd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
			if (fd < 0) {
				RTE_LOG(ERR, EAL,
					"can't setup eventfd, error %i (%s)\n",
					errno, strerror(errno));
				return -errno;
			}

			if (rte_intr_efds_index_set(intr_handle, i, fd))
				return -rte_errno;
		}

		if (rte_intr_nb_efd_set(intr_handle, n))
			return -rte_errno;

		if (rte_intr_max_intr_set(intr_handle, NB_OTHER_INTR + n))
			return -rte_errno;
	} else if (rte_intr_type_get(intr_handle) == RTE_INTR_HANDLE_VDEV) {
		/* only check, initialization would be done in vdev driver.*/
		if ((uint64_t)rte_intr_efd_counter_size_get(intr_handle) >
				sizeof(union rte_intr_read_buffer)) {
			RTE_LOG(ERR, EAL, "the efd_counter_size is oversized\n");
			return -EINVAL;
		}
	} else {
		if (rte_intr_efds_index_set(intr_handle, 0, rte_intr_fd_get(intr_handle)))
			return -rte_errno;
		if (rte_intr_nb_efd_set(intr_handle, RTE_MIN(nb_efd, 1U)))
			return -rte_errno;
		if (rte_intr_max_intr_set(intr_handle, NB_OTHER_INTR))
			return -rte_errno;
	}

	return 0;
}
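/*
 * Usage sketch for the datapath-interrupt setup implied above
 * (hypothetical PMD/application code; nb_rx_queues and rxq[] are
 * placeholders):
 *
 *	if (rte_intr_cap_multiple(intr_handle) &&
 *			rte_intr_efd_enable(intr_handle, nb_rx_queues) == 0) {
 *		for (q = 0; q < nb_rx_queues; q++)
 *			rte_intr_rx_ctl(intr_handle, RTE_EPOLL_PER_THREAD,
 *					RTE_INTR_EVENT_ADD,
 *					RTE_INTR_VEC_RXTX_OFFSET + q,
 *					rxq[q]);
 *	}
 */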
void
rte_intr_efd_disable(struct rte_intr_handle *intr_handle)
{
	uint32_t i;

	rte_intr_free_epoll_fd(intr_handle);
	if (rte_intr_max_intr_get(intr_handle) > rte_intr_nb_efd_get(intr_handle)) {
		for (i = 0; i < (uint32_t)rte_intr_nb_efd_get(intr_handle); i++)
			close(rte_intr_efds_index_get(intr_handle, i));
	}
	rte_intr_nb_efd_set(intr_handle, 0);
	rte_intr_max_intr_set(intr_handle, 0);
}
int
rte_intr_dp_is_en(struct rte_intr_handle *intr_handle)
{
	return !(!rte_intr_nb_efd_get(intr_handle));
}

int
rte_intr_allow_others(struct rte_intr_handle *intr_handle)
{
	if (!rte_intr_dp_is_en(intr_handle))
		return 1;
	else
		/* true if a vector is left over for non-queue interrupts */
		return !!(rte_intr_max_intr_get(intr_handle) -
			rte_intr_nb_efd_get(intr_handle));
}

int
rte_intr_cap_multiple(struct rte_intr_handle *intr_handle)
{
	if (rte_intr_type_get(intr_handle) == RTE_INTR_HANDLE_VFIO_MSIX)
		return 1;

	if (rte_intr_type_get(intr_handle) == RTE_INTR_HANDLE_VDEV)
		return 1;

	return 0;
}

int rte_thread_is_intr(void)
{
	return pthread_equal(intr_thread, pthread_self());
}