1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2014 Intel Corporation
15 #include <sys/epoll.h>
16 #include <sys/signalfd.h>
17 #include <sys/ioctl.h>
18 #include <sys/eventfd.h>
22 #include <rte_common.h>
23 #include <rte_interrupts.h>
24 #include <rte_memory.h>
25 #include <rte_launch.h>
27 #include <rte_per_lcore.h>
28 #include <rte_lcore.h>
29 #include <rte_atomic.h>
30 #include <rte_branch_prediction.h>
31 #include <rte_debug.h>
33 #include <rte_errno.h>
34 #include <rte_spinlock.h>
35 #include <rte_pause.h>
37 #include "eal_private.h"
39 #include "eal_thread.h"
41 #define EAL_INTR_EPOLL_WAIT_FOREVER (-1)
42 #define NB_OTHER_INTR 1
44 static RTE_DEFINE_PER_LCORE(int, _epfd) = -1; /**< epoll fd per thread */
60 * union buffer for reading on different devices
62 union rte_intr_read_buffer {
63 int uio_intr_count; /* for uio device */
65 uint64_t vfio_intr_count; /* for vfio device */
67 uint64_t timerfd_num; /* for timerfd */
68 char charbuf[16]; /* for others */
71 TAILQ_HEAD(rte_intr_cb_list, rte_intr_callback);
72 TAILQ_HEAD(rte_intr_source_list, rte_intr_source);
74 struct rte_intr_callback {
75 TAILQ_ENTRY(rte_intr_callback) next;
76 rte_intr_callback_fn cb_fn; /**< callback address */
77 void *cb_arg; /**< parameter for callback */
80 struct rte_intr_source {
81 TAILQ_ENTRY(rte_intr_source) next;
82 struct rte_intr_handle intr_handle; /**< interrupt handle */
83 struct rte_intr_cb_list callbacks; /**< user callbacks */
87 /* global spinlock for interrupt data operation */
88 static rte_spinlock_t intr_lock = RTE_SPINLOCK_INITIALIZER;
90 /* union buffer for pipe read/write */
91 static union intr_pipefds intr_pipe;
93 /* interrupt sources list */
94 static struct rte_intr_source_list intr_sources;
96 /* interrupt handling thread */
97 static pthread_t intr_thread;
102 #define IRQ_SET_BUF_LEN (sizeof(struct vfio_irq_set) + sizeof(int))
103 /* irq set buffer length for queue interrupts and LSC interrupt */
104 #define MSIX_IRQ_SET_BUF_LEN (sizeof(struct vfio_irq_set) + \
105 sizeof(int) * (RTE_MAX_RXTX_INTR_VEC_ID + 1))
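/*
 * How the buffers above are laid out (illustrative sketch): VFIO_DEVICE_SET_IRQS
 * takes a variable-length struct vfio_irq_set whose eventfd(s) are appended
 * directly after the fixed header, hence header + n * sizeof(int):
 *
 *   char buf[sizeof(struct vfio_irq_set) + n * sizeof(int)];
 *   struct vfio_irq_set *s = (struct vfio_irq_set *)buf;
 *   int *fds = (int *)&s->data;    n eventfds follow the header
 */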
107 /* enable legacy (INTx) interrupts */
109 vfio_enable_intx(const struct rte_intr_handle *intr_handle) {
110 struct vfio_irq_set *irq_set;
111 char irq_set_buf[IRQ_SET_BUF_LEN];
115 len = sizeof(irq_set_buf);
118 irq_set = (struct vfio_irq_set *) irq_set_buf;
119 irq_set->argsz = len;
121 irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
122 irq_set->index = VFIO_PCI_INTX_IRQ_INDEX;
124 fd_ptr = (int *) &irq_set->data;
125 *fd_ptr = intr_handle->fd;
127 ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
130 RTE_LOG(ERR, EAL, "Error enabling INTx interrupts for fd %d\n",
135 /* unmask INTx after enabling */
136 memset(irq_set, 0, len);
137 len = sizeof(struct vfio_irq_set);
138 irq_set->argsz = len;
140 irq_set->flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_UNMASK;
141 irq_set->index = VFIO_PCI_INTX_IRQ_INDEX;
144 ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
147 RTE_LOG(ERR, EAL, "Error unmasking INTx interrupts for fd %d\n",
154 /* disable legacy (INTx) interrupts */
156 vfio_disable_intx(const struct rte_intr_handle *intr_handle) {
157 struct vfio_irq_set *irq_set;
158 char irq_set_buf[IRQ_SET_BUF_LEN];
161 len = sizeof(struct vfio_irq_set);
163 /* mask interrupts before disabling */
164 irq_set = (struct vfio_irq_set *) irq_set_buf;
165 irq_set->argsz = len;
167 irq_set->flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_MASK;
168 irq_set->index = VFIO_PCI_INTX_IRQ_INDEX;
171 ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
174 RTE_LOG(ERR, EAL, "Error masking INTx interrupts for fd %d\n",
180 memset(irq_set, 0, len);
181 irq_set->argsz = len;
183 irq_set->flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER;
184 irq_set->index = VFIO_PCI_INTX_IRQ_INDEX;
187 ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
191 "Error disabling INTx interrupts for fd %d\n", intr_handle->fd);
197 /* enable MSI interrupts */
199 vfio_enable_msi(const struct rte_intr_handle *intr_handle) {
201 char irq_set_buf[IRQ_SET_BUF_LEN];
202 struct vfio_irq_set *irq_set;
205 len = sizeof(irq_set_buf);
207 irq_set = (struct vfio_irq_set *) irq_set_buf;
208 irq_set->argsz = len;
210 irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
211 irq_set->index = VFIO_PCI_MSI_IRQ_INDEX;
213 fd_ptr = (int *) &irq_set->data;
214 *fd_ptr = intr_handle->fd;
216 ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
219 RTE_LOG(ERR, EAL, "Error enabling MSI interrupts for fd %d\n",
226 /* disable MSI interrupts */
228 vfio_disable_msi(const struct rte_intr_handle *intr_handle) {
229 struct vfio_irq_set *irq_set;
230 char irq_set_buf[IRQ_SET_BUF_LEN];
233 len = sizeof(struct vfio_irq_set);
235 irq_set = (struct vfio_irq_set *) irq_set_buf;
236 irq_set->argsz = len;
238 irq_set->flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER;
239 irq_set->index = VFIO_PCI_MSI_IRQ_INDEX;
242 ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
246 "Error disabling MSI interrupts for fd %d\n", intr_handle->fd);
251 /* enable MSI-X interrupts */
253 vfio_enable_msix(const struct rte_intr_handle *intr_handle) {
255 char irq_set_buf[MSIX_IRQ_SET_BUF_LEN];
256 struct vfio_irq_set *irq_set;
259 len = sizeof(irq_set_buf);
261 irq_set = (struct vfio_irq_set *) irq_set_buf;
262 irq_set->argsz = len;
263 /* 0 < irq_set->count < RTE_MAX_RXTX_INTR_VEC_ID + 1 */
264 irq_set->count = intr_handle->max_intr ?
265 (intr_handle->max_intr > RTE_MAX_RXTX_INTR_VEC_ID + 1 ?
266 RTE_MAX_RXTX_INTR_VEC_ID + 1 : intr_handle->max_intr) : 1;
267 irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
268 irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
270 fd_ptr = (int *) &irq_set->data;
271 /* INTR vector offset 0 reserve for non-efds mapping */
272 fd_ptr[RTE_INTR_VEC_ZERO_OFFSET] = intr_handle->fd;
273 memcpy(&fd_ptr[RTE_INTR_VEC_RXTX_OFFSET], intr_handle->efds,
274 sizeof(*intr_handle->efds) * intr_handle->nb_efd);
276 ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
279 RTE_LOG(ERR, EAL, "Error enabling MSI-X interrupts for fd %d\n",
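/*
 * Vector layout used above (sketch): slot 0 of the eventfd array carries
 * intr_handle->fd for the non-queue (e.g. link status) interrupt, and the
 * following slots carry the per-queue eventfds:
 *
 *   fd_ptr[RTE_INTR_VEC_ZERO_OFFSET]     -> intr_handle->fd
 *   fd_ptr[RTE_INTR_VEC_RXTX_OFFSET + i] -> intr_handle->efds[i]
 */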
287 /* disable MSI-X interrupts */
289 vfio_disable_msix(const struct rte_intr_handle *intr_handle) {
290 struct vfio_irq_set *irq_set;
291 char irq_set_buf[MSIX_IRQ_SET_BUF_LEN];
294 len = sizeof(struct vfio_irq_set);
296 irq_set = (struct vfio_irq_set *) irq_set_buf;
297 irq_set->argsz = len;
299 irq_set->flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER;
300 irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
303 ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
307 "Error disabling MSI-X interrupts for fd %d\n", intr_handle->fd);
314 uio_intx_intr_disable(const struct rte_intr_handle *intr_handle)
316 unsigned char command_high;
318 /* use UIO config file descriptor for uio_pci_generic */
319 if (pread(intr_handle->uio_cfg_fd, &command_high, 1, 5) != 1) {
321 "Error reading interrupts status for fd %d\n",
322 intr_handle->uio_cfg_fd);
325 /* disable interrupts */
327 if (pwrite(intr_handle->uio_cfg_fd, &command_high, 1, 5) != 1) {
329 "Error disabling interrupts for fd %d\n",
330 intr_handle->uio_cfg_fd);
338 uio_intx_intr_enable(const struct rte_intr_handle *intr_handle)
340 unsigned char command_high;
342 /* use UIO config file descriptor for uio_pci_generic */
343 if (pread(intr_handle->uio_cfg_fd, &command_high, 1, 5) != 1) {
345 "Error reading interrupts status for fd %d\n",
346 intr_handle->uio_cfg_fd);
349 /* enable interrupts */
350 command_high &= ~0x4;
351 if (pwrite(intr_handle->uio_cfg_fd, &command_high, 1, 5) != 1) {
353 "Error enabling interrupts for fd %d\n",
354 intr_handle->uio_cfg_fd);
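/*
 * Background note (a sketch, relying on the PCI spec rather than this file):
 * offset 5 is the high byte of the 16-bit PCI command register, and 0x4 in
 * that byte is the INTx Disable bit (bit 10 of the full register), so the
 * functions above toggle legacy interrupts via config space:
 *
 *   pread(cfg_fd, &hi, 1, 5);  hi &= ~0x4;  pwrite(cfg_fd, &hi, 1, 5);
 */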
362 uio_intr_disable(const struct rte_intr_handle *intr_handle)
366 if (write(intr_handle->fd, &value, sizeof(value)) < 0) {
368 "Error disabling interrupts for fd %d (%s)\n",
369 intr_handle->fd, strerror(errno));
376 uio_intr_enable(const struct rte_intr_handle *intr_handle)
380 if (write(intr_handle->fd, &value, sizeof(value)) < 0) {
382 "Error enabling interrupts for fd %d (%s)\n",
383 intr_handle->fd, strerror(errno));
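/*
 * The write() calls above go through the UIO driver's irqcontrol hook;
 * assuming the igb_uio driver, writing a 4-byte non-zero value enables the
 * device interrupt and writing zero disables it.
 */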
390 rte_intr_callback_register(const struct rte_intr_handle *intr_handle,
391 rte_intr_callback_fn cb, void *cb_arg)
393 int ret, wake_thread;
394 struct rte_intr_source *src;
395 struct rte_intr_callback *callback;
399 /* first do parameter checking */
400 if (intr_handle == NULL || intr_handle->fd < 0 || cb == NULL) {
402 "Registering with invalid input parameter\n");
406 /* allocate a new interrupt callback entity */
407 callback = calloc(1, sizeof(*callback));
408 if (callback == NULL) {
409 RTE_LOG(ERR, EAL, "Can not allocate memory\n");
412 callback->cb_fn = cb;
413 callback->cb_arg = cb_arg;
415 rte_spinlock_lock(&intr_lock);
417 /* check if there is at least one callback registered for the fd */
418 TAILQ_FOREACH(src, &intr_sources, next) {
419 if (src->intr_handle.fd == intr_handle->fd) {
420 /* we had no interrupts for this */
421 if (TAILQ_EMPTY(&src->callbacks))
424 TAILQ_INSERT_TAIL(&(src->callbacks), callback, next);
430 /* no existing callbacks for this - add new source */
432 src = calloc(1, sizeof(*src));
434 RTE_LOG(ERR, EAL, "Can not allocate memory\n");
438 src->intr_handle = *intr_handle;
439 TAILQ_INIT(&src->callbacks);
440 TAILQ_INSERT_TAIL(&(src->callbacks), callback, next);
441 TAILQ_INSERT_TAIL(&intr_sources, src, next);
447 rte_spinlock_unlock(&intr_lock);
450 * check whether we need to notify the pipe fd that epoll_wait is
451 * waiting on, so it rebuilds the wait list.
454 if (write(intr_pipe.writefd, "1", 1) < 0)
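/*
 * Typical use from a driver (illustrative sketch; the handler and device
 * names are hypothetical):
 *
 *   static void my_dev_intr_handler(void *cb_arg)
 *   {
 *           struct my_dev *dev = cb_arg;
 *           ... acknowledge and handle the event ...
 *   }
 *
 *   rte_intr_callback_register(&dev->intr_handle,
 *                              my_dev_intr_handler, dev);
 */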
461 rte_intr_callback_unregister(const struct rte_intr_handle *intr_handle,
462 rte_intr_callback_fn cb_fn, void *cb_arg)
465 struct rte_intr_source *src;
466 struct rte_intr_callback *cb, *next;
468 /* do parameter checking first */
469 if (intr_handle == NULL || intr_handle->fd < 0) {
471 "Unregistering with invalid input parameter\n");
475 rte_spinlock_lock(&intr_lock);
477 /* check whether an interrupt source exists for this fd */
478 TAILQ_FOREACH(src, &intr_sources, next)
479 if (src->intr_handle.fd == intr_handle->fd)
482 /* No interrupt source registered for the fd */
486 /* interrupt source has some active callbacks right now. */
487 } else if (src->active != 0) {
494 /* walk through the callbacks and remove all that match. */
495 for (cb = TAILQ_FIRST(&src->callbacks); cb != NULL; cb = next) {
497 next = TAILQ_NEXT(cb, next);
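/*
 * cb_arg == (void *)-1 acts as a wildcard: every callback registered
 * with this cb_fn is removed, whatever its argument.
 */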
499 if (cb->cb_fn == cb_fn && (cb_arg == (void *)-1 ||
500 cb->cb_arg == cb_arg)) {
501 TAILQ_REMOVE(&src->callbacks, cb, next);
507 /* all callbacks for that source are removed. */
508 if (TAILQ_EMPTY(&src->callbacks)) {
509 TAILQ_REMOVE(&intr_sources, src, next);
514 rte_spinlock_unlock(&intr_lock);
516 /* notify the pipe fd that epoll_wait waits on, so it rebuilds the wait list */
517 if (ret >= 0 && write(intr_pipe.writefd, "1", 1) < 0) {
525 rte_intr_enable(const struct rte_intr_handle *intr_handle)
527 if (intr_handle && intr_handle->type == RTE_INTR_HANDLE_VDEV)
530 if (!intr_handle || intr_handle->fd < 0 || intr_handle->uio_cfg_fd < 0)
533 switch (intr_handle->type){
534 /* write to the uio fd to enable the interrupt */
535 case RTE_INTR_HANDLE_UIO:
536 if (uio_intr_enable(intr_handle))
539 case RTE_INTR_HANDLE_UIO_INTX:
540 if (uio_intx_intr_enable(intr_handle))
543 /* not used at this moment */
544 case RTE_INTR_HANDLE_ALARM:
547 case RTE_INTR_HANDLE_VFIO_MSIX:
548 if (vfio_enable_msix(intr_handle))
551 case RTE_INTR_HANDLE_VFIO_MSI:
552 if (vfio_enable_msi(intr_handle))
555 case RTE_INTR_HANDLE_VFIO_LEGACY:
556 if (vfio_enable_intx(intr_handle))
560 /* not used at this moment */
561 case RTE_INTR_HANDLE_DEV_EVENT:
563 /* unknown handle type */
566 "Unknown handle type of fd %d\n",
575 rte_intr_disable(const struct rte_intr_handle *intr_handle)
577 if (intr_handle && intr_handle->type == RTE_INTR_HANDLE_VDEV)
580 if (!intr_handle || intr_handle->fd < 0 || intr_handle->uio_cfg_fd < 0)
583 switch (intr_handle->type){
584 /* write to the uio fd to disable the interrupt */
585 case RTE_INTR_HANDLE_UIO:
586 if (uio_intr_disable(intr_handle))
589 case RTE_INTR_HANDLE_UIO_INTX:
590 if (uio_intx_intr_disable(intr_handle))
593 /* not used at this moment */
594 case RTE_INTR_HANDLE_ALARM:
597 case RTE_INTR_HANDLE_VFIO_MSIX:
598 if (vfio_disable_msix(intr_handle))
601 case RTE_INTR_HANDLE_VFIO_MSI:
602 if (vfio_disable_msi(intr_handle))
605 case RTE_INTR_HANDLE_VFIO_LEGACY:
606 if (vfio_disable_intx(intr_handle))
610 /* not used at this moment */
611 case RTE_INTR_HANDLE_DEV_EVENT:
613 /* unknown handle type */
616 "Unknown handle type of fd %d\n",
625 eal_intr_process_interrupts(struct epoll_event *events, int nfds)
629 struct rte_intr_source *src;
630 struct rte_intr_callback *cb;
631 union rte_intr_read_buffer buf;
632 struct rte_intr_callback active_cb;
634 for (n = 0; n < nfds; n++) {
637 * if the pipe fd is ready to read, return so that the
638 * caller rebuilds the wait list.
640 if (events[n].data.fd == intr_pipe.readfd){
641 int r = read(intr_pipe.readfd, buf.charbuf,
642 sizeof(buf.charbuf));
646 rte_spinlock_lock(&intr_lock);
647 TAILQ_FOREACH(src, &intr_sources, next)
648 if (src->intr_handle.fd ==
652 rte_spinlock_unlock(&intr_lock);
656 /* mark this interrupt source as active and release the lock. */
658 rte_spinlock_unlock(&intr_lock);
660 /* set the number of bytes to read for each handle type */
661 switch (src->intr_handle.type) {
662 case RTE_INTR_HANDLE_UIO:
663 case RTE_INTR_HANDLE_UIO_INTX:
664 bytes_read = sizeof(buf.uio_intr_count);
666 case RTE_INTR_HANDLE_ALARM:
667 bytes_read = sizeof(buf.timerfd_num);
670 case RTE_INTR_HANDLE_VFIO_MSIX:
671 case RTE_INTR_HANDLE_VFIO_MSI:
672 case RTE_INTR_HANDLE_VFIO_LEGACY:
673 bytes_read = sizeof(buf.vfio_intr_count);
676 case RTE_INTR_HANDLE_VDEV:
677 case RTE_INTR_HANDLE_EXT:
681 case RTE_INTR_HANDLE_DEV_EVENT:
690 if (bytes_read > 0) {
692 * read out to clear the ready-to-be-read flag
695 bytes_read = read(events[n].data.fd, &buf, bytes_read);
696 if (bytes_read < 0) {
697 if (errno == EINTR || errno == EWOULDBLOCK)
700 RTE_LOG(ERR, EAL, "Error reading from file "
701 "descriptor %d: %s\n",
704 } else if (bytes_read == 0)
705 RTE_LOG(ERR, EAL, "Read nothing from file "
706 "descriptor %d\n", events[n].data.fd);
711 /* take the lock again to call the callbacks and update the status. */
712 rte_spinlock_lock(&intr_lock);
716 /* Finally, call all callbacks. */
717 TAILQ_FOREACH(cb, &src->callbacks, next) {
719 /* make a copy and unlock. */
721 rte_spinlock_unlock(&intr_lock);
723 /* call the actual callback */
724 active_cb.cb_fn(active_cb.cb_arg);
726 /* get the lock back. */
727 rte_spinlock_lock(&intr_lock);
731 /* we are done with this interrupt source; release it. */
733 rte_spinlock_unlock(&intr_lock);
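/*
 * Concurrency note on the pattern above (a sketch of the intent): the source
 * is marked active and each callback is copied into active_cb before the lock
 * is dropped, so rte_intr_callback_unregister() can run concurrently; it
 * checks src->active and backs off while callbacks are still executing.
 */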
740 * It handles all the interrupts.
743 * epoll file descriptor.
745 * The number of file descriptors added to epoll.
751 eal_intr_handle_interrupts(int pfd, unsigned totalfds)
753 struct epoll_event events[totalfds];
757 nfds = epoll_wait(pfd, events, totalfds,
758 EAL_INTR_EPOLL_WAIT_FOREVER);
759 /* epoll_wait failed */
764 "epoll_wait returns with fail\n");
767 /* epoll_wait timed out; this should never happen here */
770 /* epoll_wait has at least one fd ready to read */
771 if (eal_intr_process_interrupts(events, nfds) < 0)
777 * It builds/rebuilds up the epoll file descriptor with all the
778 * file descriptors being waited on. Then handles the interrupts.
786 static __attribute__((noreturn)) void *
787 eal_intr_thread_main(__rte_unused void *arg)
789 struct epoll_event ev;
791 /* host thread, never break out */
793 /* build up the epoll fd with all descriptors we are to
794 * wait on then pass it to the handle_interrupts function
796 static struct epoll_event pipe_event = {
797 .events = EPOLLIN | EPOLLPRI,
799 struct rte_intr_source *src;
802 /* create epoll fd */
803 int pfd = epoll_create(1);
805 rte_panic("Cannot create epoll instance\n");
807 pipe_event.data.fd = intr_pipe.readfd;
809 * add the pipe fd to the wait list; this pipe is used to
810 * trigger a rebuild of the wait list.
812 if (epoll_ctl(pfd, EPOLL_CTL_ADD, intr_pipe.readfd,
814 rte_panic("Error adding fd %d to epoll_ctl, %s\n",
815 intr_pipe.readfd, strerror(errno));
819 rte_spinlock_lock(&intr_lock);
821 TAILQ_FOREACH(src, &intr_sources, next) {
822 if (src->callbacks.tqh_first == NULL)
823 continue; /* skip those with no callbacks */
824 ev.events = EPOLLIN | EPOLLPRI | EPOLLRDHUP | EPOLLHUP;
825 ev.data.fd = src->intr_handle.fd;
828 * add all the device file descriptors
831 if (epoll_ctl(pfd, EPOLL_CTL_ADD,
832 src->intr_handle.fd, &ev) < 0){
833 rte_panic("Error adding fd %d to epoll_ctl, %s\n",
834 src->intr_handle.fd, strerror(errno));
839 rte_spinlock_unlock(&intr_lock);
840 /* serve the interrupt */
841 eal_intr_handle_interrupts(pfd, numfds);
844 * when we return, we need to rebuild the
845 * list of fds to monitor.
852 rte_eal_intr_init(void)
856 /* init the global interrupt source head */
857 TAILQ_INIT(&intr_sources);
860 * create a pipe that epoll will wait on; writing to it notifies
861 * the interrupt thread to rebuild the epoll wait list.
863 if (pipe(intr_pipe.pipefd) < 0) {
868 /* create the host thread to wait for and handle interrupts */
869 ret = rte_ctrl_thread_create(&intr_thread, "eal-intr-thread", NULL,
870 eal_intr_thread_main, NULL);
874 "Failed to create thread for interrupt handling\n");
881 eal_intr_proc_rxtx_intr(int fd, const struct rte_intr_handle *intr_handle)
883 union rte_intr_read_buffer buf;
887 switch (intr_handle->type) {
888 case RTE_INTR_HANDLE_UIO:
889 case RTE_INTR_HANDLE_UIO_INTX:
890 bytes_read = sizeof(buf.uio_intr_count);
893 case RTE_INTR_HANDLE_VFIO_MSIX:
894 case RTE_INTR_HANDLE_VFIO_MSI:
895 case RTE_INTR_HANDLE_VFIO_LEGACY:
896 bytes_read = sizeof(buf.vfio_intr_count);
899 case RTE_INTR_HANDLE_VDEV:
900 bytes_read = intr_handle->efd_counter_size;
901 /* For vdev, number of bytes to read is set by driver */
903 case RTE_INTR_HANDLE_EXT:
907 RTE_LOG(INFO, EAL, "unexpected intr type\n");
912 * read out to clear the ready-to-be-read flag
918 nbytes = read(fd, &buf, bytes_read);
920 if (errno == EINTR || errno == EWOULDBLOCK ||
924 "Error reading from fd %d: %s\n",
925 fd, strerror(errno));
926 } else if (nbytes == 0)
927 RTE_LOG(ERR, EAL, "Read nothing from fd %d\n", fd);
933 eal_epoll_process_event(struct epoll_event *evs, unsigned int n,
934 struct rte_epoll_event *events)
936 unsigned int i, count = 0;
937 struct rte_epoll_event *rev;
939 for (i = 0; i < n; i++) {
940 rev = evs[i].data.ptr;
941 if (!rev || !rte_atomic32_cmpset(&rev->status, RTE_EPOLL_VALID,
945 events[count].status = RTE_EPOLL_VALID;
946 events[count].fd = rev->fd;
947 events[count].epfd = rev->epfd;
948 events[count].epdata.event = rev->epdata.event;
949 events[count].epdata.data = rev->epdata.data;
950 if (rev->epdata.cb_fun)
951 rev->epdata.cb_fun(rev->fd,
954 rte_compiler_barrier();
955 rev->status = RTE_EPOLL_VALID;
962 eal_init_tls_epfd(void)
964 int pfd = epoll_create(255);
968 "Cannot create epoll instance\n");
975 rte_intr_tls_epfd(void)
977 if (RTE_PER_LCORE(_epfd) == -1)
978 RTE_PER_LCORE(_epfd) = eal_init_tls_epfd();
980 return RTE_PER_LCORE(_epfd);
984 rte_epoll_wait(int epfd, struct rte_epoll_event *events,
985 int maxevents, int timeout)
987 struct epoll_event evs[maxevents];
991 RTE_LOG(ERR, EAL, "rte_epoll_event can't be NULL\n");
995 /* using per thread epoll fd */
996 if (epfd == RTE_EPOLL_PER_THREAD)
997 epfd = rte_intr_tls_epfd();
1000 rc = epoll_wait(epfd, evs, maxevents, timeout);
1001 if (likely(rc > 0)) {
1002 /* epoll_wait has at least one fd ready to read */
1003 rc = eal_epoll_process_event(evs, rc, events);
1005 } else if (rc < 0) {
1008 /* epoll_wait failed */
1009 RTE_LOG(ERR, EAL, "epoll_wait failed: %s\n",
1014 /* rc == 0, epoll_wait timed out */
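/*
 * Typical call (illustrative sketch; handle() is hypothetical): wait on the
 * calling thread's private epoll instance with no timeout:
 *
 *   struct rte_epoll_event ev[8];
 *   int n = rte_epoll_wait(RTE_EPOLL_PER_THREAD, ev, 8, -1);
 *   for (int i = 0; i < n; i++)
 *           handle(ev[i].fd, ev[i].epdata.data);
 */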
1023 eal_epoll_data_safe_free(struct rte_epoll_event *ev)
1025 while (!rte_atomic32_cmpset(&ev->status, RTE_EPOLL_VALID,
1027 while (ev->status != RTE_EPOLL_VALID)
1029 memset(&ev->epdata, 0, sizeof(ev->epdata));
1035 rte_epoll_ctl(int epfd, int op, int fd,
1036 struct rte_epoll_event *event)
1038 struct epoll_event ev;
1041 RTE_LOG(ERR, EAL, "rte_epoll_event can't be NULL\n");
1045 /* using per thread epoll fd */
1046 if (epfd == RTE_EPOLL_PER_THREAD)
1047 epfd = rte_intr_tls_epfd();
1049 if (op == EPOLL_CTL_ADD) {
1050 event->status = RTE_EPOLL_VALID;
1051 event->fd = fd; /* ignore fd in event */
1053 ev.data.ptr = (void *)event;
1056 ev.events = event->epdata.event;
1057 if (epoll_ctl(epfd, op, fd, &ev) < 0) {
1058 RTE_LOG(ERR, EAL, "Error op %d fd %d epoll_ctl, %s\n",
1059 op, fd, strerror(errno));
1060 if (op == EPOLL_CTL_ADD)
1061 /* roll back the status when CTL_ADD fails */
1062 event->status = RTE_EPOLL_INVALID;
1066 if (op == EPOLL_CTL_DEL && event->status != RTE_EPOLL_INVALID)
1067 eal_epoll_data_safe_free(event);
1073 rte_intr_rx_ctl(struct rte_intr_handle *intr_handle, int epfd,
1074 int op, unsigned int vec, void *data)
1076 struct rte_epoll_event *rev;
1077 struct rte_epoll_data *epdata;
1079 unsigned int efd_idx;
1082 efd_idx = (vec >= RTE_INTR_VEC_RXTX_OFFSET) ?
1083 (vec - RTE_INTR_VEC_RXTX_OFFSET) : vec;
1085 if (!intr_handle || intr_handle->nb_efd == 0 ||
1086 efd_idx >= intr_handle->nb_efd) {
1087 RTE_LOG(ERR, EAL, "Wrong intr vector number.\n");
1092 case RTE_INTR_EVENT_ADD:
1093 epfd_op = EPOLL_CTL_ADD;
1094 rev = &intr_handle->elist[efd_idx];
1095 if (rev->status != RTE_EPOLL_INVALID) {
1096 RTE_LOG(INFO, EAL, "Event has already been added.\n");
1100 /* attach to intr vector fd */
1101 epdata = &rev->epdata;
1102 epdata->event = EPOLLIN | EPOLLPRI | EPOLLET;
1103 epdata->data = data;
1104 epdata->cb_fun = (rte_intr_event_cb_t)eal_intr_proc_rxtx_intr;
1105 epdata->cb_arg = (void *)intr_handle;
1106 rc = rte_epoll_ctl(epfd, epfd_op,
1107 intr_handle->efds[efd_idx], rev);
1110 "efd %d associated with vec %d added on epfd %d"
1111 "\n", rev->fd, vec, epfd);
1115 case RTE_INTR_EVENT_DEL:
1116 epfd_op = EPOLL_CTL_DEL;
1117 rev = &intr_handle->elist[efd_idx];
1118 if (rev->status == RTE_EPOLL_INVALID) {
1119 RTE_LOG(INFO, EAL, "Event does not exist.\n");
1123 rc = rte_epoll_ctl(rev->epfd, epfd_op, rev->fd, rev);
1128 RTE_LOG(ERR, EAL, "event op type mismatch\n");
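/*
 * Illustrative sketch of arming an Rx queue interrupt on the caller's
 * per-thread epoll fd (queue_id and data are hypothetical; ethdev
 * applications normally reach this through rte_eth_dev_rx_intr_ctl_q()):
 *
 *   rte_intr_rx_ctl(intr_handle, RTE_EPOLL_PER_THREAD, RTE_INTR_EVENT_ADD,
 *                   RTE_INTR_VEC_RXTX_OFFSET + queue_id, data);
 */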
1136 rte_intr_free_epoll_fd(struct rte_intr_handle *intr_handle)
1139 struct rte_epoll_event *rev;
1141 for (i = 0; i < intr_handle->nb_efd; i++) {
1142 rev = &intr_handle->elist[i];
1143 if (rev->status == RTE_EPOLL_INVALID)
1145 if (rte_epoll_ctl(rev->epfd, EPOLL_CTL_DEL, rev->fd, rev)) {
1146 /* force-free the entry if it is still valid */
1147 eal_epoll_data_safe_free(rev);
1148 rev->status = RTE_EPOLL_INVALID;
1154 rte_intr_efd_enable(struct rte_intr_handle *intr_handle, uint32_t nb_efd)
1158 uint32_t n = RTE_MIN(nb_efd, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
1160 assert(nb_efd != 0);
1162 if (intr_handle->type == RTE_INTR_HANDLE_VFIO_MSIX) {
1163 for (i = 0; i < n; i++) {
1164 fd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
1167 "can't setup eventfd, error %i (%s)\n",
1168 errno, strerror(errno));
1171 intr_handle->efds[i] = fd;
1173 intr_handle->nb_efd = n;
1174 intr_handle->max_intr = NB_OTHER_INTR + n;
1175 } else if (intr_handle->type == RTE_INTR_HANDLE_VDEV) {
1176 /* only check here; initialization is done in the vdev driver. */
1177 if (intr_handle->efd_counter_size >
1178 sizeof(union rte_intr_read_buffer)) {
1179 RTE_LOG(ERR, EAL, "the efd_counter_size is oversized\n");
1183 intr_handle->efds[0] = intr_handle->fd;
1184 intr_handle->nb_efd = RTE_MIN(nb_efd, 1U);
1185 intr_handle->max_intr = NB_OTHER_INTR;
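/*
 * Usage sketch (nb_rx_queues is hypothetical): a PMD would typically call
 * this while configuring the device so each Rx queue gets its own eventfd
 * vector, leaving vector 0 for the other (e.g. link status) interrupt:
 *
 *   rte_intr_efd_enable(intr_handle, nb_rx_queues);
 */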
1192 rte_intr_efd_disable(struct rte_intr_handle *intr_handle)
1196 rte_intr_free_epoll_fd(intr_handle);
1197 if (intr_handle->max_intr > intr_handle->nb_efd) {
1198 for (i = 0; i < intr_handle->nb_efd; i++)
1199 close(intr_handle->efds[i]);
1201 intr_handle->nb_efd = 0;
1202 intr_handle->max_intr = 0;
1206 rte_intr_dp_is_en(struct rte_intr_handle *intr_handle)
1208 return !(!intr_handle->nb_efd);
1212 rte_intr_allow_others(struct rte_intr_handle *intr_handle)
1214 if (!rte_intr_dp_is_en(intr_handle))
1217 return !!(intr_handle->max_intr - intr_handle->nb_efd);
1221 rte_intr_cap_multiple(struct rte_intr_handle *intr_handle)
1223 if (intr_handle->type == RTE_INTR_HANDLE_VFIO_MSIX)
1226 if (intr_handle->type == RTE_INTR_HANDLE_VDEV)