/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <pthread.h>
#include <sys/queue.h>
#include <sys/epoll.h>
#include <sys/signalfd.h>
#include <sys/ioctl.h>
#include <sys/eventfd.h>

#include <rte_common.h>
#include <rte_interrupts.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_debug.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_errno.h>
#include <rte_spinlock.h>

#include "eal_private.h"
#define EAL_INTR_EPOLL_WAIT_FOREVER (-1)
#define NB_OTHER_INTR               1

static RTE_DEFINE_PER_LCORE(int, _epfd) = -1; /**< epoll fd per thread */
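/*
 * The per-lcore epoll fd above is created lazily: rte_intr_tls_epfd()
 * below opens it on first use, so threads that never pass
 * RTE_EPOLL_PER_THREAD to rte_epoll_wait() pay no cost.
 */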
/**
 * union for pipe fds; the same two descriptors can be addressed either as
 * the pipefd[] pair passed to pipe() or by the readfd/writefd names used
 * throughout this file.
 */
union intr_pipefds{
	struct {
		int pipefd[2];
	};
	struct {
		int readfd;
		int writefd;
	};
};

/**
 * union buffer for reading on different devices
 */
union rte_intr_read_buffer {
	int uio_intr_count;              /* for uio device */
	uint64_t vfio_intr_count;        /* for vfio device */
	uint64_t timerfd_num;            /* for timerfd */
	char charbuf[16];                /* for others */
};

TAILQ_HEAD(rte_intr_cb_list, rte_intr_callback);
TAILQ_HEAD(rte_intr_source_list, rte_intr_source);
struct rte_intr_callback {
	TAILQ_ENTRY(rte_intr_callback) next;
	rte_intr_callback_fn cb_fn;  /**< callback address */
	void *cb_arg;                /**< parameter for callback */
};

struct rte_intr_source {
	TAILQ_ENTRY(rte_intr_source) next;
	struct rte_intr_handle intr_handle; /**< interrupt handle */
	struct rte_intr_cb_list callbacks;  /**< user callbacks */
	uint32_t active;                    /**< non-zero while its callbacks run */
};
/* global spinlock for interrupt data operation */
static rte_spinlock_t intr_lock = RTE_SPINLOCK_INITIALIZER;

/* union buffer for pipe read/write */
static union intr_pipefds intr_pipe;

/* interrupt sources list */
static struct rte_intr_source_list intr_sources;

/* interrupt handling thread */
static pthread_t intr_thread;
/* VFIO interrupts */

/* irq set buffer length for legacy (INTx) and MSI interrupts */
#define IRQ_SET_BUF_LEN  (sizeof(struct vfio_irq_set) + sizeof(int))

/* irq set buffer length for queue interrupts and LSC interrupt */
#define MSIX_IRQ_SET_BUF_LEN (sizeof(struct vfio_irq_set) + \
			      sizeof(int) * (RTE_MAX_RXTX_INTR_VEC_ID + 1))
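/*
 * VFIO_DEVICE_SET_IRQS takes a variable-length argument: a struct
 * vfio_irq_set header immediately followed by its payload.  With the
 * EVENTFD data type used below, the payload is an array of eventfd file
 * descriptors, one per interrupt vector, which is why the buffer sizes
 * above add sizeof(int) per vector to the header size.
 */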
/* enable legacy (INTx) interrupts */
static int
vfio_enable_intx(struct rte_intr_handle *intr_handle) {
	struct vfio_irq_set *irq_set;
	char irq_set_buf[IRQ_SET_BUF_LEN];
	int len, ret;
	int *fd_ptr;

	len = sizeof(irq_set_buf);

	/* enable INTx */
	irq_set = (struct vfio_irq_set *) irq_set_buf;
	irq_set->argsz = len;
	irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
	irq_set->index = VFIO_PCI_INTX_IRQ_INDEX;
	fd_ptr = (int *) &irq_set->data;
	*fd_ptr = intr_handle->fd;

	ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
	if (ret) {
		RTE_LOG(ERR, EAL, "Error enabling INTx interrupts for fd %d\n",
			intr_handle->fd);
		return -1;
	}

	/* unmask INTx after enabling */
	memset(irq_set, 0, len);
	len = sizeof(struct vfio_irq_set);
	irq_set->argsz = len;
	irq_set->flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_UNMASK;
	irq_set->index = VFIO_PCI_INTX_IRQ_INDEX;

	ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
	if (ret) {
		RTE_LOG(ERR, EAL, "Error unmasking INTx interrupts for fd %d\n",
			intr_handle->fd);
		return -1;
	}

	return 0;
}
/* disable legacy (INTx) interrupts */
static int
vfio_disable_intx(struct rte_intr_handle *intr_handle) {
	struct vfio_irq_set *irq_set;
	char irq_set_buf[IRQ_SET_BUF_LEN];
	int len, ret;

	len = sizeof(struct vfio_irq_set);

	/* mask interrupts before disabling */
	irq_set = (struct vfio_irq_set *) irq_set_buf;
	irq_set->argsz = len;
	irq_set->flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_MASK;
	irq_set->index = VFIO_PCI_INTX_IRQ_INDEX;

	ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
	if (ret) {
		RTE_LOG(ERR, EAL, "Error masking INTx interrupts for fd %d\n",
			intr_handle->fd);
		return -1;
	}

	/* disable INTx */
	memset(irq_set, 0, len);
	irq_set->argsz = len;
	irq_set->flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER;
	irq_set->index = VFIO_PCI_INTX_IRQ_INDEX;

	ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
	if (ret) {
		RTE_LOG(ERR, EAL,
			"Error disabling INTx interrupts for fd %d\n", intr_handle->fd);
		return -1;
	}

	return 0;
}
/* enable MSI interrupts */
static int
vfio_enable_msi(struct rte_intr_handle *intr_handle) {
	int len, ret;
	char irq_set_buf[IRQ_SET_BUF_LEN];
	struct vfio_irq_set *irq_set;
	int *fd_ptr;

	len = sizeof(irq_set_buf);

	irq_set = (struct vfio_irq_set *) irq_set_buf;
	irq_set->argsz = len;
	irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
	irq_set->index = VFIO_PCI_MSI_IRQ_INDEX;
	fd_ptr = (int *) &irq_set->data;
	*fd_ptr = intr_handle->fd;

	ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
	if (ret) {
		RTE_LOG(ERR, EAL, "Error enabling MSI interrupts for fd %d\n",
			intr_handle->fd);
		return -1;
	}

	return 0;
}
/* disable MSI interrupts */
static int
vfio_disable_msi(struct rte_intr_handle *intr_handle) {
	struct vfio_irq_set *irq_set;
	char irq_set_buf[IRQ_SET_BUF_LEN];
	int len, ret;

	len = sizeof(struct vfio_irq_set);

	irq_set = (struct vfio_irq_set *) irq_set_buf;
	irq_set->argsz = len;
	irq_set->flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER;
	irq_set->index = VFIO_PCI_MSI_IRQ_INDEX;

	ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
	if (ret)
		RTE_LOG(ERR, EAL,
			"Error disabling MSI interrupts for fd %d\n", intr_handle->fd);

	return ret;
}
/* enable MSI-X interrupts */
static int
vfio_enable_msix(struct rte_intr_handle *intr_handle) {
	int len, ret;
	char irq_set_buf[MSIX_IRQ_SET_BUF_LEN];
	struct vfio_irq_set *irq_set;
	int *fd_ptr;

	len = sizeof(irq_set_buf);

	irq_set = (struct vfio_irq_set *) irq_set_buf;
	irq_set->argsz = len;
	if (!intr_handle->max_intr)
		intr_handle->max_intr = 1;
	else if (intr_handle->max_intr > RTE_MAX_RXTX_INTR_VEC_ID)
		intr_handle->max_intr = RTE_MAX_RXTX_INTR_VEC_ID + 1;

	irq_set->count = intr_handle->max_intr;
	irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
	irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
	fd_ptr = (int *) &irq_set->data;
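	/*
	 * The data area is filled with one eventfd per vector: the Rx queue
	 * event fds come from efds[] and the last slot carries the handle's
	 * own fd, which services the remaining (e.g. link status) interrupt.
	 */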
#ifdef RTE_NEXT_ABI
	memcpy(fd_ptr, intr_handle->efds, sizeof(intr_handle->efds));
	fd_ptr[intr_handle->max_intr - 1] = intr_handle->fd;
#else
	fd_ptr[0] = intr_handle->fd;
#endif

	ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
	if (ret) {
		RTE_LOG(ERR, EAL, "Error enabling MSI-X interrupts for fd %d\n",
			intr_handle->fd);
		return -1;
	}

	return 0;
}
/* disable MSI-X interrupts */
static int
vfio_disable_msix(struct rte_intr_handle *intr_handle) {
	struct vfio_irq_set *irq_set;
	char irq_set_buf[MSIX_IRQ_SET_BUF_LEN];
	int len, ret;

	len = sizeof(struct vfio_irq_set);

	irq_set = (struct vfio_irq_set *) irq_set_buf;
	irq_set->argsz = len;
	irq_set->flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER;
	irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;

	ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
	if (ret)
		RTE_LOG(ERR, EAL,
			"Error disabling MSI-X interrupts for fd %d\n", intr_handle->fd);

	return ret;
}
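/*
 * For uio_pci_generic there is no irqcontrol hook, so legacy interrupts
 * are toggled directly through the PCI command register: offset 5 is the
 * high byte of the 16-bit command register and bit 0x4 of that byte is
 * the Interrupt Disable bit (bit 10 of the full register).  Setting the
 * bit masks INTx; clearing it re-enables INTx.
 */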
static int
uio_intx_intr_disable(struct rte_intr_handle *intr_handle)
{
	unsigned char command_high;

	/* use UIO config file descriptor for uio_pci_generic */
	if (pread(intr_handle->uio_cfg_fd, &command_high, 1, 5) != 1) {
		RTE_LOG(ERR, EAL,
			"Error reading interrupts status for fd %d\n",
			intr_handle->uio_cfg_fd);
		return -1;
	}
	/* disable interrupts */
	command_high |= 0x4;
	if (pwrite(intr_handle->uio_cfg_fd, &command_high, 1, 5) != 1) {
		RTE_LOG(ERR, EAL,
			"Error disabling interrupts for fd %d\n",
			intr_handle->uio_cfg_fd);
		return -1;
	}

	return 0;
}
static int
uio_intx_intr_enable(struct rte_intr_handle *intr_handle)
{
	unsigned char command_high;

	/* use UIO config file descriptor for uio_pci_generic */
	if (pread(intr_handle->uio_cfg_fd, &command_high, 1, 5) != 1) {
		RTE_LOG(ERR, EAL,
			"Error reading interrupts status for fd %d\n",
			intr_handle->uio_cfg_fd);
		return -1;
	}
	/* enable interrupts */
	command_high &= ~0x4;
	if (pwrite(intr_handle->uio_cfg_fd, &command_high, 1, 5) != 1) {
		RTE_LOG(ERR, EAL,
			"Error enabling interrupts for fd %d\n",
			intr_handle->uio_cfg_fd);
		return -1;
	}

	return 0;
}
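/*
 * igb_uio implements the UIO irqcontrol hook: writing a 4-byte integer to
 * the /dev/uioX fd reaches the driver, where 0 masks and 1 unmasks the
 * device interrupt.  The two helpers below rely on that convention.
 */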
static int
uio_intr_disable(struct rte_intr_handle *intr_handle)
{
	const int value = 0;

	if (write(intr_handle->fd, &value, sizeof(value)) < 0) {
		RTE_LOG(ERR, EAL,
			"Error disabling interrupts for fd %d (%s)\n",
			intr_handle->fd, strerror(errno));
		return -1;
	}
	return 0;
}
static int
uio_intr_enable(struct rte_intr_handle *intr_handle)
{
	const int value = 1;

	if (write(intr_handle->fd, &value, sizeof(value)) < 0) {
		RTE_LOG(ERR, EAL,
			"Error enabling interrupts for fd %d (%s)\n",
			intr_handle->fd, strerror(errno));
		return -1;
	}
	return 0;
}
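/*
 * Registration keeps one rte_intr_source per fd.  A new callback is
 * appended to the source's list; if the fd was not tracked before (or had
 * an empty callback list), a byte is written to intr_pipe so the interrupt
 * thread wakes up and rebuilds its epoll wait list to include the fd.
 */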
int
rte_intr_callback_register(struct rte_intr_handle *intr_handle,
			rte_intr_callback_fn cb, void *cb_arg)
{
	int ret, wake_thread;
	struct rte_intr_source *src;
	struct rte_intr_callback *callback;

	wake_thread = 0;

	/* first do parameter checking */
	if (intr_handle == NULL || intr_handle->fd < 0 || cb == NULL) {
		RTE_LOG(ERR, EAL,
			"Registering with invalid input parameter\n");
		return -EINVAL;
	}

	/* allocate a new interrupt callback entity */
	callback = rte_zmalloc("interrupt callback list",
				sizeof(*callback), 0);
	if (callback == NULL) {
		RTE_LOG(ERR, EAL, "Can not allocate memory\n");
		return -ENOMEM;
	}
	callback->cb_fn = cb;
	callback->cb_arg = cb_arg;

	rte_spinlock_lock(&intr_lock);

	/* check if there is at least one callback registered for the fd */
	TAILQ_FOREACH(src, &intr_sources, next) {
		if (src->intr_handle.fd == intr_handle->fd) {
			/* we had no interrupts for this */
			if (TAILQ_EMPTY(&src->callbacks))
				wake_thread = 1;

			TAILQ_INSERT_TAIL(&(src->callbacks), callback, next);
			ret = 0;
			break;
		}
	}

	/* no existing callbacks for this fd - add a new source */
	if (src == NULL) {
		if ((src = rte_zmalloc("interrupt source list",
				sizeof(*src), 0)) == NULL) {
			RTE_LOG(ERR, EAL, "Can not allocate memory\n");
			rte_free(callback);
			ret = -ENOMEM;
		} else {
			src->intr_handle = *intr_handle;
			TAILQ_INIT(&src->callbacks);
			TAILQ_INSERT_TAIL(&(src->callbacks), callback, next);
			TAILQ_INSERT_TAIL(&intr_sources, src, next);
			wake_thread = 1;
			ret = 0;
		}
	}

	rte_spinlock_unlock(&intr_lock);

	/*
	 * if needed, notify the pipe fd waited on by the interrupt thread's
	 * epoll_wait so that it rebuilds the wait list.
	 */
	if (wake_thread)
		if (write(intr_pipe.writefd, "1", 1) < 0)
			return -EPIPE;

	return ret;
}
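/*
 * Unregistering is refused while the source is marked active, i.e. while
 * the interrupt thread is in the middle of invoking its callbacks; the
 * caller is expected to retry once the callbacks have returned.
 */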
int
rte_intr_callback_unregister(struct rte_intr_handle *intr_handle,
			rte_intr_callback_fn cb_fn, void *cb_arg)
{
	int ret;
	struct rte_intr_source *src;
	struct rte_intr_callback *cb, *next;

	/* do parameter checking first */
	if (intr_handle == NULL || intr_handle->fd < 0) {
		RTE_LOG(ERR, EAL,
			"Unregistering with invalid input parameter\n");
		return -EINVAL;
	}

	rte_spinlock_lock(&intr_lock);

	/* check if the interrupt source for the fd exists */
	TAILQ_FOREACH(src, &intr_sources, next)
		if (src->intr_handle.fd == intr_handle->fd)
			break;

	/* No interrupt source registered for the fd */
	if (src == NULL) {
		ret = -ENOENT;

	/* interrupt source has some active callbacks right now. */
	} else if (src->active != 0) {
		ret = -EAGAIN;

	/* ok to remove. */
	} else {
		ret = 0;

		/* walk through the callbacks and remove all that match. */
		for (cb = TAILQ_FIRST(&src->callbacks); cb != NULL; cb = next) {
			next = TAILQ_NEXT(cb, next);

			if (cb->cb_fn == cb_fn && (cb_arg == (void *)-1 ||
					cb->cb_arg == cb_arg)) {
				TAILQ_REMOVE(&src->callbacks, cb, next);
				rte_free(cb);
				ret++;
			}
		}

		/* all callbacks for that source are removed. */
		if (TAILQ_EMPTY(&src->callbacks)) {
			TAILQ_REMOVE(&intr_sources, src, next);
			rte_free(src);
		}
	}

	rte_spinlock_unlock(&intr_lock);

	/* notify the pipe fd waited on by epoll_wait to rebuild the wait list */
	if (ret >= 0 && write(intr_pipe.writefd, "1", 1) < 0) {
		ret = -EPIPE;
	}

	return ret;
}
int
rte_intr_enable(struct rte_intr_handle *intr_handle)
{
	if (!intr_handle || intr_handle->fd < 0 || intr_handle->uio_cfg_fd < 0)
		return -1;

	switch (intr_handle->type) {
	/* write to the uio fd to enable the interrupt */
	case RTE_INTR_HANDLE_UIO:
		if (uio_intr_enable(intr_handle))
			return -1;
		break;
	case RTE_INTR_HANDLE_UIO_INTX:
		if (uio_intx_intr_enable(intr_handle))
			return -1;
		break;
	/* not used at this moment */
	case RTE_INTR_HANDLE_ALARM:
		return -1;
	case RTE_INTR_HANDLE_VFIO_MSIX:
		if (vfio_enable_msix(intr_handle))
			return -1;
		break;
	case RTE_INTR_HANDLE_VFIO_MSI:
		if (vfio_enable_msi(intr_handle))
			return -1;
		break;
	case RTE_INTR_HANDLE_VFIO_LEGACY:
		if (vfio_enable_intx(intr_handle))
			return -1;
		break;
	/* unknown handle type */
	default:
		RTE_LOG(ERR, EAL,
			"Unknown handle type of fd %d\n",
			intr_handle->fd);
		return -1;
	}

	return 0;
}
int
rte_intr_disable(struct rte_intr_handle *intr_handle)
{
	if (!intr_handle || intr_handle->fd < 0 || intr_handle->uio_cfg_fd < 0)
		return -1;

	switch (intr_handle->type) {
	/* write to the uio fd to disable the interrupt */
	case RTE_INTR_HANDLE_UIO:
		if (uio_intr_disable(intr_handle))
			return -1;
		break;
	case RTE_INTR_HANDLE_UIO_INTX:
		if (uio_intx_intr_disable(intr_handle))
			return -1;
		break;
	/* not used at this moment */
	case RTE_INTR_HANDLE_ALARM:
		return -1;
	case RTE_INTR_HANDLE_VFIO_MSIX:
		if (vfio_disable_msix(intr_handle))
			return -1;
		break;
	case RTE_INTR_HANDLE_VFIO_MSI:
		if (vfio_disable_msi(intr_handle))
			return -1;
		break;
	case RTE_INTR_HANDLE_VFIO_LEGACY:
		if (vfio_disable_intx(intr_handle))
			return -1;
		break;
	/* unknown handle type */
	default:
		RTE_LOG(ERR, EAL,
			"Unknown handle type of fd %d\n",
			intr_handle->fd);
		return -1;
	}

	return 0;
}
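/*
 * For each ready fd reported by epoll, the interrupt thread looks up the
 * matching source, marks it active, drops the lock, reads the fd to clear
 * the event, and then invokes each registered callback.  Each callback is
 * copied and the lock released around the call, so callbacks may take
 * arbitrary time without blocking register/unregister on other sources.
 */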
static int
eal_intr_process_interrupts(struct epoll_event *events, int nfds)
{
	int n, bytes_read;
	struct rte_intr_source *src;
	struct rte_intr_callback *cb;
	union rte_intr_read_buffer buf;
	struct rte_intr_callback active_cb;

	for (n = 0; n < nfds; n++) {
		/*
		 * if the pipe fd is ready to read, return out to
		 * rebuild the wait list.
		 */
		if (events[n].data.fd == intr_pipe.readfd) {
			int r = read(intr_pipe.readfd, buf.charbuf,
					sizeof(buf.charbuf));
			RTE_SET_USED(r);
			return -1;
		}

		rte_spinlock_lock(&intr_lock);
		TAILQ_FOREACH(src, &intr_sources, next)
			if (src->intr_handle.fd ==
					events[n].data.fd)
				break;

		/* source not registered, ignore it */
		if (src == NULL) {
			rte_spinlock_unlock(&intr_lock);
			continue;
		}

		/* mark this interrupt source as active and release the lock. */
		src->active = 1;
		rte_spinlock_unlock(&intr_lock);

		/* set the length to be read for different handle types */
		switch (src->intr_handle.type) {
		case RTE_INTR_HANDLE_UIO:
		case RTE_INTR_HANDLE_UIO_INTX:
			bytes_read = sizeof(buf.uio_intr_count);
			break;
		case RTE_INTR_HANDLE_ALARM:
			bytes_read = sizeof(buf.timerfd_num);
			break;
		case RTE_INTR_HANDLE_VFIO_MSIX:
		case RTE_INTR_HANDLE_VFIO_MSI:
		case RTE_INTR_HANDLE_VFIO_LEGACY:
			bytes_read = sizeof(buf.vfio_intr_count);
			break;
		default:
			bytes_read = 1;
			break;
		}

		/*
		 * read out to clear the ready-to-be-read flag
		 * for epoll_wait.
		 */
		bytes_read = read(events[n].data.fd, &buf, bytes_read);
		if (bytes_read < 0) {
			if (errno == EINTR || errno == EWOULDBLOCK)
				continue;

			RTE_LOG(ERR, EAL, "Error reading from file "
				"descriptor %d: %s\n", events[n].data.fd,
				strerror(errno));
		} else if (bytes_read == 0)
			RTE_LOG(ERR, EAL, "Read nothing from file "
				"descriptor %d\n", events[n].data.fd);

		/* grab the lock again to call callbacks and update status. */
		rte_spinlock_lock(&intr_lock);

		if (bytes_read > 0) {
			/* Finally, call all the callbacks. */
			TAILQ_FOREACH(cb, &src->callbacks, next) {
				/* make a copy and unlock. */
				active_cb = *cb;
				rte_spinlock_unlock(&intr_lock);

				/* call the actual callback */
				active_cb.cb_fn(&src->intr_handle,
					active_cb.cb_arg);

				/* get the lock back. */
				rte_spinlock_lock(&intr_lock);
			}
		}

		/* we are done with that interrupt source, release it. */
		src->active = 0;
		rte_spinlock_unlock(&intr_lock);
	}

	return 0;
}
/**
 * It handles all the interrupts.
 *
 * @param pfd
 *  epoll file descriptor.
 * @param totalfds
 *  The number of file descriptors added in epoll.
 */
static void
eal_intr_handle_interrupts(int pfd, unsigned totalfds)
{
	struct epoll_event events[totalfds];
	int nfds = 0;

	for (;;) {
		nfds = epoll_wait(pfd, events, totalfds,
			EAL_INTR_EPOLL_WAIT_FOREVER);
		/* epoll_wait failed */
		if (nfds < 0) {
			if (errno == EINTR)
				continue;
			RTE_LOG(ERR, EAL,
				"epoll_wait returned error\n");
			return;
		}
		/* epoll_wait timeout, will never happen here */
		else if (nfds == 0)
			continue;
		/* epoll_wait has at least one fd ready to read */
		if (eal_intr_process_interrupts(events, nfds) < 0)
			return;
	}
}
/**
 * It builds/rebuilds the epoll file descriptor with all the file
 * descriptors being waited on, then handles the interrupts.
 */
static __attribute__((noreturn)) void *
eal_intr_thread_main(__rte_unused void *arg)
{
	struct epoll_event ev;

	/* host thread, never break out */
	for (;;) {
		/* build up the epoll fd with all descriptors we are to
		 * wait on, then pass it to the handle_interrupts function
		 */
		static struct epoll_event pipe_event = {
			.events = EPOLLIN | EPOLLPRI,
		};
		struct rte_intr_source *src;
		unsigned numfds = 0;

		/* create epoll fd */
		int pfd = epoll_create(1);
		if (pfd < 0)
			rte_panic("Cannot create epoll instance\n");

		pipe_event.data.fd = intr_pipe.readfd;
		/*
		 * add the pipe fd into the wait list; this pipe is used to
		 * trigger a rebuild of the wait list.
		 */
		if (epoll_ctl(pfd, EPOLL_CTL_ADD, intr_pipe.readfd,
						&pipe_event) < 0) {
			rte_panic("Error adding fd %d to epoll, %s\n",
					intr_pipe.readfd, strerror(errno));
		}
		numfds++;

		rte_spinlock_lock(&intr_lock);

		TAILQ_FOREACH(src, &intr_sources, next) {
			if (src->callbacks.tqh_first == NULL)
				continue; /* skip those with no callbacks */
			ev.events = EPOLLIN | EPOLLPRI;
			ev.data.fd = src->intr_handle.fd;

			/*
			 * add all the registered device file descriptors
			 * into the wait list.
			 */
			if (epoll_ctl(pfd, EPOLL_CTL_ADD,
					src->intr_handle.fd, &ev) < 0) {
				rte_panic("Error adding fd %d to epoll, %s\n",
					src->intr_handle.fd, strerror(errno));
			}
			numfds++;
		}
		rte_spinlock_unlock(&intr_lock);

		/* serve the interrupt */
		eal_intr_handle_interrupts(pfd, numfds);

		/*
		 * when we return, we need to rebuild the
		 * list of fds to monitor.
		 */
		close(pfd);
	}
}
int
rte_eal_intr_init(void)
{
	int ret = 0;

	/* init the global interrupt source head */
	TAILQ_INIT(&intr_sources);

	/*
	 * create a pipe that the interrupt thread waits on via epoll;
	 * writing to it tells the thread to rebuild its epoll wait list.
	 */
	if (pipe(intr_pipe.pipefd) < 0)
		return -1;

	/* create the host thread to wait for/handle interrupts */
	ret = pthread_create(&intr_thread, NULL,
			eal_intr_thread_main, NULL);
	if (ret != 0)
		RTE_LOG(ERR, EAL,
			"Failed to create thread for interrupt handling\n");

	return -ret;
}
static void
eal_intr_proc_rxtx_intr(int fd, const struct rte_intr_handle *intr_handle)
{
	union rte_intr_read_buffer buf;
	int bytes_read = 1;

	switch (intr_handle->type) {
	case RTE_INTR_HANDLE_UIO:
	case RTE_INTR_HANDLE_UIO_INTX:
		bytes_read = sizeof(buf.uio_intr_count);
		break;
	case RTE_INTR_HANDLE_VFIO_MSIX:
	case RTE_INTR_HANDLE_VFIO_MSI:
	case RTE_INTR_HANDLE_VFIO_LEGACY:
		bytes_read = sizeof(buf.vfio_intr_count);
		break;
	default:
		bytes_read = 1;
		RTE_LOG(INFO, EAL, "unexpected intr type\n");
		break;
	}

	/*
	 * read out to clear the ready-to-be-read flag
	 * for epoll_wait.
	 */
	bytes_read = read(fd, &buf, bytes_read);
	if (bytes_read < 0) {
		if (errno == EINTR || errno == EWOULDBLOCK ||
		    errno == EAGAIN)
			return;
		RTE_LOG(ERR, EAL,
			"Error reading from fd %d: %s\n",
			fd, strerror(errno));
	} else if (bytes_read == 0)
		RTE_LOG(ERR, EAL, "Read nothing from fd %d\n", fd);
}
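/*
 * rte_epoll_event.status acts as a small state machine: an event is
 * INVALID until added, VALID while armed, and briefly in an "executing"
 * state while its callback runs in eal_epoll_process_event().  The
 * compare-and-set on status is what lets a concurrent delete
 * (eal_epoll_data_safe_free) wait until no callback is executing before
 * tearing the event down.
 */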
static int
eal_epoll_process_event(struct epoll_event *evs, unsigned int n,
			struct rte_epoll_event *events)
{
	unsigned int i, count = 0;
	struct rte_epoll_event *rev;

	for (i = 0; i < n; i++) {
		rev = evs[i].data.ptr;
		if (!rev || !rte_atomic32_cmpset(&rev->status, RTE_EPOLL_VALID,
						 RTE_EPOLL_EXEC))
			continue;

		events[count].status       = RTE_EPOLL_VALID;
		events[count].fd           = rev->fd;
		events[count].epfd         = rev->epfd;
		events[count].epdata.event = rev->epdata.event;
		events[count].epdata.data  = rev->epdata.data;
		if (rev->epdata.cb_fun)
			rev->epdata.cb_fun(rev->fd,
					   rev->epdata.cb_arg);

		rte_compiler_barrier();
		rev->status = RTE_EPOLL_VALID;
		count++;
	}

	return count;
}
static int
eal_init_tls_epfd(void)
{
	int pfd = epoll_create(255);

	if (pfd < 0) {
		RTE_LOG(ERR, EAL,
			"Cannot create epoll instance\n");
		return -1;
	}
	return pfd;
}

int
rte_intr_tls_epfd(void)
{
	if (RTE_PER_LCORE(_epfd) == -1)
		RTE_PER_LCORE(_epfd) = eal_init_tls_epfd();

	return RTE_PER_LCORE(_epfd);
}
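/*
 * A minimal usage sketch (some_fd and the array size are placeholders,
 * not taken from this file): arm an event on the caller's per-thread
 * epoll instance and wait for it.
 *
 *	struct rte_epoll_event ev = { .epdata = { .event = EPOLLIN } };
 *	struct rte_epoll_event out[8];
 *
 *	rte_epoll_ctl(RTE_EPOLL_PER_THREAD, EPOLL_CTL_ADD, some_fd, &ev);
 *	int n = rte_epoll_wait(RTE_EPOLL_PER_THREAD, out, 8, -1);
 */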
int
rte_epoll_wait(int epfd, struct rte_epoll_event *events,
	       int maxevents, int timeout)
{
	struct epoll_event evs[maxevents];
	int rc;

	if (!events) {
		RTE_LOG(ERR, EAL, "rte_epoll_event can't be NULL\n");
		return -1;
	}

	/* using per thread epoll fd */
	if (epfd == RTE_EPOLL_PER_THREAD)
		epfd = rte_intr_tls_epfd();

	while (1) {
		rc = epoll_wait(epfd, evs, maxevents, timeout);
		if (likely(rc > 0)) {
			/* epoll_wait has at least one fd ready to read */
			rc = eal_epoll_process_event(evs, rc, events);
			break;
		} else if (rc < 0) {
			if (errno == EINTR)
				continue;
			/* epoll_wait failed */
			RTE_LOG(ERR, EAL, "epoll_wait returned error: %s\n",
				strerror(errno));
			rc = -1;
			break;
		} else {
			/* rc == 0, epoll_wait timed out */
			break;
		}
	}

	return rc;
}
static void
eal_epoll_data_safe_free(struct rte_epoll_event *ev)
{
	while (!rte_atomic32_cmpset(&ev->status, RTE_EPOLL_VALID,
				    RTE_EPOLL_INVALID))
		while (ev->status != RTE_EPOLL_VALID)
			rte_pause();

	memset(&ev->epdata, 0, sizeof(ev->epdata));
}
int
rte_epoll_ctl(int epfd, int op, int fd,
	      struct rte_epoll_event *event)
{
	struct epoll_event ev;

	if (!event) {
		RTE_LOG(ERR, EAL, "rte_epoll_event can't be NULL\n");
		return -1;
	}

	/* using per thread epoll fd */
	if (epfd == RTE_EPOLL_PER_THREAD)
		epfd = rte_intr_tls_epfd();

	if (op == EPOLL_CTL_ADD) {
		event->status = RTE_EPOLL_VALID;
		event->fd = fd;  /* ignore fd in event */
		event->epfd = epfd;
		ev.data.ptr = (void *)event;
	}

	ev.events = event->epdata.event;
	if (epoll_ctl(epfd, op, fd, &ev) < 0) {
		RTE_LOG(ERR, EAL, "Error op %d fd %d epoll_ctl, %s\n",
			op, fd, strerror(errno));
		if (op == EPOLL_CTL_ADD)
			/* rollback status when CTL_ADD fails */
			event->status = RTE_EPOLL_INVALID;
		return -1;
	}

	if (op == EPOLL_CTL_DEL && event->status != RTE_EPOLL_INVALID)
		eal_epoll_data_safe_free(event);

	return 0;
}
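/*
 * rte_intr_rx_ctl() wires one Rx interrupt vector of a device into an
 * epoll instance: it takes the eventfd backing vector 'vec'
 * (intr_handle->efds[vec]), records the caller's data pointer, and sets
 * eal_intr_proc_rxtx_intr() as the callback that drains the eventfd when
 * the event fires.
 */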
int
rte_intr_rx_ctl(struct rte_intr_handle *intr_handle, int epfd,
		int op, unsigned int vec, void *data)
{
	struct rte_epoll_event *rev;
	struct rte_epoll_data *epdata;
	int epfd_op;
	int rc = 0;

	if (!intr_handle || intr_handle->nb_efd == 0 ||
	    vec >= intr_handle->nb_efd) {
		RTE_LOG(ERR, EAL, "Wrong intr vector number.\n");
		return -EPERM;
	}

	switch (op) {
	case RTE_INTR_EVENT_ADD:
		epfd_op = EPOLL_CTL_ADD;
		rev = &intr_handle->elist[vec];
		if (rev->status != RTE_EPOLL_INVALID) {
			RTE_LOG(INFO, EAL, "Event has already been added.\n");
			return -EEXIST;
		}

		/* attach to intr vector fd */
		epdata = &rev->epdata;
		epdata->event  = EPOLLIN | EPOLLPRI | EPOLLET;
		epdata->data   = data;
		epdata->cb_fun = (rte_intr_event_cb_t)eal_intr_proc_rxtx_intr;
		epdata->cb_arg = (void *)intr_handle;
		rc = rte_epoll_ctl(epfd, epfd_op, intr_handle->efds[vec], rev);
		if (!rc)
			RTE_LOG(DEBUG, EAL,
				"efd %d associated with vec %d added on epfd %d"
				"\n", rev->fd, vec, epfd);
		else
			rc = -EPERM;
		break;
	case RTE_INTR_EVENT_DEL:
		epfd_op = EPOLL_CTL_DEL;
		rev = &intr_handle->elist[vec];
		if (rev->status == RTE_EPOLL_INVALID) {
			RTE_LOG(INFO, EAL, "Event does not exist.\n");
			return -EPERM;
		}

		rc = rte_epoll_ctl(rev->epfd, epfd_op, rev->fd, rev);
		if (rc)
			rc = -EPERM;
		break;
	default:
		RTE_LOG(ERR, EAL, "event op type mismatch\n");
		rc = -EPERM;
	}

	return rc;
}
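/*
 * For VFIO MSI-X, one non-blocking eventfd is created per requested Rx
 * vector and later handed to the kernel via vfio_enable_msix(); max_intr
 * additionally reserves NB_OTHER_INTR slot(s) for the non-queue interrupt.
 * For every other handle type the device fd itself is reused, so at most
 * one vector is available.
 */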
int
rte_intr_efd_enable(struct rte_intr_handle *intr_handle, uint32_t nb_efd)
{
	uint32_t i;
	int fd;
	uint32_t n = RTE_MIN(nb_efd, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);

	if (intr_handle->type == RTE_INTR_HANDLE_VFIO_MSIX) {
		for (i = 0; i < n; i++) {
			fd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
			if (fd < 0) {
				RTE_LOG(ERR, EAL,
					"can't setup eventfd, error %i (%s)\n",
					errno, strerror(errno));
				return -errno;
			}
			intr_handle->efds[i] = fd;
		}
		intr_handle->nb_efd   = n;
		intr_handle->max_intr = NB_OTHER_INTR + n;
	} else {
		intr_handle->efds[0]  = intr_handle->fd;
		intr_handle->nb_efd   = RTE_MIN(nb_efd, 1U);
		intr_handle->max_intr = NB_OTHER_INTR;
	}

	return 0;
}
void
rte_intr_efd_disable(struct rte_intr_handle *intr_handle)
{
	uint32_t i;
	struct rte_epoll_event *rev;

	for (i = 0; i < intr_handle->nb_efd; i++) {
		rev = &intr_handle->elist[i];
		if (rev->status == RTE_EPOLL_INVALID)
			continue;
		if (rte_epoll_ctl(rev->epfd, EPOLL_CTL_DEL, rev->fd, rev)) {
			/* force free if the entry is still valid */
			eal_epoll_data_safe_free(rev);
			rev->status = RTE_EPOLL_INVALID;
		}
	}

	if (intr_handle->max_intr > intr_handle->nb_efd) {
		for (i = 0; i < intr_handle->nb_efd; i++)
			close(intr_handle->efds[i]);
	}
	intr_handle->nb_efd = 0;
	intr_handle->max_intr = 0;
}
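/*
 * rte_intr_dp_is_en(): datapath (Rx queue) interrupts are considered
 * enabled as soon as at least one event fd has been set up (nb_efd != 0).
 * rte_intr_allow_others(): a separate vector remains available for
 * non-queue interrupts (e.g. link status) whenever max_intr exceeds
 * nb_efd.
 */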
int
rte_intr_dp_is_en(struct rte_intr_handle *intr_handle)
{
	return !(!intr_handle->nb_efd);
}

int
rte_intr_allow_others(struct rte_intr_handle *intr_handle)
{
	return !!(intr_handle->max_intr - intr_handle->nb_efd);
}
/*
 * Fallback stubs, compiled when the Rx interrupt support above is not
 * available for the target.
 */
int
rte_intr_rx_ctl(struct rte_intr_handle *intr_handle,
		int epfd, int op, unsigned int vec, void *data)
{
	RTE_SET_USED(intr_handle);
	RTE_SET_USED(epfd);
	RTE_SET_USED(op);
	RTE_SET_USED(vec);
	RTE_SET_USED(data);
	return -ENOTSUP;
}

int
rte_intr_efd_enable(struct rte_intr_handle *intr_handle, uint32_t nb_efd)
{
	RTE_SET_USED(intr_handle);
	RTE_SET_USED(nb_efd);
	return 0;
}

void
rte_intr_efd_disable(struct rte_intr_handle *intr_handle)
{
	RTE_SET_USED(intr_handle);
}

int
rte_intr_dp_is_en(struct rte_intr_handle *intr_handle)
{
	RTE_SET_USED(intr_handle);
	return 0;
}

int
rte_intr_allow_others(struct rte_intr_handle *intr_handle)
{
	RTE_SET_USED(intr_handle);
	return 1;
}