/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <pthread.h>
#include <sys/queue.h>
#include <unistd.h>
#include <string.h>
#include <errno.h>
#include <sys/epoll.h>
#include <sys/signalfd.h>
#include <sys/ioctl.h>

#include <rte_common.h>
#include <rte_interrupts.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_debug.h>
#include <rte_log.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_errno.h>
#include <rte_spinlock.h>

#include "eal_private.h"
#define EAL_INTR_EPOLL_WAIT_FOREVER (-1)

static RTE_DEFINE_PER_LCORE(int, _epfd) = -1; /**< epoll fd per thread */
/**
 * union for pipe fds
 */
union intr_pipefds {
	struct {
		int pipefd[2];
	};
	struct {
		int readfd;
		int writefd;
	};
};

/**
 * union buffer for reading on different devices
 */
union rte_intr_read_buffer {
	int uio_intr_count;       /* for uio device */
	uint64_t vfio_intr_count; /* for vfio device */
	uint64_t timerfd_num;     /* for timerfd */
	char charbuf[16];         /* for others */
};
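/*
 * Note: a read on a uio device fd returns a 4-byte event count, while
 * vfio eventfds and timerfds return an 8-byte counter, hence the
 * differently sized members; the interrupt thread selects the read size
 * per handle type before each read.
 */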
TAILQ_HEAD(rte_intr_cb_list, rte_intr_callback);
TAILQ_HEAD(rte_intr_source_list, rte_intr_source);

struct rte_intr_callback {
	TAILQ_ENTRY(rte_intr_callback) next;
	rte_intr_callback_fn cb_fn; /**< callback address */
	void *cb_arg;               /**< parameter for callback */
};

struct rte_intr_source {
	TAILQ_ENTRY(rte_intr_source) next;
	struct rte_intr_handle intr_handle; /**< interrupt handle */
	struct rte_intr_cb_list callbacks;  /**< user callbacks */
	uint32_t active;                    /**< nonzero while callbacks run */
};

/* global spinlock for interrupt data operation */
static rte_spinlock_t intr_lock = RTE_SPINLOCK_INITIALIZER;

/* union buffer for pipe read/write */
static union intr_pipefds intr_pipe;

/* interrupt sources list */
static struct rte_intr_source_list intr_sources;

/* interrupt handling thread */
static pthread_t intr_thread;
/* VFIO interrupts */

/* irq set buffer length for legacy (INTx) and MSI interrupts */
#define IRQ_SET_BUF_LEN  (sizeof(struct vfio_irq_set) + sizeof(int))
/* irq set buffer length for queue interrupts and LSC interrupt */
#define MSIX_IRQ_SET_BUF_LEN (sizeof(struct vfio_irq_set) + \
			      sizeof(int) * (RTE_MAX_RXTX_INTR_VEC_ID + 1))
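/*
 * struct vfio_irq_set ends in a flexible array member (data[]), so the
 * buffers above are sized as the fixed header plus room for the eventfd
 * value(s) handed to the kernel; argsz tells VFIO_DEVICE_SET_IRQS the
 * total length of the request.
 */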
/* enable legacy (INTx) interrupts */
static int
vfio_enable_intx(struct rte_intr_handle *intr_handle) {
	struct vfio_irq_set *irq_set;
	char irq_set_buf[IRQ_SET_BUF_LEN];
	int len, ret;
	int *fd_ptr;

	len = sizeof(irq_set_buf);

	/* enable INTx */
	irq_set = (struct vfio_irq_set *) irq_set_buf;
	irq_set->argsz = len;
	irq_set->count = 1;
	irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
	irq_set->index = VFIO_PCI_INTX_IRQ_INDEX;
	irq_set->start = 0;
	fd_ptr = (int *) &irq_set->data;
	*fd_ptr = intr_handle->fd;

	ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
	if (ret) {
		RTE_LOG(ERR, EAL, "Error enabling INTx interrupts for fd %d\n",
			intr_handle->fd);
		return -1;
	}

	/* unmask INTx after enabling */
	memset(irq_set, 0, len);
	len = sizeof(struct vfio_irq_set);
	irq_set->argsz = len;
	irq_set->count = 1;
	irq_set->flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_UNMASK;
	irq_set->index = VFIO_PCI_INTX_IRQ_INDEX;
	irq_set->start = 0;

	ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
	if (ret) {
		RTE_LOG(ERR, EAL, "Error unmasking INTx interrupts for fd %d\n",
			intr_handle->fd);
		return -1;
	}
	return 0;
}
/* disable legacy (INTx) interrupts */
static int
vfio_disable_intx(struct rte_intr_handle *intr_handle) {
	struct vfio_irq_set *irq_set;
	char irq_set_buf[IRQ_SET_BUF_LEN];
	int len, ret;

	len = sizeof(struct vfio_irq_set);

	/* mask interrupts before disabling */
	irq_set = (struct vfio_irq_set *) irq_set_buf;
	irq_set->argsz = len;
	irq_set->count = 1;
	irq_set->flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_MASK;
	irq_set->index = VFIO_PCI_INTX_IRQ_INDEX;
	irq_set->start = 0;

	ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
	if (ret) {
		RTE_LOG(ERR, EAL, "Error masking INTx interrupts for fd %d\n",
			intr_handle->fd);
		return -1;
	}

	/* disable INTx */
	memset(irq_set, 0, len);
	irq_set->argsz = len;
	irq_set->count = 1;
	irq_set->flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER;
	irq_set->index = VFIO_PCI_INTX_IRQ_INDEX;
	irq_set->start = 0;

	ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
	if (ret) {
		RTE_LOG(ERR, EAL,
			"Error disabling INTx interrupts for fd %d\n", intr_handle->fd);
		return -1;
	}
	return 0;
}
/* enable MSI interrupts */
static int
vfio_enable_msi(struct rte_intr_handle *intr_handle) {
	int len, ret;
	char irq_set_buf[IRQ_SET_BUF_LEN];
	struct vfio_irq_set *irq_set;
	int *fd_ptr;

	len = sizeof(irq_set_buf);

	irq_set = (struct vfio_irq_set *) irq_set_buf;
	irq_set->argsz = len;
	irq_set->count = 1;
	irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
	irq_set->index = VFIO_PCI_MSI_IRQ_INDEX;
	irq_set->start = 0;
	fd_ptr = (int *) &irq_set->data;
	*fd_ptr = intr_handle->fd;

	ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
	if (ret) {
		RTE_LOG(ERR, EAL, "Error enabling MSI interrupts for fd %d\n",
			intr_handle->fd);
		return -1;
	}
	return 0;
}
/* disable MSI interrupts */
static int
vfio_disable_msi(struct rte_intr_handle *intr_handle) {
	struct vfio_irq_set *irq_set;
	char irq_set_buf[IRQ_SET_BUF_LEN];
	int len, ret;

	len = sizeof(struct vfio_irq_set);

	irq_set = (struct vfio_irq_set *) irq_set_buf;
	irq_set->argsz = len;
	irq_set->count = 0;
	irq_set->flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER;
	irq_set->index = VFIO_PCI_MSI_IRQ_INDEX;
	irq_set->start = 0;

	ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
	if (ret)
		RTE_LOG(ERR, EAL,
			"Error disabling MSI interrupts for fd %d\n", intr_handle->fd);

	return ret;
}
/* enable MSI-X interrupts */
static int
vfio_enable_msix(struct rte_intr_handle *intr_handle) {
	int len, ret;
	char irq_set_buf[MSIX_IRQ_SET_BUF_LEN];
	struct vfio_irq_set *irq_set;
	int *fd_ptr;

	len = sizeof(irq_set_buf);

	irq_set = (struct vfio_irq_set *) irq_set_buf;
	irq_set->argsz = len;
	if (!intr_handle->max_intr)
		intr_handle->max_intr = 1;
	else if (intr_handle->max_intr > RTE_MAX_RXTX_INTR_VEC_ID)
		intr_handle->max_intr = RTE_MAX_RXTX_INTR_VEC_ID + 1;

	irq_set->count = intr_handle->max_intr;
	irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
	irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
	irq_set->start = 0;
	fd_ptr = (int *) &irq_set->data;
	if (intr_handle->max_intr > 1) {
		memcpy(fd_ptr, intr_handle->efds, sizeof(intr_handle->efds));
		fd_ptr[intr_handle->max_intr - 1] = intr_handle->fd;
	} else
		fd_ptr[0] = intr_handle->fd;

	ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
	if (ret) {
		RTE_LOG(ERR, EAL, "Error enabling MSI-X interrupts for fd %d\n",
			intr_handle->fd);
		return -1;
	}
	return 0;
}
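/*
 * Layout of the eventfd array built above: efds[] holds one eventfd per
 * Rx queue vector and the last slot carries intr_handle->fd for the
 * non-queue (e.g. link status) interrupt; with a single vector only
 * intr_handle->fd is registered.
 */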
/* disable MSI-X interrupts */
static int
vfio_disable_msix(struct rte_intr_handle *intr_handle) {
	struct vfio_irq_set *irq_set;
	char irq_set_buf[MSIX_IRQ_SET_BUF_LEN];
	int len, ret;

	len = sizeof(struct vfio_irq_set);

	irq_set = (struct vfio_irq_set *) irq_set_buf;
	irq_set->argsz = len;
	irq_set->count = 0;
	irq_set->flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER;
	irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
	irq_set->start = 0;

	ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
	if (ret)
		RTE_LOG(ERR, EAL,
			"Error disabling MSI-X interrupts for fd %d\n", intr_handle->fd);

	return ret;
}
static int
uio_intx_intr_disable(struct rte_intr_handle *intr_handle)
{
	unsigned char command_high;

	/* use UIO config file descriptor for uio_pci_generic */
	if (pread(intr_handle->uio_cfg_fd, &command_high, 1, 5) != 1) {
		RTE_LOG(ERR, EAL,
			"Error reading interrupts status for fd %d\n",
			intr_handle->uio_cfg_fd);
		return -1;
	}
	/* disable interrupts */
	command_high |= 0x4;
	if (pwrite(intr_handle->uio_cfg_fd, &command_high, 1, 5) != 1) {
		RTE_LOG(ERR, EAL,
			"Error disabling interrupts for fd %d\n",
			intr_handle->uio_cfg_fd);
		return -1;
	}
	return 0;
}
static int
uio_intx_intr_enable(struct rte_intr_handle *intr_handle)
{
	unsigned char command_high;

	/* use UIO config file descriptor for uio_pci_generic */
	if (pread(intr_handle->uio_cfg_fd, &command_high, 1, 5) != 1) {
		RTE_LOG(ERR, EAL,
			"Error reading interrupts status for fd %d\n",
			intr_handle->uio_cfg_fd);
		return -1;
	}
	/* enable interrupts */
	command_high &= ~0x4;
	if (pwrite(intr_handle->uio_cfg_fd, &command_high, 1, 5) != 1) {
		RTE_LOG(ERR, EAL,
			"Error enabling interrupts for fd %d\n",
			intr_handle->uio_cfg_fd);
		return -1;
	}
	return 0;
}
static int
uio_intr_disable(struct rte_intr_handle *intr_handle)
{
	const int value = 0;

	if (write(intr_handle->fd, &value, sizeof(value)) < 0) {
		RTE_LOG(ERR, EAL,
			"Error disabling interrupts for fd %d (%s)\n",
			intr_handle->fd, strerror(errno));
		return -1;
	}
	return 0;
}
static int
uio_intr_enable(struct rte_intr_handle *intr_handle)
{
	const int value = 1;

	if (write(intr_handle->fd, &value, sizeof(value)) < 0) {
		RTE_LOG(ERR, EAL,
			"Error enabling interrupts for fd %d (%s)\n",
			intr_handle->fd, strerror(errno));
		return -1;
	}
	return 0;
}
int
rte_intr_callback_register(struct rte_intr_handle *intr_handle,
			rte_intr_callback_fn cb, void *cb_arg)
{
	int ret, wake_thread;
	struct rte_intr_source *src;
	struct rte_intr_callback *callback;

	wake_thread = 0;

	/* first do parameter checking */
	if (intr_handle == NULL || intr_handle->fd < 0 || cb == NULL) {
		RTE_LOG(ERR, EAL,
			"Registering with invalid input parameter\n");
		return -EINVAL;
	}

	/* allocate a new interrupt callback entity */
	callback = rte_zmalloc("interrupt callback list",
				sizeof(*callback), 0);
	if (callback == NULL) {
		RTE_LOG(ERR, EAL, "Can not allocate memory\n");
		return -ENOMEM;
	}
	callback->cb_fn = cb;
	callback->cb_arg = cb_arg;

	rte_spinlock_lock(&intr_lock);

	/* check if there is at least one callback registered for the fd */
	TAILQ_FOREACH(src, &intr_sources, next) {
		if (src->intr_handle.fd == intr_handle->fd) {
			/* we had no interrupts for this */
			if (TAILQ_EMPTY(&src->callbacks))
				wake_thread = 1;

			TAILQ_INSERT_TAIL(&(src->callbacks), callback, next);
			ret = 0;
			break;
		}
	}

	/* no existing callbacks for this - add new source */
	if (src == NULL) {
		if ((src = rte_zmalloc("interrupt source list",
				sizeof(*src), 0)) == NULL) {
			RTE_LOG(ERR, EAL, "Can not allocate memory\n");
			rte_free(callback);
			ret = -ENOMEM;
		} else {
			src->intr_handle = *intr_handle;
			TAILQ_INIT(&src->callbacks);
			TAILQ_INSERT_TAIL(&(src->callbacks), callback, next);
			TAILQ_INSERT_TAIL(&intr_sources, src, next);
			wake_thread = 1;
			ret = 0;
		}
	}

	rte_spinlock_unlock(&intr_lock);

	/*
	 * check if need to notify the pipe fd waited by epoll_wait to
	 * rebuild the wait list.
	 */
	if (wake_thread)
		if (write(intr_pipe.writefd, "1", 1) < 0)
			return -EPIPE;

	return ret;
}
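/*
 * Typical driver-side usage (a minimal sketch; "my_dev", "dev" and
 * "lsc_handler" are hypothetical names, not part of this file):
 *
 *	static void
 *	lsc_handler(struct rte_intr_handle *handle, void *arg)
 *	{
 *		struct my_dev *dev = arg;
 *		...handle the link status change...
 *	}
 *
 *	rte_intr_callback_register(&dev->intr_handle, lsc_handler, dev);
 *	rte_intr_enable(&dev->intr_handle);
 */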
int
rte_intr_callback_unregister(struct rte_intr_handle *intr_handle,
			rte_intr_callback_fn cb_fn, void *cb_arg)
{
	int ret;
	struct rte_intr_source *src;
	struct rte_intr_callback *cb, *next;

	/* do parameter checking first */
	if (intr_handle == NULL || intr_handle->fd < 0) {
		RTE_LOG(ERR, EAL,
			"Unregistering with invalid input parameter\n");
		return -EINVAL;
	}

	rte_spinlock_lock(&intr_lock);

	/* check if the interrupt source for the fd is existent */
	TAILQ_FOREACH(src, &intr_sources, next)
		if (src->intr_handle.fd == intr_handle->fd)
			break;

	/* No interrupt source registered for the fd */
	if (src == NULL) {
		ret = -ENOENT;

	/* interrupt source has some active callbacks right now. */
	} else if (src->active != 0) {
		ret = -EAGAIN;

	/* ok to remove. */
	} else {
		ret = 0;

		/* walk through the callbacks and remove all that match
		 * (a cb_arg of -1 matches any argument). */
		for (cb = TAILQ_FIRST(&src->callbacks); cb != NULL; cb = next) {
			next = TAILQ_NEXT(cb, next);

			if (cb->cb_fn == cb_fn && (cb_arg == (void *)-1 ||
					cb->cb_arg == cb_arg)) {
				TAILQ_REMOVE(&src->callbacks, cb, next);
				rte_free(cb);
				ret++;
			}
		}

		/* all callbacks for that source are removed. */
		if (TAILQ_EMPTY(&src->callbacks)) {
			TAILQ_REMOVE(&intr_sources, src, next);
			rte_free(src);
		}
	}

	rte_spinlock_unlock(&intr_lock);

	/* notify the pipe fd waited by epoll_wait to rebuild the wait list */
	if (ret >= 0 && write(intr_pipe.writefd, "1", 1) < 0) {
		ret = -EPIPE;
	}

	return ret;
}
int
rte_intr_enable(struct rte_intr_handle *intr_handle)
{
	if (!intr_handle || intr_handle->fd < 0 || intr_handle->uio_cfg_fd < 0)
		return -1;

	switch (intr_handle->type) {
	/* write to the uio fd to enable the interrupt */
	case RTE_INTR_HANDLE_UIO:
		if (uio_intr_enable(intr_handle))
			return -1;
		break;
	case RTE_INTR_HANDLE_UIO_INTX:
		if (uio_intx_intr_enable(intr_handle))
			return -1;
		break;
	/* not used at this moment */
	case RTE_INTR_HANDLE_ALARM:
		return -1;
	case RTE_INTR_HANDLE_VFIO_MSIX:
		if (vfio_enable_msix(intr_handle))
			return -1;
		break;
	case RTE_INTR_HANDLE_VFIO_MSI:
		if (vfio_enable_msi(intr_handle))
			return -1;
		break;
	case RTE_INTR_HANDLE_VFIO_LEGACY:
		if (vfio_enable_intx(intr_handle))
			return -1;
		break;
	/* unknown handle type */
	default:
		RTE_LOG(ERR, EAL,
			"Unknown handle type of fd %d\n",
			intr_handle->fd);
		return -1;
	}

	return 0;
}
int
rte_intr_disable(struct rte_intr_handle *intr_handle)
{
	if (!intr_handle || intr_handle->fd < 0 || intr_handle->uio_cfg_fd < 0)
		return -1;

	switch (intr_handle->type) {
	/* write to the uio fd to disable the interrupt */
	case RTE_INTR_HANDLE_UIO:
		if (uio_intr_disable(intr_handle))
			return -1;
		break;
	case RTE_INTR_HANDLE_UIO_INTX:
		if (uio_intx_intr_disable(intr_handle))
			return -1;
		break;
	/* not used at this moment */
	case RTE_INTR_HANDLE_ALARM:
		return -1;
	case RTE_INTR_HANDLE_VFIO_MSIX:
		if (vfio_disable_msix(intr_handle))
			return -1;
		break;
	case RTE_INTR_HANDLE_VFIO_MSI:
		if (vfio_disable_msi(intr_handle))
			return -1;
		break;
	case RTE_INTR_HANDLE_VFIO_LEGACY:
		if (vfio_disable_intx(intr_handle))
			return -1;
		break;
	/* unknown handle type */
	default:
		RTE_LOG(ERR, EAL,
			"Unknown handle type of fd %d\n",
			intr_handle->fd);
		return -1;
	}

	return 0;
}
static int
eal_intr_process_interrupts(struct epoll_event *events, int nfds)
{
	int n, bytes_read;
	struct rte_intr_source *src;
	struct rte_intr_callback *cb;
	union rte_intr_read_buffer buf;
	struct rte_intr_callback active_cb;

	for (n = 0; n < nfds; n++) {
		/*
		 * if the pipe fd is ready to read, return out to
		 * rebuild the wait list.
		 */
		if (events[n].data.fd == intr_pipe.readfd) {
			int r = read(intr_pipe.readfd, buf.charbuf,
					sizeof(buf.charbuf));
			RTE_SET_USED(r);
			return -1;
		}
		rte_spinlock_lock(&intr_lock);
		TAILQ_FOREACH(src, &intr_sources, next)
			if (src->intr_handle.fd ==
					events[n].data.fd)
				break;
		if (src == NULL) {
			rte_spinlock_unlock(&intr_lock);
			continue;
		}

		/* mark this interrupt source as active and release the lock. */
		src->active = 1;
		rte_spinlock_unlock(&intr_lock);

		/* set the length to be read for different handle type */
		switch (src->intr_handle.type) {
		case RTE_INTR_HANDLE_UIO:
			bytes_read = sizeof(buf.uio_intr_count);
			break;
		case RTE_INTR_HANDLE_ALARM:
			bytes_read = sizeof(buf.timerfd_num);
			break;
		case RTE_INTR_HANDLE_VFIO_MSIX:
		case RTE_INTR_HANDLE_VFIO_MSI:
		case RTE_INTR_HANDLE_VFIO_LEGACY:
			bytes_read = sizeof(buf.vfio_intr_count);
			break;
		default:
			bytes_read = 1;
			break;
		}

		/**
		 * read out to clear the ready-to-be-read flag
		 * for epoll_wait.
		 */
		bytes_read = read(events[n].data.fd, &buf, bytes_read);
		if (bytes_read < 0) {
			if (errno == EINTR || errno == EWOULDBLOCK)
				continue;

			RTE_LOG(ERR, EAL, "Error reading from file "
				"descriptor %d: %s\n", events[n].data.fd,
				strerror(errno));
		} else if (bytes_read == 0)
			RTE_LOG(ERR, EAL, "Read nothing from file "
				"descriptor %d\n", events[n].data.fd);

		/* grab a lock, again to call callbacks and update status. */
		rte_spinlock_lock(&intr_lock);

		if (bytes_read > 0) {
			/* Finally, call all callbacks. */
			TAILQ_FOREACH(cb, &src->callbacks, next) {
				/* make a copy and unlock. */
				active_cb = *cb;
				rte_spinlock_unlock(&intr_lock);

				/* call the actual callback */
				active_cb.cb_fn(&src->intr_handle,
					active_cb.cb_arg);

				/* get the lock back. */
				rte_spinlock_lock(&intr_lock);
			}
		}

		/* we are done with that interrupt source, release it. */
		src->active = 0;
		rte_spinlock_unlock(&intr_lock);
	}

	return 0;
}
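/*
 * The callback loop above copies each entry into active_cb and drops
 * intr_lock before invoking it, so a callback may itself register or
 * unregister callbacks without deadlocking; src->active marks the source
 * busy so that rte_intr_callback_unregister() returns -EAGAIN rather than
 * freeing a list that is still being walked.
 */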
/**
 * It handles all the interrupts.
 *
 * @param pfd
 *  epoll file descriptor.
 * @param totalfds
 *  The number of file descriptors added in epoll.
 */
static void
eal_intr_handle_interrupts(int pfd, unsigned totalfds)
{
	struct epoll_event events[totalfds];
	int nfds = 0;

	for (;;) {
		nfds = epoll_wait(pfd, events, totalfds,
			EAL_INTR_EPOLL_WAIT_FOREVER);
		/* epoll_wait fail */
		if (nfds < 0) {
			if (errno == EINTR)
				continue;
			RTE_LOG(ERR, EAL,
				"epoll_wait returns with fail\n");
			return;
		}
		/* epoll_wait timeout, will never happen here */
		else if (nfds == 0)
			continue;
		/* epoll_wait has at least one fd ready to read */
		if (eal_intr_process_interrupts(events, nfds) < 0)
			return;
	}
}
/**
 * It builds/rebuilds up the epoll file descriptor with all the
 * file descriptors being waited on. Then handles the interrupts.
 *
 * @param arg
 *  pointer. (unused)
 */
static __attribute__((noreturn)) void *
eal_intr_thread_main(__rte_unused void *arg)
{
	struct epoll_event ev;

	/* host thread, never break out */
	for (;;) {
		/* build up the epoll fd with all descriptors we are to
		 * wait on then pass it to the handle_interrupts function
		 */
		static struct epoll_event pipe_event = {
			.events = EPOLLIN | EPOLLPRI,
		};
		struct rte_intr_source *src;
		unsigned numfds = 0;

		/* create epoll fd */
		int pfd = epoll_create(1);
		if (pfd < 0)
			rte_panic("Cannot create epoll instance\n");

		pipe_event.data.fd = intr_pipe.readfd;
		/*
		 * add pipe fd into wait list, this pipe is used to
		 * rebuild the wait list.
		 */
		if (epoll_ctl(pfd, EPOLL_CTL_ADD, intr_pipe.readfd,
				&pipe_event) < 0) {
			rte_panic("Error adding fd %d to epoll_ctl, %s\n",
				intr_pipe.readfd, strerror(errno));
		}
		numfds++;

		rte_spinlock_lock(&intr_lock);

		TAILQ_FOREACH(src, &intr_sources, next) {
			if (src->callbacks.tqh_first == NULL)
				continue; /* skip those with no callbacks */
			ev.events = EPOLLIN | EPOLLPRI;
			ev.data.fd = src->intr_handle.fd;

			/*
			 * add all the uio device file descriptors
			 * into the wait list.
			 */
			if (epoll_ctl(pfd, EPOLL_CTL_ADD,
					src->intr_handle.fd, &ev) < 0) {
				rte_panic("Error adding fd %d epoll_ctl, %s\n",
					src->intr_handle.fd, strerror(errno));
			}
			else
				numfds++;
		}
		rte_spinlock_unlock(&intr_lock);
		/* serve the interrupt */
		eal_intr_handle_interrupts(pfd, numfds);

		/*
		 * when we return, we need to rebuild the
		 * list of fds to monitor.
		 */
		close(pfd);
	}
}
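/*
 * Rebuild protocol: every (un)registration writes one byte to intr_pipe.
 * The pipe's read end is part of the epoll set, so epoll_wait() wakes up,
 * eal_intr_process_interrupts() returns a negative value, and the loop
 * above closes pfd and rebuilds the epoll set from the current
 * intr_sources list.
 */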
int
rte_eal_intr_init(void)
{
	int ret = 0;

	/* init the global interrupt source head */
	TAILQ_INIT(&intr_sources);

	/*
	 * create a pipe which will be waited by epoll and notified to
	 * rebuild the wait list of epoll.
	 */
	if (pipe(intr_pipe.pipefd) < 0)
		return -1;

	/* create the host thread to wait/handle the interrupt */
	ret = pthread_create(&intr_thread, NULL,
			eal_intr_thread_main, NULL);
	if (ret != 0)
		RTE_LOG(ERR, EAL,
			"Failed to create thread for interrupt handling\n");

	return -ret;
}
static void
eal_intr_proc_rxtx_intr(int fd, const struct rte_intr_handle *intr_handle)
{
	union rte_intr_read_buffer buf;
	int bytes_read = 1;

	switch (intr_handle->type) {
	case RTE_INTR_HANDLE_UIO:
	case RTE_INTR_HANDLE_UIO_INTX:
		bytes_read = sizeof(buf.uio_intr_count);
		break;
	case RTE_INTR_HANDLE_VFIO_MSIX:
	case RTE_INTR_HANDLE_VFIO_MSI:
	case RTE_INTR_HANDLE_VFIO_LEGACY:
		bytes_read = sizeof(buf.vfio_intr_count);
		break;
	default:
		bytes_read = 1;
		RTE_LOG(INFO, EAL, "unexpected intr type\n");
		break;
	}

	/*
	 * read out to clear the ready-to-be-read flag
	 * for epoll_wait.
	 */
	bytes_read = read(fd, &buf, bytes_read);
	if (bytes_read < 0) {
		if (errno == EINTR || errno == EWOULDBLOCK ||
		    errno == EAGAIN)
			return;
		RTE_LOG(ERR, EAL,
			"Error reading from fd %d: %s\n",
			fd, strerror(errno));
	} else if (bytes_read == 0)
		RTE_LOG(ERR, EAL, "Read nothing from fd %d\n", fd);
}
static int
eal_epoll_process_event(struct epoll_event *evs, unsigned int n,
			struct rte_epoll_event *events)
{
	unsigned int i, count = 0;
	struct rte_epoll_event *rev;

	for (i = 0; i < n; i++) {
		rev = evs[i].data.ptr;
		if (!rev || !rte_atomic32_cmpset(&rev->status, RTE_EPOLL_VALID,
						 RTE_EPOLL_EXEC))
			continue;

		events[count].status       = RTE_EPOLL_VALID;
		events[count].fd           = rev->fd;
		events[count].epfd         = rev->epfd;
		events[count].epdata.event = rev->epdata.event;
		events[count].epdata.data  = rev->epdata.data;
		if (rev->epdata.cb_fun)
			rev->epdata.cb_fun(rev->fd,
					   rev->epdata.cb_arg);

		rte_compiler_barrier();
		rev->status = RTE_EPOLL_VALID;
		count++;
	}
	return count;
}
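/*
 * Event status lifecycle: the cmpset above moves an event from
 * RTE_EPOLL_VALID to RTE_EPOLL_EXEC while it is being delivered, then the
 * compiler barrier orders the copy-out before the status is restored to
 * RTE_EPOLL_VALID; events whose status changed concurrently (e.g. being
 * deleted) fail the cmpset and are skipped.
 */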
static int
eal_init_tls_epfd(void)
{
	int pfd = epoll_create(255);

	if (pfd < 0) {
		RTE_LOG(ERR, EAL,
			"Cannot create epoll instance\n");
		return -1;
	}
	return pfd;
}

int
rte_intr_tls_epfd(void)
{
	if (RTE_PER_LCORE(_epfd) == -1)
		RTE_PER_LCORE(_epfd) = eal_init_tls_epfd();

	return RTE_PER_LCORE(_epfd);
}
int
rte_epoll_wait(int epfd, struct rte_epoll_event *events,
	       int maxevents, int timeout)
{
	struct epoll_event evs[maxevents];
	int rc;

	if (!events) {
		RTE_LOG(ERR, EAL, "rte_epoll_event can't be NULL\n");
		return -1;
	}

	/* using per thread epoll fd */
	if (epfd == RTE_EPOLL_PER_THREAD)
		epfd = rte_intr_tls_epfd();

	while (1) {
		rc = epoll_wait(epfd, evs, maxevents, timeout);
		if (likely(rc > 0)) {
			/* epoll_wait has at least one fd ready to read */
			rc = eal_epoll_process_event(evs, rc, events);
			break;
		} else if (rc < 0) {
			if (errno == EINTR)
				continue;
			/* epoll_wait fail */
			RTE_LOG(ERR, EAL, "epoll_wait returns with fail %s\n",
				strerror(errno));
			rc = -1;
			break;
		} else {
			/* rc == 0, epoll_wait timed out */
			break;
		}
	}

	return rc;
}
static void
eal_epoll_data_safe_free(struct rte_epoll_event *ev)
{
	while (!rte_atomic32_cmpset(&ev->status, RTE_EPOLL_VALID,
				    RTE_EPOLL_INVALID))
		while (ev->status != RTE_EPOLL_VALID)
			rte_pause();
	memset(&ev->epdata, 0, sizeof(ev->epdata));
	ev->fd = -1;
	ev->epfd = -1;
}
int
rte_epoll_ctl(int epfd, int op, int fd,
	      struct rte_epoll_event *event)
{
	struct epoll_event ev;

	if (!event) {
		RTE_LOG(ERR, EAL, "rte_epoll_event can't be NULL\n");
		return -1;
	}

	/* using per thread epoll fd */
	if (epfd == RTE_EPOLL_PER_THREAD)
		epfd = rte_intr_tls_epfd();

	if (op == EPOLL_CTL_ADD) {
		event->status = RTE_EPOLL_VALID;
		event->fd = fd;  /* ignore fd in event */
		event->epfd = epfd;
		ev.data.ptr = (void *)event;
	}

	ev.events = event->epdata.event;
	if (epoll_ctl(epfd, op, fd, &ev) < 0) {
		RTE_LOG(ERR, EAL, "Error op %d fd %d epoll_ctl, %s\n",
			op, fd, strerror(errno));
		if (op == EPOLL_CTL_ADD)
			/* rollback status when CTL_ADD fail */
			event->status = RTE_EPOLL_INVALID;
		return -1;
	}

	if (op == EPOLL_CTL_DEL && event->status != RTE_EPOLL_INVALID)
		eal_epoll_data_safe_free(event);

	return 0;
}
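/*
 * A minimal usage sketch (assuming an existing pollable fd; the names
 * "fd", "revents" and "n" are illustrative only):
 *
 *	struct rte_epoll_event ev;
 *	struct rte_epoll_event revents[8];
 *	int n;
 *
 *	memset(&ev, 0, sizeof(ev));
 *	ev.epdata.event = EPOLLIN;
 *	if (rte_epoll_ctl(RTE_EPOLL_PER_THREAD, EPOLL_CTL_ADD, fd, &ev) < 0)
 *		return;
 *	n = rte_epoll_wait(RTE_EPOLL_PER_THREAD, revents, 8, -1);
 */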
int
rte_intr_rx_ctl(struct rte_intr_handle *intr_handle, int epfd,
		int op, unsigned int vec, void *data)
{
	struct rte_epoll_event *rev;
	struct rte_epoll_data *epdata;
	int epfd_op;
	int rc = 0;

	if (!intr_handle || intr_handle->nb_efd == 0 ||
	    vec >= intr_handle->nb_efd) {
		RTE_LOG(ERR, EAL, "Wrong intr vector number.\n");
		return -EPERM;
	}

	switch (op) {
	case RTE_INTR_EVENT_ADD:
		epfd_op = EPOLL_CTL_ADD;
		rev = &intr_handle->elist[vec];
		if (rev->status != RTE_EPOLL_INVALID) {
			RTE_LOG(INFO, EAL, "Event already been added.\n");
			return -EEXIST;
		}

		/* attach to intr vector fd */
		epdata = &rev->epdata;
		epdata->event  = EPOLLIN | EPOLLPRI | EPOLLET;
		epdata->data   = data;
		epdata->cb_fun = (rte_intr_event_cb_t)eal_intr_proc_rxtx_intr;
		epdata->cb_arg = (void *)intr_handle;
		rc = rte_epoll_ctl(epfd, epfd_op, intr_handle->efds[vec], rev);
		if (!rc)
			RTE_LOG(DEBUG, EAL,
				"efd %d associated with vec %d added on epfd %d"
				"\n", rev->fd, vec, epfd);
		else
			rc = -EPERM;
		break;
	case RTE_INTR_EVENT_DEL:
		epfd_op = EPOLL_CTL_DEL;
		rev = &intr_handle->elist[vec];
		if (rev->status == RTE_EPOLL_INVALID) {
			RTE_LOG(INFO, EAL, "Event does not exist.\n");
			return -EPERM;
		}

		rc = rte_epoll_ctl(rev->epfd, epfd_op, rev->fd, rev);
		if (rc)
			rc = -EPERM;
		break;
	default:
		RTE_LOG(ERR, EAL, "event op type mismatch\n");
		rc = -EPERM;
	}

	return rc;
}
/*
 * Fallback definition used when Rx interrupt (VFIO) support is compiled
 * out; built under the opposite preprocessor condition to the version
 * above (the guards are elided here).
 */
int
rte_intr_rx_ctl(struct rte_intr_handle *intr_handle,
		int epfd, int op, unsigned int vec, void *data)
{
	RTE_SET_USED(intr_handle);
	RTE_SET_USED(epfd);
	RTE_SET_USED(op);
	RTE_SET_USED(vec);
	RTE_SET_USED(data);
	return -ENOTSUP;
}
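/*
 * A minimal Rx-interrupt usage sketch against the full implementation
 * above ("queue_id", "events" and "timeout_ms" are illustrative names):
 *
 *	rte_intr_rx_ctl(intr_handle, RTE_EPOLL_PER_THREAD,
 *			RTE_INTR_EVENT_ADD, queue_id, NULL);
 *	n = rte_epoll_wait(RTE_EPOLL_PER_THREAD, events, 8, timeout_ms);
 */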