eal: new function to create control threads
[dpdk.git] / lib / librte_eal / linuxapp / eal / eal_interrupts.c
index 8c5b834..8fee317 100644 (file)
 #include <sys/epoll.h>
 #include <sys/signalfd.h>
 #include <sys/ioctl.h>
+#include <sys/eventfd.h>
+#include <assert.h>
+#include <stdbool.h>
 
 #include <rte_common.h>
 #include <rte_interrupts.h>
 #include <rte_memory.h>
-#include <rte_memzone.h>
 #include <rte_launch.h>
-#include <rte_tailq.h>
 #include <rte_eal.h>
 #include <rte_per_lcore.h>
 #include <rte_lcore.h>
 #include <rte_atomic.h>
 #include <rte_branch_prediction.h>
-#include <rte_ring.h>
 #include <rte_debug.h>
 #include <rte_log.h>
-#include <rte_mempool.h>
-#include <rte_pci.h>
 #include <rte_malloc.h>
 #include <rte_errno.h>
 #include <rte_spinlock.h>
+#include <rte_pause.h>
 
 #include "eal_private.h"
 #include "eal_vfio.h"
+#include "eal_thread.h"
 
 #define EAL_INTR_EPOLL_WAIT_FOREVER (-1)
+#define NB_OTHER_INTR               1
+
+static RTE_DEFINE_PER_LCORE(int, _epfd) = -1; /**< epoll fd per thread */
 
 /**
  * union for pipe fds.
@@ -127,10 +130,13 @@ static pthread_t intr_thread;
 #ifdef VFIO_PRESENT
 
 #define IRQ_SET_BUF_LEN  (sizeof(struct vfio_irq_set) + sizeof(int))
+/* irq set buffer length for queue interrupts and LSC interrupt */
+#define MSIX_IRQ_SET_BUF_LEN (sizeof(struct vfio_irq_set) + \
+                             sizeof(int) * (RTE_MAX_RXTX_INTR_VEC_ID + 1))
 
 /* enable legacy (INTx) interrupts */
 static int
-vfio_enable_intx(struct rte_intr_handle *intr_handle) {
+vfio_enable_intx(const struct rte_intr_handle *intr_handle) {
        struct vfio_irq_set *irq_set;
        char irq_set_buf[IRQ_SET_BUF_LEN];
        int len, ret;
@@ -177,7 +183,7 @@ vfio_enable_intx(struct rte_intr_handle *intr_handle) {
 
 /* disable legacy (INTx) interrupts */
 static int
-vfio_disable_intx(struct rte_intr_handle *intr_handle) {
+vfio_disable_intx(const struct rte_intr_handle *intr_handle) {
        struct vfio_irq_set *irq_set;
        char irq_set_buf[IRQ_SET_BUF_LEN];
        int len, ret;
@@ -188,14 +194,14 @@ vfio_disable_intx(struct rte_intr_handle *intr_handle) {
        irq_set = (struct vfio_irq_set *) irq_set_buf;
        irq_set->argsz = len;
        irq_set->count = 1;
-       irq_set->flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_UNMASK;
+       irq_set->flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_MASK;
        irq_set->index = VFIO_PCI_INTX_IRQ_INDEX;
        irq_set->start = 0;
 
        ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
 
        if (ret) {
-               RTE_LOG(ERR, EAL, "Error unmasking INTx interrupts for fd %d\n",
+               RTE_LOG(ERR, EAL, "Error masking INTx interrupts for fd %d\n",
                                                intr_handle->fd);
                return -1;
        }
@@ -218,9 +224,9 @@ vfio_disable_intx(struct rte_intr_handle *intr_handle) {
        return 0;
 }
 
-/* enable MSI-X interrupts */
+/* enable MSI interrupts */
 static int
-vfio_enable_msi(struct rte_intr_handle *intr_handle) {
+vfio_enable_msi(const struct rte_intr_handle *intr_handle) {
        int len, ret;
        char irq_set_buf[IRQ_SET_BUF_LEN];
        struct vfio_irq_set *irq_set;
@@ -244,29 +250,12 @@ vfio_enable_msi(struct rte_intr_handle *intr_handle) {
                                                intr_handle->fd);
                return -1;
        }
-
-       /* manually trigger interrupt to enable it */
-       memset(irq_set, 0, len);
-       len = sizeof(struct vfio_irq_set);
-       irq_set->argsz = len;
-       irq_set->count = 1;
-       irq_set->flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER;
-       irq_set->index = VFIO_PCI_MSI_IRQ_INDEX;
-       irq_set->start = 0;
-
-       ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
-
-       if (ret) {
-               RTE_LOG(ERR, EAL, "Error triggering MSI interrupts for fd %d\n",
-                                               intr_handle->fd);
-               return -1;
-       }
        return 0;
 }
 
-/* disable MSI-X interrupts */
+/* disable MSI interrupts */
 static int
-vfio_disable_msi(struct rte_intr_handle *intr_handle) {
+vfio_disable_msi(const struct rte_intr_handle *intr_handle) {
        struct vfio_irq_set *irq_set;
        char irq_set_buf[IRQ_SET_BUF_LEN];
        int len, ret;
@@ -291,9 +280,9 @@ vfio_disable_msi(struct rte_intr_handle *intr_handle) {
 
 /* enable MSI-X interrupts */
 static int
-vfio_enable_msix(struct rte_intr_handle *intr_handle) {
+vfio_enable_msix(const struct rte_intr_handle *intr_handle) {
        int len, ret;
-       char irq_set_buf[IRQ_SET_BUF_LEN];
+       char irq_set_buf[MSIX_IRQ_SET_BUF_LEN];
        struct vfio_irq_set *irq_set;
        int *fd_ptr;
 
@@ -301,12 +290,18 @@ vfio_enable_msix(struct rte_intr_handle *intr_handle) {
 
        irq_set = (struct vfio_irq_set *) irq_set_buf;
        irq_set->argsz = len;
-       irq_set->count = 1;
+       /* 0 < irq_set->count < RTE_MAX_RXTX_INTR_VEC_ID + 1 */
+       irq_set->count = intr_handle->max_intr ?
+               (intr_handle->max_intr > RTE_MAX_RXTX_INTR_VEC_ID + 1 ?
+               RTE_MAX_RXTX_INTR_VEC_ID + 1 : intr_handle->max_intr) : 1;
        irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
        irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
        irq_set->start = 0;
        fd_ptr = (int *) &irq_set->data;
-       *fd_ptr = intr_handle->fd;
+       /* INTR vector offset 0 reserve for non-efds mapping */
+       fd_ptr[RTE_INTR_VEC_ZERO_OFFSET] = intr_handle->fd;
+       memcpy(&fd_ptr[RTE_INTR_VEC_RXTX_OFFSET], intr_handle->efds,
+               sizeof(*intr_handle->efds) * intr_handle->nb_efd);
 
        ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
 
@@ -316,30 +311,14 @@ vfio_enable_msix(struct rte_intr_handle *intr_handle) {
                return -1;
        }
 
-       /* manually trigger interrupt to enable it */
-       memset(irq_set, 0, len);
-       len = sizeof(struct vfio_irq_set);
-       irq_set->argsz = len;
-       irq_set->count = 1;
-       irq_set->flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER;
-       irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
-       irq_set->start = 0;
-
-       ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
-
-       if (ret) {
-               RTE_LOG(ERR, EAL, "Error triggering MSI-X interrupts for fd %d\n",
-                                               intr_handle->fd);
-               return -1;
-       }
        return 0;
 }
 
 /* disable MSI-X interrupts */
 static int
-vfio_disable_msix(struct rte_intr_handle *intr_handle) {
+vfio_disable_msix(const struct rte_intr_handle *intr_handle) {
        struct vfio_irq_set *irq_set;
-       char irq_set_buf[IRQ_SET_BUF_LEN];
+       char irq_set_buf[MSIX_IRQ_SET_BUF_LEN];
        int len, ret;
 
        len = sizeof(struct vfio_irq_set);
@@ -362,7 +341,7 @@ vfio_disable_msix(struct rte_intr_handle *intr_handle) {
 #endif
 
 static int
-uio_intr_disable(struct rte_intr_handle *intr_handle)
+uio_intx_intr_disable(const struct rte_intr_handle *intr_handle)
 {
        unsigned char command_high;
 
@@ -386,7 +365,7 @@ uio_intr_disable(struct rte_intr_handle *intr_handle)
 }
 
 static int
-uio_intr_enable(struct rte_intr_handle *intr_handle)
+uio_intx_intr_enable(const struct rte_intr_handle *intr_handle)
 {
        unsigned char command_high;
 
@@ -409,8 +388,36 @@ uio_intr_enable(struct rte_intr_handle *intr_handle)
        return 0;
 }
 
+/*
+ * Disable device interrupts on a generic UIO handle by writing a 4-byte 0
+ * to the UIO fd (UIO irqcontrol convention: 0 masks, 1 unmasks).
+ * NOTE(review): assumes intr_handle->fd is a UIO device fd accepting the
+ * irqcontrol write protocol — confirm against the handle-type dispatch
+ * in rte_intr_disable().
+ * Returns 0 on success, -1 if the write fails (errno is logged).
+ */
+static int
+uio_intr_disable(const struct rte_intr_handle *intr_handle)
+{
+       const int value = 0;
+
+       if (write(intr_handle->fd, &value, sizeof(value)) < 0) {
+               RTE_LOG(ERR, EAL,
+                       "Error disabling interrupts for fd %d (%s)\n",
+                       intr_handle->fd, strerror(errno));
+               return -1;
+       }
+       return 0;
+}
+
+/*
+ * Enable device interrupts on a generic UIO handle by writing a 4-byte 1
+ * to the UIO fd (UIO irqcontrol convention: 0 masks, 1 unmasks).
+ * Mirror of uio_intr_disable(); returns 0 on success, -1 on write failure.
+ */
+static int
+uio_intr_enable(const struct rte_intr_handle *intr_handle)
+{
+       const int value = 1;
+
+       if (write(intr_handle->fd, &value, sizeof(value)) < 0) {
+               RTE_LOG(ERR, EAL,
+                       "Error enabling interrupts for fd %d (%s)\n",
+                       intr_handle->fd, strerror(errno));
+               return -1;
+       }
+       return 0;
+}
+
 int
-rte_intr_callback_register(struct rte_intr_handle *intr_handle,
+rte_intr_callback_register(const struct rte_intr_handle *intr_handle,
                        rte_intr_callback_fn cb, void *cb_arg)
 {
        int ret, wake_thread;
@@ -478,11 +485,11 @@ rte_intr_callback_register(struct rte_intr_handle *intr_handle,
                if (write(intr_pipe.writefd, "1", 1) < 0)
                        return -EPIPE;
 
-       return (ret);
+       return ret;
 }
 
 int
-rte_intr_callback_unregister(struct rte_intr_handle *intr_handle,
+rte_intr_callback_unregister(const struct rte_intr_handle *intr_handle,
                        rte_intr_callback_fn cb_fn, void *cb_arg)
 {
        int ret;
@@ -542,12 +549,15 @@ rte_intr_callback_unregister(struct rte_intr_handle *intr_handle,
                ret = -EPIPE;
        }
 
-       return (ret);
+       return ret;
 }
 
 int
-rte_intr_enable(struct rte_intr_handle *intr_handle)
+rte_intr_enable(const struct rte_intr_handle *intr_handle)
 {
+       if (intr_handle && intr_handle->type == RTE_INTR_HANDLE_VDEV)
+               return 0;
+
        if (!intr_handle || intr_handle->fd < 0 || intr_handle->uio_cfg_fd < 0)
                return -1;
 
@@ -557,6 +567,10 @@ rte_intr_enable(struct rte_intr_handle *intr_handle)
                if (uio_intr_enable(intr_handle))
                        return -1;
                break;
+       case RTE_INTR_HANDLE_UIO_INTX:
+               if (uio_intx_intr_enable(intr_handle))
+                       return -1;
+               break;
        /* not used at this moment */
        case RTE_INTR_HANDLE_ALARM:
                return -1;
@@ -586,8 +600,11 @@ rte_intr_enable(struct rte_intr_handle *intr_handle)
 }
 
 int
-rte_intr_disable(struct rte_intr_handle *intr_handle)
+rte_intr_disable(const struct rte_intr_handle *intr_handle)
 {
+       if (intr_handle && intr_handle->type == RTE_INTR_HANDLE_VDEV)
+               return 0;
+
        if (!intr_handle || intr_handle->fd < 0 || intr_handle->uio_cfg_fd < 0)
                return -1;
 
@@ -597,6 +614,10 @@ rte_intr_disable(struct rte_intr_handle *intr_handle)
                if (uio_intr_disable(intr_handle))
                        return -1;
                break;
+       case RTE_INTR_HANDLE_UIO_INTX:
+               if (uio_intx_intr_disable(intr_handle))
+                       return -1;
+               break;
        /* not used at this moment */
        case RTE_INTR_HANDLE_ALARM:
                return -1;
@@ -628,6 +649,7 @@ rte_intr_disable(struct rte_intr_handle *intr_handle)
 static int
 eal_intr_process_interrupts(struct epoll_event *events, int nfds)
 {
+       bool call = false;
        int n, bytes_read;
        struct rte_intr_source *src;
        struct rte_intr_callback *cb;
@@ -663,6 +685,7 @@ eal_intr_process_interrupts(struct epoll_event *events, int nfds)
                /* set the length to be read dor different handle type */
                switch (src->intr_handle.type) {
                case RTE_INTR_HANDLE_UIO:
+               case RTE_INTR_HANDLE_UIO_INTX:
                        bytes_read = sizeof(buf.uio_intr_count);
                        break;
                case RTE_INTR_HANDLE_ALARM:
@@ -675,29 +698,42 @@ eal_intr_process_interrupts(struct epoll_event *events, int nfds)
                        bytes_read = sizeof(buf.vfio_intr_count);
                        break;
 #endif
+               case RTE_INTR_HANDLE_VDEV:
+               case RTE_INTR_HANDLE_EXT:
+                       bytes_read = 0;
+                       call = true;
+                       break;
+
                default:
                        bytes_read = 1;
                        break;
                }
 
-               /**
-                * read out to clear the ready-to-be-read flag
-                * for epoll_wait.
-                */
-               bytes_read = read(events[n].data.fd, &buf, bytes_read);
-
-               if (bytes_read < 0)
-                       RTE_LOG(ERR, EAL, "Error reading from file "
-                               "descriptor %d: %s\n", events[n].data.fd,
-                                                       strerror(errno));
-               else if (bytes_read == 0)
-                       RTE_LOG(ERR, EAL, "Read nothing from file "
-                               "descriptor %d\n", events[n].data.fd);
+               if (bytes_read > 0) {
+                       /**
+                        * read out to clear the ready-to-be-read flag
+                        * for epoll_wait.
+                        */
+                       bytes_read = read(events[n].data.fd, &buf, bytes_read);
+                       if (bytes_read < 0) {
+                               if (errno == EINTR || errno == EWOULDBLOCK)
+                                       continue;
+
+                               RTE_LOG(ERR, EAL, "Error reading from file "
+                                       "descriptor %d: %s\n",
+                                       events[n].data.fd,
+                                       strerror(errno));
+                       } else if (bytes_read == 0)
+                               RTE_LOG(ERR, EAL, "Read nothing from file "
+                                       "descriptor %d\n", events[n].data.fd);
+                       else
+                               call = true;
+               }
 
                /* grab a lock, again to call callbacks and update status. */
                rte_spinlock_lock(&intr_lock);
 
-               if (bytes_read > 0) {
+               if (call) {
 
                        /* Finally, call all callbacks. */
                        TAILQ_FOREACH(cb, &src->callbacks, next) {
@@ -707,8 +743,7 @@ eal_intr_process_interrupts(struct epoll_event *events, int nfds)
                                rte_spinlock_unlock(&intr_lock);
 
                                /* call the actual callback */
-                               active_cb.cb_fn(&src->intr_handle,
-                                       active_cb.cb_arg);
+                               active_cb.cb_fn(active_cb.cb_arg);
 
                                /*get the lock back. */
                                rte_spinlock_lock(&intr_lock);
@@ -808,7 +843,7 @@ eal_intr_thread_main(__rte_unused void *arg)
                TAILQ_FOREACH(src, &intr_sources, next) {
                        if (src->callbacks.tqh_first == NULL)
                                continue; /* skip those with no callbacks */
-                       ev.events = EPOLLIN | EPOLLPRI;
+                       ev.events = EPOLLIN | EPOLLPRI | EPOLLRDHUP | EPOLLHUP;
                        ev.data.fd = src->intr_handle.fd;
 
                        /**
@@ -838,7 +873,8 @@ eal_intr_thread_main(__rte_unused void *arg)
 int
 rte_eal_intr_init(void)
 {
-       int ret = 0;
+       int ret = 0, ret_1 = 0;
+       char thread_name[RTE_MAX_THREAD_NAME_LEN];
 
        /* init the global interrupt source head */
        TAILQ_INIT(&intr_sources);
@@ -847,16 +883,379 @@ rte_eal_intr_init(void)
         * create a pipe which will be waited by epoll and notified to
         * rebuild the wait list of epoll.
         */
-       if (pipe(intr_pipe.pipefd) < 0)
+       if (pipe(intr_pipe.pipefd) < 0) {
+               rte_errno = errno;
                return -1;
+       }
 
        /* create the host thread to wait/handle the interrupt */
-       ret = pthread_create(&intr_thread, NULL,
+       ret = rte_ctrl_thread_create(&intr_thread, NULL,
                        eal_intr_thread_main, NULL);
-       if (ret != 0)
+       if (ret != 0) {
+               rte_errno = -ret;
                RTE_LOG(ERR, EAL,
                        "Failed to create thread for interrupt handling\n");
+       } else {
+               /* Set thread_name for aid in debugging. */
+               snprintf(thread_name, RTE_MAX_THREAD_NAME_LEN,
+                       "eal-intr-thread");
+               ret_1 = rte_thread_setname(intr_thread, thread_name);
+               if (ret_1 != 0)
+                       RTE_LOG(DEBUG, EAL,
+                       "Failed to set thread name for interrupt handling\n");
+       }
+
+       return ret;
+}
+
+/*
+ * Drain the fd that signalled an Rx/Tx queue interrupt so that a
+ * subsequent epoll_wait() does not report it again.  Installed as the
+ * rte_epoll callback by rte_intr_rx_ctl() (cast to rte_intr_event_cb_t,
+ * so @fd is the event fd and @intr_handle the registered cb_arg).
+ */
+static void
+eal_intr_proc_rxtx_intr(int fd, const struct rte_intr_handle *intr_handle)
+{
+       union rte_intr_read_buffer buf;
+       int bytes_read = 0;
+       int nbytes;
+
+       /* Size of one "interrupt event" record depends on the handle type. */
+       switch (intr_handle->type) {
+       case RTE_INTR_HANDLE_UIO:
+       case RTE_INTR_HANDLE_UIO_INTX:
+               bytes_read = sizeof(buf.uio_intr_count);
+               break;
+#ifdef VFIO_PRESENT
+       case RTE_INTR_HANDLE_VFIO_MSIX:
+       case RTE_INTR_HANDLE_VFIO_MSI:
+       case RTE_INTR_HANDLE_VFIO_LEGACY:
+               bytes_read = sizeof(buf.vfio_intr_count);
+               break;
+#endif
+       case RTE_INTR_HANDLE_VDEV:
+               bytes_read = intr_handle->efd_counter_size;
+               /* For vdev, number of bytes to read is set by driver */
+               break;
+       case RTE_INTR_HANDLE_EXT:
+               /* EXT handles carry no counter to drain. */
+               return;
+       default:
+               bytes_read = 1;
+               RTE_LOG(INFO, EAL, "unexpected intr type\n");
+               break;
+       }
+
+       /**
+        * read out to clear the ready-to-be-read flag
+        * for epoll_wait.
+        */
+       if (bytes_read == 0)
+               return;
+       do {
+               /* Retry only on EINTR/EWOULDBLOCK/EAGAIN; any other outcome
+                * (success, EOF, hard error) ends the loop via return. */
+               nbytes = read(fd, &buf, bytes_read);
+               if (nbytes < 0) {
+                       if (errno == EINTR || errno == EWOULDBLOCK ||
+                           errno == EAGAIN)
+                               continue;
+                       RTE_LOG(ERR, EAL,
+                               "Error reading from fd %d: %s\n",
+                               fd, strerror(errno));
+               } else if (nbytes == 0)
+                       RTE_LOG(ERR, EAL, "Read nothing from fd %d\n", fd);
+               return;
+       } while (1);
+}
+
+/*
+ * Deliver up to @n kernel epoll events into the caller-supplied
+ * rte_epoll_event array, invoking each event's optional callback.
+ * The VALID->EXEC compare-and-set skips events that are concurrently
+ * being torn down (see eal_epoll_data_safe_free), so a callback never
+ * runs on a freed event.  Returns the number of events delivered.
+ */
+static int
+eal_epoll_process_event(struct epoll_event *evs, unsigned int n,
+                       struct rte_epoll_event *events)
+{
+       unsigned int i, count = 0;
+       struct rte_epoll_event *rev;
+
+       for (i = 0; i < n; i++) {
+               rev = evs[i].data.ptr;
+               if (!rev || !rte_atomic32_cmpset(&rev->status, RTE_EPOLL_VALID,
+                                                RTE_EPOLL_EXEC))
+                       continue;
+
+               events[count].status        = RTE_EPOLL_VALID;
+               events[count].fd            = rev->fd;
+               events[count].epfd          = rev->epfd;
+               events[count].epdata.event  = rev->epdata.event;
+               events[count].epdata.data   = rev->epdata.data;
+               if (rev->epdata.cb_fun)
+                       rev->epdata.cb_fun(rev->fd,
+                                          rev->epdata.cb_arg);
+
+               /* ensure the copies/callback above complete before the
+                * event is marked VALID and visible to a concurrent free */
+               rte_compiler_barrier();
+               rev->status = RTE_EPOLL_VALID;
+               count++;
+       }
+       return count;
+}
+
+/*
+ * Create the per-thread epoll instance backing RTE_EPOLL_PER_THREAD.
+ * (The 255 size argument is only a historical hint; modern kernels
+ * ignore it.)  Returns the epoll fd, or -1 on failure.
+ */
+static inline int
+eal_init_tls_epfd(void)
+{
+       int pfd = epoll_create(255);
+
+       if (pfd < 0) {
+               RTE_LOG(ERR, EAL,
+                       "Cannot create epoll instance\n");
+               return -1;
+       }
+       return pfd;
+}
+
+/*
+ * Return the calling thread's lazily-created epoll fd (stored in the
+ * per-lcore TLS variable _epfd).  May return -1 if creation failed.
+ */
+int
+rte_intr_tls_epfd(void)
+{
+       if (RTE_PER_LCORE(_epfd) == -1)
+               RTE_PER_LCORE(_epfd) = eal_init_tls_epfd();
+
+       return RTE_PER_LCORE(_epfd);
+}
+
+/*
+ * Wait on @epfd (or the calling thread's private epoll fd when
+ * RTE_EPOLL_PER_THREAD is passed) for up to @maxevents events, with
+ * @timeout in epoll_wait() semantics (-1 = block forever, 0 = poll).
+ * Retries transparently on EINTR.  Returns the number of events
+ * delivered into @events, 0 on timeout, or -1 on error.
+ * NOTE(review): evs is a VLA sized by caller-supplied maxevents —
+ * presumably callers keep this small; a huge value risks stack overflow.
+ */
+int
+rte_epoll_wait(int epfd, struct rte_epoll_event *events,
+              int maxevents, int timeout)
+{
+       struct epoll_event evs[maxevents];
+       int rc;
+
+       if (!events) {
+               RTE_LOG(ERR, EAL, "rte_epoll_event can't be NULL\n");
+               return -1;
+       }
+
+       /* using per thread epoll fd */
+       if (epfd == RTE_EPOLL_PER_THREAD)
+               epfd = rte_intr_tls_epfd();
+
+       while (1) {
+               rc = epoll_wait(epfd, evs, maxevents, timeout);
+               if (likely(rc > 0)) {
+                       /* epoll_wait has at least one fd ready to read */
+                       rc = eal_epoll_process_event(evs, rc, events);
+                       break;
+               } else if (rc < 0) {
+                       if (errno == EINTR)
+                               continue;
+                       /* epoll_wait fail */
+                       RTE_LOG(ERR, EAL, "epoll_wait returns with fail %s\n",
+                               strerror(errno));
+                       rc = -1;
+                       break;
+               } else {
+                       /* rc == 0, epoll_wait timed out */
+                       break;
+               }
+       }
+
+       return rc;
+}
+
+/*
+ * Invalidate an rte_epoll_event safely: spin until no callback is
+ * executing on it (status stuck in RTE_EPOLL_EXEC, see
+ * eal_epoll_process_event) and atomically move VALID -> INVALID,
+ * then scrub the event payload.
+ */
+static inline void
+eal_epoll_data_safe_free(struct rte_epoll_event *ev)
+{
+       while (!rte_atomic32_cmpset(&ev->status, RTE_EPOLL_VALID,
+                                   RTE_EPOLL_INVALID))
+               while (ev->status != RTE_EPOLL_VALID)
+                       rte_pause();
+       memset(&ev->epdata, 0, sizeof(ev->epdata));
+       ev->fd = -1;
+       ev->epfd = -1;
+}
+
+/*
+ * Thin wrapper over epoll_ctl() that keeps the rte_epoll_event state
+ * machine (INVALID/VALID/EXEC) consistent with the kernel's view.
+ * @op is a raw EPOLL_CTL_* value; RTE_EPOLL_PER_THREAD as @epfd selects
+ * the calling thread's private epoll instance.
+ * Returns 0 on success, -1 on epoll_ctl failure (ADD is rolled back).
+ */
+int
+rte_epoll_ctl(int epfd, int op, int fd,
+             struct rte_epoll_event *event)
+{
+       struct epoll_event ev;
+
+       if (!event) {
+               RTE_LOG(ERR, EAL, "rte_epoll_event can't be NULL\n");
+               return -1;
+       }
+
+       /* using per thread epoll fd */
+       if (epfd == RTE_EPOLL_PER_THREAD)
+               epfd = rte_intr_tls_epfd();
+
+       if (op == EPOLL_CTL_ADD) {
+               event->status = RTE_EPOLL_VALID;
+               event->fd = fd;  /* ignore fd in event */
+               event->epfd = epfd;
+               ev.data.ptr = (void *)event;
+       }
+
+       ev.events = event->epdata.event;
+       if (epoll_ctl(epfd, op, fd, &ev) < 0) {
+               RTE_LOG(ERR, EAL, "Error op %d fd %d epoll_ctl, %s\n",
+                       op, fd, strerror(errno));
+               if (op == EPOLL_CTL_ADD)
+                       /* rollback status when CTL_ADD fail */
+                       event->status = RTE_EPOLL_INVALID;
+               return -1;
+       }
+
+       /* on successful DEL, wait out any in-flight callback and scrub */
+       if (op == EPOLL_CTL_DEL && event->status != RTE_EPOLL_INVALID)
+               eal_epoll_data_safe_free(event);
 
-       return -ret;
+       return 0;
 }
 
+/*
+ * Register (RTE_INTR_EVENT_ADD) or unregister (RTE_INTR_EVENT_DEL) the
+ * eventfd of Rx queue vector @vec on an epoll instance.  Vectors at or
+ * above RTE_INTR_VEC_RXTX_OFFSET are translated down to an index into
+ * intr_handle->efds[]; on ADD the drain callback
+ * eal_intr_proc_rxtx_intr is attached (edge-triggered).
+ * Returns 0 on success, -EPERM/-EEXIST on failure.
+ */
+int
+rte_intr_rx_ctl(struct rte_intr_handle *intr_handle, int epfd,
+               int op, unsigned int vec, void *data)
+{
+       struct rte_epoll_event *rev;
+       struct rte_epoll_data *epdata;
+       int epfd_op;
+       unsigned int efd_idx;
+       int rc = 0;
+
+       /* map the interrupt vector number to an efds[] index */
+       efd_idx = (vec >= RTE_INTR_VEC_RXTX_OFFSET) ?
+               (vec - RTE_INTR_VEC_RXTX_OFFSET) : vec;
+
+       if (!intr_handle || intr_handle->nb_efd == 0 ||
+           efd_idx >= intr_handle->nb_efd) {
+               RTE_LOG(ERR, EAL, "Wrong intr vector number.\n");
+               return -EPERM;
+       }
+
+       switch (op) {
+       case RTE_INTR_EVENT_ADD:
+               epfd_op = EPOLL_CTL_ADD;
+               rev = &intr_handle->elist[efd_idx];
+               if (rev->status != RTE_EPOLL_INVALID) {
+                       RTE_LOG(INFO, EAL, "Event already been added.\n");
+                       return -EEXIST;
+               }
+
+               /* attach to intr vector fd */
+               epdata = &rev->epdata;
+               epdata->event  = EPOLLIN | EPOLLPRI | EPOLLET;
+               epdata->data   = data;
+               epdata->cb_fun = (rte_intr_event_cb_t)eal_intr_proc_rxtx_intr;
+               epdata->cb_arg = (void *)intr_handle;
+               rc = rte_epoll_ctl(epfd, epfd_op,
+                                  intr_handle->efds[efd_idx], rev);
+               if (!rc)
+                       RTE_LOG(DEBUG, EAL,
+                               "efd %d associated with vec %d added on epfd %d"
+                               "\n", rev->fd, vec, epfd);
+               else
+                       rc = -EPERM;
+               break;
+       case RTE_INTR_EVENT_DEL:
+               epfd_op = EPOLL_CTL_DEL;
+               rev = &intr_handle->elist[efd_idx];
+               if (rev->status == RTE_EPOLL_INVALID) {
+                       RTE_LOG(INFO, EAL, "Event does not exist.\n");
+                       return -EPERM;
+               }
+
+               rc = rte_epoll_ctl(rev->epfd, epfd_op, rev->fd, rev);
+               if (rc)
+                       rc = -EPERM;
+               break;
+       default:
+               RTE_LOG(ERR, EAL, "event op type mismatch\n");
+               rc = -EPERM;
+       }
+
+       return rc;
+}
+
+/*
+ * Detach every registered queue-interrupt event of @intr_handle from its
+ * epoll instance.  If rte_epoll_ctl(DEL) fails (e.g. the epoll fd is
+ * already gone), the event is force-freed locally so the slot is reusable.
+ */
+void
+rte_intr_free_epoll_fd(struct rte_intr_handle *intr_handle)
+{
+       uint32_t i;
+       struct rte_epoll_event *rev;
+
+       for (i = 0; i < intr_handle->nb_efd; i++) {
+               rev = &intr_handle->elist[i];
+               if (rev->status == RTE_EPOLL_INVALID)
+                       continue;
+               if (rte_epoll_ctl(rev->epfd, EPOLL_CTL_DEL, rev->fd, rev)) {
+                       /* force free if the entry valid */
+                       eal_epoll_data_safe_free(rev);
+                       rev->status = RTE_EPOLL_INVALID;
+               }
+       }
+}
+
+/*
+ * Prepare per-queue event fds for Rx interrupt mode.
+ *  - VFIO MSI-X: allocate up to RTE_MAX_RXTX_INTR_VEC_ID non-blocking
+ *    eventfds, one per queue vector; max_intr reserves one extra slot
+ *    (NB_OTHER_INTR) for the non-queue (e.g. LSC) interrupt.
+ *  - VDEV: fds are owned by the driver; only validate that its declared
+ *    counter size fits the read buffer.
+ *  - otherwise: all queues share the single device fd.
+ * Returns 0 on success, -errno/-EINVAL on failure.  @nb_efd must be
+ * non-zero (asserted).
+ * NOTE(review): on a mid-loop eventfd() failure, fds created earlier in
+ * this call are not closed here — presumably the caller tears down via
+ * rte_intr_efd_disable; confirm.
+ */
+int
+rte_intr_efd_enable(struct rte_intr_handle *intr_handle, uint32_t nb_efd)
+{
+       uint32_t i;
+       int fd;
+       uint32_t n = RTE_MIN(nb_efd, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
+
+       assert(nb_efd != 0);
+
+       if (intr_handle->type == RTE_INTR_HANDLE_VFIO_MSIX) {
+               for (i = 0; i < n; i++) {
+                       fd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
+                       if (fd < 0) {
+                               RTE_LOG(ERR, EAL,
+                                       "can't setup eventfd, error %i (%s)\n",
+                                       errno, strerror(errno));
+                               return -errno;
+                       }
+                       intr_handle->efds[i] = fd;
+               }
+               intr_handle->nb_efd   = n;
+               intr_handle->max_intr = NB_OTHER_INTR + n;
+       } else if (intr_handle->type == RTE_INTR_HANDLE_VDEV) {
+               /* only check, initialization would be done in vdev driver.*/
+               if (intr_handle->efd_counter_size >
+                   sizeof(union rte_intr_read_buffer)) {
+                       RTE_LOG(ERR, EAL, "the efd_counter_size is oversized");
+                       return -EINVAL;
+               }
+       } else {
+               intr_handle->efds[0]  = intr_handle->fd;
+               intr_handle->nb_efd   = RTE_MIN(nb_efd, 1U);
+               intr_handle->max_intr = NB_OTHER_INTR;
+       }
+
+       return 0;
+}
+
+/*
+ * Tear down the per-queue event fds set up by rte_intr_efd_enable():
+ * detach them from epoll, then close them — but only when
+ * max_intr > nb_efd, i.e. the fds were allocated by the EAL (MSI-X
+ * path) rather than aliased to the shared device fd.
+ */
+void
+rte_intr_efd_disable(struct rte_intr_handle *intr_handle)
+{
+       uint32_t i;
+
+       rte_intr_free_epoll_fd(intr_handle);
+       if (intr_handle->max_intr > intr_handle->nb_efd) {
+               for (i = 0; i < intr_handle->nb_efd; i++)
+                       close(intr_handle->efds[i]);
+       }
+       intr_handle->nb_efd = 0;
+       intr_handle->max_intr = 0;
+}
+
+/*
+ * Return 1 if datapath (Rx queue) interrupts are configured on this
+ * handle (nb_efd != 0), else 0.  The double negation normalizes the
+ * count to a strict 0/1 boolean.
+ */
+int
+rte_intr_dp_is_en(struct rte_intr_handle *intr_handle)
+{
+       return !(!intr_handle->nb_efd);
+}
+
+/*
+ * Return 1 if the handle can still deliver non-queue interrupts (e.g.
+ * link status): trivially true when no datapath interrupts are enabled,
+ * otherwise true only if max_intr reserves slots beyond the queue efds.
+ */
+int
+rte_intr_allow_others(struct rte_intr_handle *intr_handle)
+{
+       if (!rte_intr_dp_is_en(intr_handle))
+               return 1;
+       else
+               return !!(intr_handle->max_intr - intr_handle->nb_efd);
+}
+
+/*
+ * Return 1 if the handle type supports multiple interrupt vectors
+ * (VFIO MSI-X, or vdev where the driver provides per-queue fds), 0
+ * otherwise.
+ */
+int
+rte_intr_cap_multiple(struct rte_intr_handle *intr_handle)
+{
+       if (intr_handle->type == RTE_INTR_HANDLE_VFIO_MSIX)
+               return 1;
+
+       if (intr_handle->type == RTE_INTR_HANDLE_VDEV)
+               return 1;
+
+       return 0;
+}