TAILQ_ENTRY(rte_intr_callback) next;
rte_intr_callback_fn cb_fn; /**< callback address */
void *cb_arg; /**< parameter for callback */
+ uint8_t pending_delete; /**< delete after callback is called */
+ rte_intr_unregister_callback_fn ucb_fn; /**< fn to call before cb is deleted */
};
struct rte_intr_source {
return 0;
}
+/* unmask/ack legacy (INTx) interrupts */
+static int
+vfio_ack_intx(const struct rte_intr_handle *intr_handle)
+{
+ struct vfio_irq_set irq_set;
+
+ /* unmask INTx */
+ memset(&irq_set, 0, sizeof(irq_set));
+ irq_set.argsz = sizeof(irq_set);
+ irq_set.count = 1;
+ irq_set.flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_UNMASK;
+ irq_set.index = VFIO_PCI_INTX_IRQ_INDEX;
+ irq_set.start = 0;
+
+ if (ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, &irq_set)) {
+ RTE_LOG(ERR, EAL, "Error unmasking INTx interrupts for fd %d\n",
+ intr_handle->fd);
+ return -1;
+ }
+ return 0;
+}
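For reference, the same VFIO_DEVICE_SET_IRQS ioctl masks the line when VFIO_IRQ_SET_ACTION_MASK is used instead of the unmask action; a minimal sketch against the standard linux/vfio.h UAPI (vfio_mask_intx_sketch is a hypothetical helper, not part of this patch):

#include <string.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

/* hypothetical counterpart of vfio_ack_intx(): mask INTx on a VFIO device fd */
static int
vfio_mask_intx_sketch(int vfio_dev_fd)
{
	struct vfio_irq_set irq_set;

	memset(&irq_set, 0, sizeof(irq_set));
	irq_set.argsz = sizeof(irq_set);
	irq_set.count = 1;
	irq_set.flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_MASK;
	irq_set.index = VFIO_PCI_INTX_IRQ_INDEX;
	irq_set.start = 0;

	return ioctl(vfio_dev_fd, VFIO_DEVICE_SET_IRQS, &irq_set);
}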
+
/* enable MSI interrupts */
static int
vfio_enable_msi(const struct rte_intr_handle *intr_handle) {
}
callback->cb_fn = cb;
callback->cb_arg = cb_arg;
+ callback->pending_delete = 0;
+ callback->ucb_fn = NULL;
rte_spinlock_lock(&intr_lock);
return ret;
}
+int
+rte_intr_callback_unregister_pending(const struct rte_intr_handle *intr_handle,
+ rte_intr_callback_fn cb_fn, void *cb_arg,
+ rte_intr_unregister_callback_fn ucb_fn)
+{
+ int ret;
+ struct rte_intr_source *src;
+ struct rte_intr_callback *cb, *next;
+
+ /* do parameter checking first */
+ if (intr_handle == NULL || intr_handle->fd < 0) {
+ RTE_LOG(ERR, EAL,
+ "Unregistering with invalid input parameter\n");
+ return -EINVAL;
+ }
+
+ rte_spinlock_lock(&intr_lock);
+
+ /* check if an interrupt source exists for the fd */
+ TAILQ_FOREACH(src, &intr_sources, next)
+ if (src->intr_handle.fd == intr_handle->fd)
+ break;
+
+ /* No interrupt source registered for the fd */
+ if (src == NULL) {
+ ret = -ENOENT;
+
+ /* only usable if the source is active */
+ } else if (src->active == 0) {
+ ret = -EAGAIN;
+
+ } else {
+ ret = 0;
+
+ /* walk through the callbacks and mark all that match. */
+ for (cb = TAILQ_FIRST(&src->callbacks); cb != NULL; cb = next) {
+ next = TAILQ_NEXT(cb, next);
+ if (cb->cb_fn == cb_fn && (cb_arg == (void *)-1 ||
+ cb->cb_arg == cb_arg)) {
+ cb->pending_delete = 1;
+ cb->ucb_fn = ucb_fn;
+ ret++;
+ }
+ }
+ }
+
+ rte_spinlock_unlock(&intr_lock);
+
+ return ret;
+}
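The intended caller of this function is an interrupt callback itself: there the synchronous rte_intr_callback_unregister() would return -EAGAIN because the source is active. A hedged sketch, with my_ctx, my_isr and my_ctx_free as illustrative names that are not part of this patch:

#include <stdlib.h>
#include <rte_interrupts.h>

/* hypothetical per-device context */
struct my_ctx {
	struct rte_intr_handle intr_handle;
	int shutting_down;
};

/* called by the interrupt thread after it finishes walking the callback
 * list, so freeing cb_arg here cannot race with the running callback */
static void
my_ctx_free(struct rte_intr_handle *intr_handle, void *cb_arg)
{
	(void)intr_handle;
	free(cb_arg);
}

static void
my_isr(void *cb_arg)
{
	struct my_ctx *ctx = cb_arg;

	if (ctx->shutting_down)
		/* only marks the callback pending_delete; the actual removal
		 * and the my_ctx_free() call happen later, in
		 * eal_intr_process_interrupts() */
		rte_intr_callback_unregister_pending(&ctx->intr_handle,
				my_isr, ctx, my_ctx_free);
}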
+
int
rte_intr_callback_unregister(const struct rte_intr_handle *intr_handle,
rte_intr_callback_fn cb_fn, void *cb_arg)
return 0;
}
+/**
+ * A PMD generally calls this function at the end of its IRQ callback.
+ * Internally, it unmasks the interrupt if possible.
+ *
+ * For INTx, unmasking is required because the interrupt is auto-masked
+ * before the callback is invoked.
+ *
+ * For MSI/MSI-X, unmasking is typically not needed because the interrupt
+ * is not auto-masked. For the VFIO_MSIX and VFIO_MSI interrupt handle
+ * types, this function is a no-op.
+ */
+int
+rte_intr_ack(const struct rte_intr_handle *intr_handle)
+{
+ if (intr_handle && intr_handle->type == RTE_INTR_HANDLE_VDEV)
+ return 0;
+
+ if (!intr_handle || intr_handle->fd < 0 || intr_handle->uio_cfg_fd < 0)
+ return -1;
+
+ switch (intr_handle->type) {
+ /* Both acking and enabling are the same for UIO */
+ case RTE_INTR_HANDLE_UIO:
+ if (uio_intr_enable(intr_handle))
+ return -1;
+ break;
+ case RTE_INTR_HANDLE_UIO_INTX:
+ if (uio_intx_intr_enable(intr_handle))
+ return -1;
+ break;
+ /* not used at this moment */
+ case RTE_INTR_HANDLE_ALARM:
+ return -1;
+#ifdef VFIO_PRESENT
+ /* unlike INTx, VFIO MSI/MSI-X is implicitly acked, nothing to do */
+ case RTE_INTR_HANDLE_VFIO_MSIX:
+ case RTE_INTR_HANDLE_VFIO_MSI:
+ return 0;
+ case RTE_INTR_HANDLE_VFIO_LEGACY:
+ if (vfio_ack_intx(intr_handle))
+ return -1;
+ break;
+#ifdef HAVE_VFIO_DEV_REQ_INTERFACE
+ case RTE_INTR_HANDLE_VFIO_REQ:
+ return -1;
+#endif
+#endif
+ /* not used at this moment */
+ case RTE_INTR_HANDLE_DEV_EVENT:
+ return -1;
+ /* unknown handle type */
+ default:
+ RTE_LOG(ERR, EAL, "Unknown handle type of fd %d\n",
+ intr_handle->fd);
+ return -1;
+ }
+
+ return 0;
+}
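As a usage illustration, a device ISR would service the event and then re-arm the line via this function; my_pmd_isr and the cb_arg convention below are hypothetical:

#include <rte_interrupts.h>

/* hypothetical PMD interrupt handler */
static void
my_pmd_isr(void *cb_arg)
{
	const struct rte_intr_handle *intr_handle = cb_arg;

	/* ... read and clear the device interrupt cause here ... */

	/* re-arm the line: for INTx it was auto-masked before this callback
	 * ran, so it must be unmasked; for VFIO MSI/MSI-X this is a no-op */
	rte_intr_ack(intr_handle);
}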
+
int
rte_intr_disable(const struct rte_intr_handle *intr_handle)
{
eal_intr_process_interrupts(struct epoll_event *events, int nfds)
{
bool call = false;
- int n, bytes_read;
+ int n, bytes_read, rv;
struct rte_intr_source *src;
struct rte_intr_callback *cb, *next;
union rte_intr_read_buffer buf;
rte_spinlock_lock(&intr_lock);
}
}
-
/* we are done with that interrupt source, release it. */
src->active = 0;
+
+ rv = 0;
+
+ /* check if any callbacks are marked for removal */
+ for (cb = TAILQ_FIRST(&src->callbacks); cb != NULL; cb = next) {
+ next = TAILQ_NEXT(cb, next);
+ if (cb->pending_delete) {
+ TAILQ_REMOVE(&src->callbacks, cb, next);
+ if (cb->ucb_fn)
+ cb->ucb_fn(&src->intr_handle, cb->cb_arg);
+ free(cb);
+ rv++;
+ }
+ }
+
+ /* if all callbacks for that source were removed, drop the source too */
+ if (TAILQ_EMPTY(&src->callbacks)) {
+ TAILQ_REMOVE(&intr_sources, src, next);
+ free(src);
+ }
+
+ /* if any callbacks were removed, notify the pipe fd monitored by
+  * epoll_wait so that it rebuilds its wait list */
+ if (rv > 0 && write(intr_pipe.writefd, "1", 1) < 0) {
+ rte_spinlock_unlock(&intr_lock);
+ return -EPIPE;
+ }
+
rte_spinlock_unlock(&intr_lock);
}
static __attribute__((noreturn)) void *
eal_intr_thread_main(__rte_unused void *arg)
{
- struct epoll_event ev;
-
/* host thread, never break out */
for (;;) {
/* build up the epoll fd with all descriptors we are to
rte_spinlock_lock(&intr_lock);
TAILQ_FOREACH(src, &intr_sources, next) {
+ struct epoll_event ev;
+
if (src->callbacks.tqh_first == NULL)
continue; /* skip those with no callbacks */
+ memset(&ev, 0, sizeof(ev));
ev.events = EPOLLIN | EPOLLPRI | EPOLLRDHUP | EPOLLHUP;
ev.data.fd = src->intr_handle.fd;
return 0;
}
+
+int
+rte_thread_is_intr(void)
+{
+ return pthread_equal(intr_thread, pthread_self());
+}
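Together with the pending-unregister API above, this lets a driver pick a safe teardown path in either context; a hedged sketch reusing the hypothetical my_isr and my_ctx_free names from the earlier example:

/* hypothetical teardown helper: defer the removal when running inside the
 * EAL interrupt thread, remove synchronously otherwise */
static void
my_teardown(struct rte_intr_handle *intr_handle, void *cb_arg)
{
	if (rte_thread_is_intr())
		rte_intr_callback_unregister_pending(intr_handle,
				my_isr, cb_arg, my_ctx_free);
	else
		rte_intr_callback_unregister(intr_handle, my_isr, cb_arg);
}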