X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=lib%2Flibrte_eal%2Flinux%2Feal_interrupts.c;h=1dd994bd1fc6670ff22ee29ce70858c1ad2ecef2;hb=e863fe3a13da89787fdf3b5c590101a3c0f10af6;hp=cb8e10709839069ca97ea8e5580db89c25d7db7e;hpb=a083f8cc77460c15ac99a427ab6833dc8c8ae5bc;p=dpdk.git

diff --git a/lib/librte_eal/linux/eal_interrupts.c b/lib/librte_eal/linux/eal_interrupts.c
index cb8e107098..1dd994bd1f 100644
--- a/lib/librte_eal/linux/eal_interrupts.c
+++ b/lib/librte_eal/linux/eal_interrupts.c
@@ -26,7 +26,6 @@
 #include <rte_eal.h>
 #include <rte_per_lcore.h>
 #include <rte_lcore.h>
-#include <rte_atomic.h>
 #include <rte_branch_prediction.h>
 #include <rte_debug.h>
 #include <rte_log.h>
@@ -34,6 +33,7 @@
 #include <rte_errno.h>
 #include <rte_spinlock.h>
 #include <rte_malloc.h>
+#include <rte_eal_trace.h>
 
 #include "eal_private.h"
 #include "eal_vfio.h"
@@ -539,8 +539,9 @@ rte_intr_callback_register(const struct rte_intr_handle *intr_handle,
 	 */
 	if (wake_thread)
 		if (write(intr_pipe.writefd, "1", 1) < 0)
-			return -EPIPE;
+			ret = -EPIPE;
 
+	rte_eal_trace_intr_callback_register(intr_handle, cb, cb_arg, ret);
 	return ret;
 }
 
@@ -656,63 +657,78 @@ rte_intr_callback_unregister(const struct rte_intr_handle *intr_handle,
 		ret = -EPIPE;
 	}
 
+	rte_eal_trace_intr_callback_unregister(intr_handle, cb_fn, cb_arg,
+		ret);
 	return ret;
 }
 
 int
 rte_intr_enable(const struct rte_intr_handle *intr_handle)
 {
-	if (intr_handle && intr_handle->type == RTE_INTR_HANDLE_VDEV)
-		return 0;
+	int rc = 0;
 
-	if (!intr_handle || intr_handle->fd < 0 || intr_handle->uio_cfg_fd < 0)
+	if (intr_handle == NULL)
 		return -1;
 
+	if (intr_handle->type == RTE_INTR_HANDLE_VDEV) {
+		rc = 0;
+		goto out;
+	}
+
+	if (intr_handle->fd < 0 || intr_handle->uio_cfg_fd < 0) {
+		rc = -1;
+		goto out;
+	}
+
 	switch (intr_handle->type){
 	/* write to the uio fd to enable the interrupt */
 	case RTE_INTR_HANDLE_UIO:
 		if (uio_intr_enable(intr_handle))
-			return -1;
+			rc = -1;
 		break;
 	case RTE_INTR_HANDLE_UIO_INTX:
 		if (uio_intx_intr_enable(intr_handle))
-			return -1;
+			rc = -1;
 		break;
 	/* not used at this moment */
 	case RTE_INTR_HANDLE_ALARM:
-		return -1;
+		rc = -1;
+		break;
 #ifdef VFIO_PRESENT
 	case RTE_INTR_HANDLE_VFIO_MSIX:
 		if (vfio_enable_msix(intr_handle))
-			return -1;
+			rc = -1;
 		break;
 	case RTE_INTR_HANDLE_VFIO_MSI:
 		if (vfio_enable_msi(intr_handle))
-			return -1;
+			rc = -1;
 		break;
 	case RTE_INTR_HANDLE_VFIO_LEGACY:
 		if (vfio_enable_intx(intr_handle))
-			return -1;
+			rc = -1;
 		break;
 #ifdef HAVE_VFIO_DEV_REQ_INTERFACE
 	case RTE_INTR_HANDLE_VFIO_REQ:
 		if (vfio_enable_req(intr_handle))
-			return -1;
+			rc = -1;
 		break;
 #endif
 #endif
 	/* not used at this moment */
 	case RTE_INTR_HANDLE_DEV_EVENT:
-		return -1;
+		rc = -1;
+		break;
 	/* unknown handle type */
 	default:
 		RTE_LOG(ERR, EAL,
 			"Unknown handle type of fd %d\n",
 					intr_handle->fd);
-		return -1;
+		rc = -1;
+		break;
 	}
-
-	return 0;
+out:
+	rte_eal_trace_intr_enable(intr_handle, rc);
+	return rc;
 }
 
 /**
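The hunk above rewrites rte_intr_enable() so that every early return becomes an assignment to rc followed by goto out; the single exit point is what lets the new rte_eal_trace_intr_enable() tracepoint record the outcome of every call, success or failure. A minimal standalone sketch of the same single-exit pattern, with a hypothetical trace_enable() standing in for the generated tracepoint:

#include <stdio.h>

/* hypothetical stand-in for an rte_eal_trace_*() tracepoint */
static void
trace_enable(int fd, int rc)
{
        printf("intr_enable: fd=%d rc=%d\n", fd, rc);
}

static int
intr_enable(int fd)
{
        int rc = 0;

        if (fd < 0) {
                rc = -1;
                goto out;       /* every failure path funnels through out: */
        }
        /* ... device-specific enable work would go here ... */
out:
        trace_enable(fd, rc);   /* fires exactly once per call */
        return rc;
}

int
main(void)
{
        intr_enable(3);         /* traced with rc = 0 */
        intr_enable(-1);        /* traced with rc = -1 */
        return 0;
}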
@@ -778,57 +794,70 @@ rte_intr_ack(const struct rte_intr_handle *intr_handle)
 int
 rte_intr_disable(const struct rte_intr_handle *intr_handle)
 {
-	if (intr_handle && intr_handle->type == RTE_INTR_HANDLE_VDEV)
-		return 0;
+	int rc = 0;
 
-	if (!intr_handle || intr_handle->fd < 0 || intr_handle->uio_cfg_fd < 0)
+	if (intr_handle == NULL)
 		return -1;
 
+	if (intr_handle->type == RTE_INTR_HANDLE_VDEV) {
+		rc = 0;
+		goto out;
+	}
+
+	if (intr_handle->fd < 0 || intr_handle->uio_cfg_fd < 0) {
+		rc = -1;
+		goto out;
+	}
+
 	switch (intr_handle->type){
 	/* write to the uio fd to disable the interrupt */
 	case RTE_INTR_HANDLE_UIO:
 		if (uio_intr_disable(intr_handle))
-			return -1;
+			rc = -1;
 		break;
 	case RTE_INTR_HANDLE_UIO_INTX:
 		if (uio_intx_intr_disable(intr_handle))
-			return -1;
+			rc = -1;
 		break;
 	/* not used at this moment */
 	case RTE_INTR_HANDLE_ALARM:
-		return -1;
+		rc = -1;
+		break;
 #ifdef VFIO_PRESENT
 	case RTE_INTR_HANDLE_VFIO_MSIX:
 		if (vfio_disable_msix(intr_handle))
-			return -1;
+			rc = -1;
 		break;
 	case RTE_INTR_HANDLE_VFIO_MSI:
 		if (vfio_disable_msi(intr_handle))
-			return -1;
+			rc = -1;
 		break;
 	case RTE_INTR_HANDLE_VFIO_LEGACY:
 		if (vfio_disable_intx(intr_handle))
-			return -1;
+			rc = -1;
 		break;
 #ifdef HAVE_VFIO_DEV_REQ_INTERFACE
 	case RTE_INTR_HANDLE_VFIO_REQ:
 		if (vfio_disable_req(intr_handle))
-			return -1;
+			rc = -1;
 		break;
 #endif
 #endif
 	/* not used at this moment */
 	case RTE_INTR_HANDLE_DEV_EVENT:
-		return -1;
+		rc = -1;
+		break;
 	/* unknown handle type */
 	default:
 		RTE_LOG(ERR, EAL,
 			"Unknown handle type of fd %d\n",
 					intr_handle->fd);
-		return -1;
+		rc = -1;
+		break;
 	}
-
-	return 0;
+out:
+	rte_eal_trace_intr_disable(intr_handle, rc);
+	return rc;
 }
 
 static int
@@ -984,7 +1013,7 @@ eal_intr_process_interrupts(struct epoll_event *events, int nfds)
 	}
 
 	/* notify the pipe fd waited by epoll_wait to rebuild the wait list */
-	if (rv >= 0 && write(intr_pipe.writefd, "1", 1) < 0) {
+	if (rv > 0 && write(intr_pipe.writefd, "1", 1) < 0) {
 		rte_spinlock_unlock(&intr_lock);
 		return -EPIPE;
 	}
@@ -1042,7 +1071,7 @@ eal_intr_handle_interrupts(int pfd, unsigned totalfds)
  * @return
  *  never return;
  */
-static __attribute__((noreturn)) void *
+static __rte_noreturn void *
 eal_intr_thread_main(__rte_unused void *arg)
 {
 	/* host thread, never break out */
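The next hunk replaces the rte_atomic32_cmpset()/rte_compiler_barrier() pair in eal_epoll_process_event() with GCC/Clang __atomic builtins: an ACQUIRE compare-and-swap claims the event slot (VALID -> EXEC) and a RELEASE store hands it back, so writes made while the slot is held cannot be reordered past the status update. A minimal standalone sketch of that claim/publish protocol; the ST_* values and payload are illustrative, not DPDK's RTE_EPOLL_* states:

#include <stdint.h>
#include <stdio.h>

#define ST_VALID 1u
#define ST_EXEC  2u

static uint32_t status = ST_VALID;
static int payload;     /* data guarded by the VALID <-> EXEC protocol */

static int
claim_and_update(int v)
{
        uint32_t expected = ST_VALID;

        /* ACQUIRE on success: the payload access below cannot be
         * hoisted above the CAS that claims the slot.
         */
        if (!__atomic_compare_exchange_n(&status, &expected, ST_EXEC,
                        0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
                return -1;      /* another thread holds the slot */

        payload = v;            /* exclusive access while status == ST_EXEC */

        /* RELEASE: the payload write is visible before VALID reappears */
        __atomic_store_n(&status, ST_VALID, __ATOMIC_RELEASE);
        return 0;
}

int
main(void)
{
        claim_and_update(42);
        printf("payload=%d status=%u\n", payload, (unsigned)status);
        return 0;
}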
@@ -1195,24 +1224,34 @@ eal_epoll_process_event(struct epoll_event *evs, unsigned int n,
 {
 	unsigned int i, count = 0;
 	struct rte_epoll_event *rev;
+	uint32_t valid_status;
 
 	for (i = 0; i < n; i++) {
 		rev = evs[i].data.ptr;
-		if (!rev || !rte_atomic32_cmpset(&rev->status, RTE_EPOLL_VALID,
-						 RTE_EPOLL_EXEC))
+		valid_status = RTE_EPOLL_VALID;
+		/* ACQUIRE memory ordering here pairs with RELEASE
+		 * ordering below acting as a lock to synchronize
+		 * the event data updating.
+		 */
+		if (!rev || !__atomic_compare_exchange_n(&rev->status,
+				&valid_status, RTE_EPOLL_EXEC, 0,
+				__ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
 			continue;
 
 		events[count].status        = RTE_EPOLL_VALID;
 		events[count].fd            = rev->fd;
 		events[count].epfd          = rev->epfd;
-		events[count].epdata.event  = rev->epdata.event;
+		events[count].epdata.event  = evs[i].events;
 		events[count].epdata.data   = rev->epdata.data;
 		if (rev->epdata.cb_fun)
 			rev->epdata.cb_fun(rev->fd,
 					   rev->epdata.cb_arg);
 
-		rte_compiler_barrier();
-		rev->status = RTE_EPOLL_VALID;
+		/* the status update should be observed after
+		 * the other fields change.
+		 */
+		__atomic_store_n(&rev->status, RTE_EPOLL_VALID,
+				__ATOMIC_RELEASE);
 		count++;
 	}
 	return count;
@@ -1240,9 +1279,9 @@ rte_intr_tls_epfd(void)
 	return RTE_PER_LCORE(_epfd);
 }
 
-int
-rte_epoll_wait(int epfd, struct rte_epoll_event *events,
-	       int maxevents, int timeout)
+static int
+eal_epoll_wait(int epfd, struct rte_epoll_event *events,
+	       int maxevents, int timeout, bool interruptible)
 {
 	struct epoll_event evs[maxevents];
 	int rc;
@@ -1263,8 +1302,12 @@ rte_epoll_wait(int epfd, struct rte_epoll_event *events,
 			rc = eal_epoll_process_event(evs, rc, events);
 			break;
 		} else if (rc < 0) {
-			if (errno == EINTR)
-				continue;
+			if (errno == EINTR) {
+				if (interruptible)
+					return -1;
+				else
+					continue;
+			}
 			/* epoll_wait fail */
 			RTE_LOG(ERR, EAL, "epoll_wait returns with fail %s\n",
 				strerror(errno));
@@ -1279,13 +1322,32 @@
 	return rc;
 }
 
+int
+rte_epoll_wait(int epfd, struct rte_epoll_event *events,
+	       int maxevents, int timeout)
+{
+	return eal_epoll_wait(epfd, events, maxevents, timeout, false);
+}
+
+int
+rte_epoll_wait_interruptible(int epfd, struct rte_epoll_event *events,
+			     int maxevents, int timeout)
+{
+	return eal_epoll_wait(epfd, events, maxevents, timeout, true);
+}
+
 static inline void
 eal_epoll_data_safe_free(struct rte_epoll_event *ev)
 {
-	while (!rte_atomic32_cmpset(&ev->status, RTE_EPOLL_VALID,
-				    RTE_EPOLL_INVALID))
-		while (ev->status != RTE_EPOLL_VALID)
+	uint32_t valid_status = RTE_EPOLL_VALID;
+
+	while (!__atomic_compare_exchange_n(&ev->status, &valid_status,
+		RTE_EPOLL_INVALID, 0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) {
+		while (__atomic_load_n(&ev->status,
+				__ATOMIC_RELAXED) != RTE_EPOLL_VALID)
 			rte_pause();
+		valid_status = RTE_EPOLL_VALID;
+	}
 
 	memset(&ev->epdata, 0, sizeof(ev->epdata));
 	ev->fd = -1;
 	ev->epfd = -1;
@@ -1307,7 +1369,8 @@ rte_epoll_ctl(int epfd, int op, int fd,
 		epfd = rte_intr_tls_epfd();
 
 	if (op == EPOLL_CTL_ADD) {
-		event->status = RTE_EPOLL_VALID;
+		__atomic_store_n(&event->status, RTE_EPOLL_VALID,
+			__ATOMIC_RELAXED);
 		event->fd = fd;  /* ignore fd in event */
 		event->epfd = epfd;
 		ev.data.ptr = (void *)event;
@@ -1319,11 +1382,13 @@ rte_epoll_ctl(int epfd, int op, int fd,
 			op, fd, strerror(errno));
 		if (op == EPOLL_CTL_ADD)
 			/* rollback status when CTL_ADD fail */
-			event->status = RTE_EPOLL_INVALID;
+			__atomic_store_n(&event->status, RTE_EPOLL_INVALID,
+				__ATOMIC_RELAXED);
 		return -1;
 	}
 
-	if (op == EPOLL_CTL_DEL && event->status != RTE_EPOLL_INVALID)
+	if (op == EPOLL_CTL_DEL && __atomic_load_n(&event->status,
+			__ATOMIC_RELAXED) != RTE_EPOLL_INVALID)
 		eal_epoll_data_safe_free(event);
 
 	return 0;
@@ -1352,7 +1417,8 @@ rte_intr_rx_ctl(struct rte_intr_handle *intr_handle, int epfd,
 	case RTE_INTR_EVENT_ADD:
 		epfd_op = EPOLL_CTL_ADD;
 		rev = &intr_handle->elist[efd_idx];
-		if (rev->status != RTE_EPOLL_INVALID) {
+		if (__atomic_load_n(&rev->status,
+				__ATOMIC_RELAXED) != RTE_EPOLL_INVALID) {
 			RTE_LOG(INFO, EAL, "Event already been added.\n");
 			return -EEXIST;
 		}
@@ -1375,7 +1441,8 @@ rte_intr_rx_ctl(struct rte_intr_handle *intr_handle, int epfd,
 	case RTE_INTR_EVENT_DEL:
 		epfd_op = EPOLL_CTL_DEL;
 		rev = &intr_handle->elist[efd_idx];
-		if (rev->status == RTE_EPOLL_INVALID) {
+		if (__atomic_load_n(&rev->status,
+				__ATOMIC_RELAXED) == RTE_EPOLL_INVALID) {
 			RTE_LOG(INFO, EAL, "Event does not exist.\n");
 			return -EPERM;
 		}
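The two wrappers above split the old rte_epoll_wait() body into an internal eal_epoll_wait() parameterized on interruptible: the long-standing entry point keeps retrying when epoll_wait() is interrupted by a signal, while the new rte_epoll_wait_interruptible() returns -1 with errno still set to EINTR so the caller can react. A hedged usage sketch, assuming an epfd already populated via rte_epoll_ctl(); MAX_EVENTS and poll_once() are illustrative names:

#include <errno.h>
#include <rte_interrupts.h>

#define MAX_EVENTS 8

/* Poll for events but let a signal (e.g. SIGINT during shutdown)
 * break the wait instead of being swallowed by the retry loop.
 */
static int
poll_once(int epfd)
{
        struct rte_epoll_event events[MAX_EVENTS];
        int n;

        n = rte_epoll_wait_interruptible(epfd, events, MAX_EVENTS,
                        100 /* ms */);
        if (n < 0 && errno == EINTR)
                return 0;       /* interrupted: let the caller decide */
        return n;               /* event count, or a real error */
}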
@@ -1400,12 +1467,12 @@ rte_intr_free_epoll_fd(struct rte_intr_handle *intr_handle)
 
 	for (i = 0; i < intr_handle->nb_efd; i++) {
 		rev = &intr_handle->elist[i];
-		if (rev->status == RTE_EPOLL_INVALID)
+		if (__atomic_load_n(&rev->status,
+				__ATOMIC_RELAXED) == RTE_EPOLL_INVALID)
 			continue;
 		if (rte_epoll_ctl(rev->epfd, EPOLL_CTL_DEL, rev->fd, rev)) {
 			/* force free if the entry valid */
 			eal_epoll_data_safe_free(rev);
-			rev->status = RTE_EPOLL_INVALID;
 		}
 	}
 }
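One detail of the converted loops above worth calling out: on failure, __atomic_compare_exchange_n() overwrites its expected argument with the value it actually observed, so eal_epoll_data_safe_free() must reset valid_status to RTE_EPOLL_VALID before retrying, while the inner loop spins with rte_pause() until the owner drops the slot back to VALID. A standalone sketch of that reset-and-retry idiom; the ST_* states are illustrative, and a plain spin replaces rte_pause():

#include <stdint.h>

#define ST_INVALID 0u
#define ST_VALID   1u

static void
retire_slot(uint32_t *status)
{
        uint32_t expected = ST_VALID;

        while (!__atomic_compare_exchange_n(status, &expected, ST_INVALID,
                        0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) {
                /* CAS failure clobbered `expected`; wait for VALID ... */
                while (__atomic_load_n(status, __ATOMIC_RELAXED) != ST_VALID)
                        ;       /* rte_pause() in the real code */
                /* ... and reset it before the next attempt */
                expected = ST_VALID;
        }
}

int
main(void)
{
        uint32_t s = ST_VALID;

        retire_slot(&s);
        return s == ST_INVALID ? 0 : 1;
}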