#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_debug.h>
#include <rte_log.h>
#include <rte_spinlock.h>
#include <rte_pause.h>
#include <rte_vfio.h>
#include <rte_eal_trace.h>

#include "eal_private.h"
#include "eal_vfio.h"
*/
if (wake_thread)
if (write(intr_pipe.writefd, "1", 1) < 0)
- return -EPIPE;
+ ret = -EPIPE;
+ rte_eal_trace_intr_callback_register(intr_handle, cb, cb_arg, ret);
return ret;
}
ret = -EPIPE;
}
+ rte_eal_trace_intr_callback_unregister(intr_handle, cb_fn, cb_arg,
+ ret);
return ret;
}
int
rte_intr_enable(const struct rte_intr_handle *intr_handle)
{
- if (intr_handle && intr_handle->type == RTE_INTR_HANDLE_VDEV)
- return 0;
+ int rc = 0;
- if (!intr_handle || intr_handle->fd < 0 || intr_handle->uio_cfg_fd < 0)
+ if (intr_handle == NULL)
return -1;
+ if (intr_handle->type == RTE_INTR_HANDLE_VDEV) {
+ rc = 0;
+ goto out;
+ }
+
+ if (intr_handle->fd < 0 || intr_handle->uio_cfg_fd < 0) {
+ rc = -1;
+ goto out;
+ }
+
switch (intr_handle->type){
/* write to the uio fd to enable the interrupt */
case RTE_INTR_HANDLE_UIO:
if (uio_intr_enable(intr_handle))
- return -1;
+ rc = -1;
break;
case RTE_INTR_HANDLE_UIO_INTX:
if (uio_intx_intr_enable(intr_handle))
- return -1;
+ rc = -1;
break;
/* not used at this moment */
case RTE_INTR_HANDLE_ALARM:
- return -1;
+ rc = -1;
+ break;
#ifdef VFIO_PRESENT
case RTE_INTR_HANDLE_VFIO_MSIX:
if (vfio_enable_msix(intr_handle))
- return -1;
+ rc = -1;
break;
case RTE_INTR_HANDLE_VFIO_MSI:
if (vfio_enable_msi(intr_handle))
- return -1;
+ rc = -1;
break;
case RTE_INTR_HANDLE_VFIO_LEGACY:
if (vfio_enable_intx(intr_handle))
- return -1;
+ rc = -1;
break;
#ifdef HAVE_VFIO_DEV_REQ_INTERFACE
case RTE_INTR_HANDLE_VFIO_REQ:
if (vfio_enable_req(intr_handle))
- return -1;
+ rc = -1;
break;
#endif
#endif
/* not used at this moment */
case RTE_INTR_HANDLE_DEV_EVENT:
- return -1;
+ rc = -1;
+ break;
/* unknown handle type */
default:
RTE_LOG(ERR, EAL,
"Unknown handle type of fd %d\n",
intr_handle->fd);
- return -1;
+ rc = -1;
+ break;
}
-
- return 0;
+out:
+ rte_eal_trace_intr_enable(intr_handle, rc);
+ return rc;
}
/**
int
rte_intr_disable(const struct rte_intr_handle *intr_handle)
{
- if (intr_handle && intr_handle->type == RTE_INTR_HANDLE_VDEV)
- return 0;
+ int rc = 0;
- if (!intr_handle || intr_handle->fd < 0 || intr_handle->uio_cfg_fd < 0)
+ if (intr_handle == NULL)
return -1;
+ if (intr_handle->type == RTE_INTR_HANDLE_VDEV) {
+ rc = 0;
+ goto out;
+ }
+
+ if (intr_handle->fd < 0 || intr_handle->uio_cfg_fd < 0) {
+ rc = -1;
+ goto out;
+ }
+
switch (intr_handle->type){
/* write to the uio fd to disable the interrupt */
case RTE_INTR_HANDLE_UIO:
if (uio_intr_disable(intr_handle))
- return -1;
+ rc = -1;
break;
case RTE_INTR_HANDLE_UIO_INTX:
if (uio_intx_intr_disable(intr_handle))
- return -1;
+ rc = -1;
break;
/* not used at this moment */
case RTE_INTR_HANDLE_ALARM:
- return -1;
+ rc = -1;
+ break;
#ifdef VFIO_PRESENT
case RTE_INTR_HANDLE_VFIO_MSIX:
if (vfio_disable_msix(intr_handle))
- return -1;
+ rc = -1;
break;
case RTE_INTR_HANDLE_VFIO_MSI:
if (vfio_disable_msi(intr_handle))
- return -1;
+ rc = -1;
break;
case RTE_INTR_HANDLE_VFIO_LEGACY:
if (vfio_disable_intx(intr_handle))
- return -1;
+ rc = -1;
break;
#ifdef HAVE_VFIO_DEV_REQ_INTERFACE
case RTE_INTR_HANDLE_VFIO_REQ:
if (vfio_disable_req(intr_handle))
- return -1;
+ rc = -1;
break;
#endif
#endif
/* not used at this moment */
case RTE_INTR_HANDLE_DEV_EVENT:
- return -1;
+ rc = -1;
+ break;
/* unknown handle type */
default:
RTE_LOG(ERR, EAL,
"Unknown handle type of fd %d\n",
intr_handle->fd);
- return -1;
+ rc = -1;
+ break;
}
-
- return 0;
+out:
+ rte_eal_trace_intr_disable(intr_handle, rc);
+ return rc;
}
static int
}
/* notify the pipe fd waited by epoll_wait to rebuild the wait list */
- if (rv >= 0 && write(intr_pipe.writefd, "1", 1) < 0) {
+ if (rv > 0 && write(intr_pipe.writefd, "1", 1) < 0) {
rte_spinlock_unlock(&intr_lock);
return -EPIPE;
}
* @return
* never return;
*/
-static __attribute__((noreturn)) void *
+static __rte_noreturn void *
eal_intr_thread_main(__rte_unused void *arg)
{
/* host thread, never break out */
{
unsigned int i, count = 0;
struct rte_epoll_event *rev;
+ uint32_t valid_status;
for (i = 0; i < n; i++) {
rev = evs[i].data.ptr;
- if (!rev || !rte_atomic32_cmpset(&rev->status, RTE_EPOLL_VALID,
- RTE_EPOLL_EXEC))
+ valid_status = RTE_EPOLL_VALID;
+ /* ACQUIRE memory ordering here pairs with RELEASE
+ * ordering below acting as a lock to synchronize
+ * the event data updating.
+ */
+ if (!rev || !__atomic_compare_exchange_n(&rev->status,
+ &valid_status, RTE_EPOLL_EXEC, 0,
+ __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
continue;
events[count].status = RTE_EPOLL_VALID;
events[count].fd = rev->fd;
events[count].epfd = rev->epfd;
- events[count].epdata.event = rev->epdata.event;
+ events[count].epdata.event = evs[i].events;
events[count].epdata.data = rev->epdata.data;
if (rev->epdata.cb_fun)
rev->epdata.cb_fun(rev->fd,
rev->epdata.cb_arg);
- rte_compiler_barrier();
- rev->status = RTE_EPOLL_VALID;
+ /* the status update should be observed after
+ * the other fields change.
+ */
+ __atomic_store_n(&rev->status, RTE_EPOLL_VALID,
+ __ATOMIC_RELEASE);
count++;
}
return count;
return RTE_PER_LCORE(_epfd);
}
-int
-rte_epoll_wait(int epfd, struct rte_epoll_event *events,
- int maxevents, int timeout)
+static int
+eal_epoll_wait(int epfd, struct rte_epoll_event *events,
+ int maxevents, int timeout, bool interruptible)
{
struct epoll_event evs[maxevents];
int rc;
rc = eal_epoll_process_event(evs, rc, events);
break;
} else if (rc < 0) {
- if (errno == EINTR)
- continue;
+ if (errno == EINTR) {
+ if (interruptible)
+ return -1;
+ else
+ continue;
+ }
/* epoll_wait fail */
RTE_LOG(ERR, EAL, "epoll_wait returns with fail %s\n",
strerror(errno));
return rc;
}
/**
 * Wait for epoll events on the given epoll fd.
 *
 * Thin wrapper around eal_epoll_wait() with interruptible = false:
 * if the underlying epoll_wait() is interrupted by a signal (EINTR),
 * the wait is transparently restarted.
 *
 * @param epfd
 *   Epoll instance fd to wait on.
 * @param events
 *   Array to be filled with the triggered events.
 * @param maxevents
 *   Capacity of the events array.
 * @param timeout
 *   Timeout in milliseconds (-1 blocks indefinitely).
 * @return
 *   Number of ready events, or a negative value on error.
 */
int
rte_epoll_wait(int epfd, struct rte_epoll_event *events,
	       int maxevents, int timeout)
{
	return eal_epoll_wait(epfd, events, maxevents, timeout, false);
}

/**
 * Wait for epoll events on the given epoll fd, returning on signal.
 *
 * Thin wrapper around eal_epoll_wait() with interruptible = true:
 * if the underlying epoll_wait() is interrupted by a signal (EINTR),
 * this returns -1 instead of restarting the wait, letting the caller
 * react to the signal.
 *
 * @param epfd
 *   Epoll instance fd to wait on.
 * @param events
 *   Array to be filled with the triggered events.
 * @param maxevents
 *   Capacity of the events array.
 * @param timeout
 *   Timeout in milliseconds (-1 blocks indefinitely).
 * @return
 *   Number of ready events, or a negative value on error/interrupt.
 */
int
rte_epoll_wait_interruptible(int epfd, struct rte_epoll_event *events,
			     int maxevents, int timeout)
{
	return eal_epoll_wait(epfd, events, maxevents, timeout, true);
}

static inline void
eal_epoll_data_safe_free(struct rte_epoll_event *ev)
{
- while (!rte_atomic32_cmpset(&ev->status, RTE_EPOLL_VALID,
- RTE_EPOLL_INVALID))
- while (ev->status != RTE_EPOLL_VALID)
+ uint32_t valid_status = RTE_EPOLL_VALID;
+
+ while (!__atomic_compare_exchange_n(&ev->status, &valid_status,
+ RTE_EPOLL_INVALID, 0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) {
+ while (__atomic_load_n(&ev->status,
+ __ATOMIC_RELAXED) != RTE_EPOLL_VALID)
rte_pause();
+ valid_status = RTE_EPOLL_VALID;
+ }
memset(&ev->epdata, 0, sizeof(ev->epdata));
ev->fd = -1;
ev->epfd = -1;
epfd = rte_intr_tls_epfd();
if (op == EPOLL_CTL_ADD) {
- event->status = RTE_EPOLL_VALID;
+ __atomic_store_n(&event->status, RTE_EPOLL_VALID,
+ __ATOMIC_RELAXED);
event->fd = fd; /* ignore fd in event */
event->epfd = epfd;
ev.data.ptr = (void *)event;
op, fd, strerror(errno));
if (op == EPOLL_CTL_ADD)
/* rollback status when CTL_ADD fail */
- event->status = RTE_EPOLL_INVALID;
+ __atomic_store_n(&event->status, RTE_EPOLL_INVALID,
+ __ATOMIC_RELAXED);
return -1;
}
- if (op == EPOLL_CTL_DEL && event->status != RTE_EPOLL_INVALID)
+ if (op == EPOLL_CTL_DEL && __atomic_load_n(&event->status,
+ __ATOMIC_RELAXED) != RTE_EPOLL_INVALID)
eal_epoll_data_safe_free(event);
return 0;
case RTE_INTR_EVENT_ADD:
epfd_op = EPOLL_CTL_ADD;
rev = &intr_handle->elist[efd_idx];
- if (rev->status != RTE_EPOLL_INVALID) {
+ if (__atomic_load_n(&rev->status,
+ __ATOMIC_RELAXED) != RTE_EPOLL_INVALID) {
RTE_LOG(INFO, EAL, "Event already been added.\n");
return -EEXIST;
}
case RTE_INTR_EVENT_DEL:
epfd_op = EPOLL_CTL_DEL;
rev = &intr_handle->elist[efd_idx];
- if (rev->status == RTE_EPOLL_INVALID) {
+ if (__atomic_load_n(&rev->status,
+ __ATOMIC_RELAXED) == RTE_EPOLL_INVALID) {
RTE_LOG(INFO, EAL, "Event does not exist.\n");
return -EPERM;
}
for (i = 0; i < intr_handle->nb_efd; i++) {
rev = &intr_handle->elist[i];
- if (rev->status == RTE_EPOLL_INVALID)
+ if (__atomic_load_n(&rev->status,
+ __ATOMIC_RELAXED) == RTE_EPOLL_INVALID)
continue;
if (rte_epoll_ctl(rev->epfd, EPOLL_CTL_DEL, rev->fd, rev)) {
/* force free if the entry valid */
eal_epoll_data_safe_free(rev);
- rev->status = RTE_EPOLL_INVALID;
}
}
}