1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2018 Intel Corporation
11 #include <rte_errno.h>
12 #include <rte_lcore.h>
13 #include <rte_spinlock.h>
14 #include <rte_common.h>
15 #include <rte_interrupts.h>
17 #include "eal_private.h"
18 #include "eal_alarm_private.h"
20 #define MAX_INTR_EVENTS 16
23 * union buffer for reading on different devices
25 union rte_intr_read_buffer {
26 char charbuf[16]; /* for others */
/* Lists of callbacks per source, and of all known interrupt sources. */
29 TAILQ_HEAD(rte_intr_cb_list, rte_intr_callback);
30 TAILQ_HEAD(rte_intr_source_list, rte_intr_source);
/* One registered user callback attached to an interrupt source. */
32 struct rte_intr_callback {
33 TAILQ_ENTRY(rte_intr_callback) next;
34 rte_intr_callback_fn cb_fn; /**< callback address */
35 void *cb_arg; /**< parameter for callback */
/* One interrupt source (keyed by intr_handle.fd) with its callback list.
 * NOTE(review): closing braces of these aggregates, and some members
 * (e.g. the 'active' flag used in unregister), are elided in this listing.
 */
38 struct rte_intr_source {
39 TAILQ_ENTRY(rte_intr_source) next;
40 struct rte_intr_handle intr_handle; /**< interrupt handle */
41 struct rte_intr_cb_list callbacks; /**< user callbacks */
45 /* global spinlock for interrupt data operation */
46 static rte_spinlock_t intr_lock = RTE_SPINLOCK_INITIALIZER;
48 /* interrupt sources list */
49 static struct rte_intr_source_list intr_sources;
51 /* interrupt handling thread */
52 static pthread_t intr_thread;
/* kqueue descriptor used by the interrupt thread; -1 until initialized */
54 static volatile int kq = -1;
/*
 * Translate an interrupt handle into a struct kevent for the kqueue.
 *
 * Alarm handles are armed as one-shot EVFILT_TIMER events using the
 * soonest pending alarm timeout in nanoseconds (NOTE_NSECONDS); all
 * other handle types are watched for readability (EVFILT_READ).
 *
 * NOTE(review): return type, locals and return statements are elided
 * in this listing; callers treat a negative return as failure — confirm
 * against the full file.
 */
57 intr_source_to_kevent(const struct rte_intr_handle *ih, struct kevent *ke)
59 /* alarm callbacks are special case */
60 if (ih->type == RTE_INTR_HANDLE_ALARM) {
63 /* get soonest alarm timeout */
64 if (eal_alarm_get_timeout_ns(&timeout_ns) < 0)
67 ke->filter = EVFILT_TIMER;
68 /* timers are one shot */
69 ke->flags |= EV_ONESHOT;
70 ke->fflags = NOTE_NSECONDS;
71 ke->data = timeout_ns;
/* non-alarm handles: wake up when the fd becomes readable */
73 ke->filter = EVFILT_READ;
/*
 * Register a user callback for an interrupt handle.
 *
 * Allocates a callback node, attaches it under intr_lock to the existing
 * source matching intr_handle->fd (or creates a new source), and adds the
 * fd — or, for alarms, the timer — to the kqueue wait list.
 *
 * @param intr_handle  handle identifying the interrupt; fd must be >= 0
 * @param cb           callback to invoke when the interrupt fires
 * @param cb_arg       opaque argument passed back to cb
 *
 * NOTE(review): several lines (returns, 'add_event' computation, parts of
 * the error/rollback path) are elided in this listing.
 */
81 rte_intr_callback_register(const struct rte_intr_handle *intr_handle,
82 rte_intr_callback_fn cb, void *cb_arg)
84 struct rte_intr_callback *callback = NULL;
85 struct rte_intr_source *src = NULL;
88 /* first do parameter checking */
89 if (intr_handle == NULL || intr_handle->fd < 0 || cb == NULL) {
91 "Registering with invalid input parameter\n");
/* the kqueue must have been created by rte_eal_intr_init() */
95 RTE_LOG(ERR, EAL, "Kqueue is not active: %d\n", kq);
99 /* allocate a new interrupt callback entity */
100 callback = calloc(1, sizeof(*callback));
101 if (callback == NULL) {
102 RTE_LOG(ERR, EAL, "Can not allocate memory\n");
105 callback->cb_fn = cb;
106 callback->cb_arg = cb_arg;
108 rte_spinlock_lock(&intr_lock);
110 /* check if there is at least one callback registered for the fd */
111 TAILQ_FOREACH(src, &intr_sources, next) {
112 if (src->intr_handle.fd == intr_handle->fd) {
113 /* we had no interrupts for this */
114 if (TAILQ_EMPTY(&src->callbacks))
117 TAILQ_INSERT_TAIL(&(src->callbacks), callback, next);
123 /* no existing callbacks for this - add new source */
125 src = calloc(1, sizeof(*src));
127 RTE_LOG(ERR, EAL, "Can not allocate memory\n");
131 src->intr_handle = *intr_handle;
132 TAILQ_INIT(&src->callbacks);
133 TAILQ_INSERT_TAIL(&(src->callbacks), callback, next);
134 TAILQ_INSERT_TAIL(&intr_sources, src, next);
140 /* add events to the queue. timer events are special as we need to
143 if (add_event || src->intr_handle.type == RTE_INTR_HANDLE_ALARM) {
146 memset(&ke, 0, sizeof(ke));
147 ke.flags = EV_ADD; /* mark for addition to the queue */
149 if (intr_source_to_kevent(intr_handle, &ke) < 0) {
150 RTE_LOG(ERR, EAL, "Cannot convert interrupt handle to kevent\n");
156 * add the intr file descriptor into wait list.
158 if (kevent(kq, &ke, 1, NULL, 0, NULL) < 0) {
159 /* currently, nic_uio does not support interrupts, so
160 * this error will always be triggered and output to the
161 * user. so, don't output it unless debug log level set.
164 RTE_LOG(DEBUG, EAL, "Interrupt handle %d not supported\n",
165 src->intr_handle.fd);
167 RTE_LOG(ERR, EAL, "Error adding fd %d "
/* success: release the lock and return */
175 rte_spinlock_unlock(&intr_lock);
/* failure path: undo the list insertions done above */
181 TAILQ_REMOVE(&(src->callbacks), callback, next);
182 if (TAILQ_EMPTY(&(src->callbacks))) {
183 TAILQ_REMOVE(&intr_sources, src, next);
188 rte_spinlock_unlock(&intr_lock);
/*
 * Unregister callback(s) for an interrupt handle.
 *
 * Finds the source matching intr_handle->fd, removes its event from the
 * kqueue, and deletes every callback whose function matches cb_fn and
 * whose argument matches cb_arg (cb_arg == (void *)-1 matches any
 * argument). The source itself is freed once its callback list empties.
 *
 * NOTE(review): returns, the 'not found' and 'source active' early-exit
 * bodies, and some locals are elided in this listing.
 */
193 rte_intr_callback_unregister(const struct rte_intr_handle *intr_handle,
194 rte_intr_callback_fn cb_fn, void *cb_arg)
197 struct rte_intr_source *src;
198 struct rte_intr_callback *cb, *next;
200 /* do parameter checking first */
201 if (intr_handle == NULL || intr_handle->fd < 0) {
203 "Unregistering with invalid input parameter\n");
207 RTE_LOG(ERR, EAL, "Kqueue is not active\n");
211 rte_spinlock_lock(&intr_lock);
213 /* check if an interrupt source exists for the fd */
214 TAILQ_FOREACH(src, &intr_sources, next)
215 if (src->intr_handle.fd == intr_handle->fd)
218 /* No interrupt source registered for the fd */
/* cannot unregister while the interrupt thread is dispatching it */
222 /* interrupt source has some active callbacks right now. */
223 } else if (src->active != 0) {
232 /* remove it from the kqueue */
233 memset(&ke, 0, sizeof(ke));
234 ke.flags = EV_DELETE; /* mark for deletion from the queue */
236 if (intr_source_to_kevent(intr_handle, &ke) < 0) {
237 RTE_LOG(ERR, EAL, "Cannot convert to kevent\n");
243 * remove intr file descriptor from wait list.
245 if (kevent(kq, &ke, 1, NULL, 0, NULL) < 0) {
246 RTE_LOG(ERR, EAL, "Error removing fd %d kevent, %s\n",
247 src->intr_handle.fd, strerror(errno));
248 /* removing a non-existent event is an expected condition
249 * in some circumstances (e.g. oneshot events).
253 /* walk through the callbacks and remove all that match. */
254 for (cb = TAILQ_FIRST(&src->callbacks); cb != NULL; cb = next) {
255 next = TAILQ_NEXT(cb, next);
256 if (cb->cb_fn == cb_fn && (cb_arg == (void *)-1 ||
257 cb->cb_arg == cb_arg)) {
258 TAILQ_REMOVE(&src->callbacks, cb, next);
264 /* all callbacks for that source are removed. */
265 if (TAILQ_EMPTY(&src->callbacks)) {
266 TAILQ_REMOVE(&intr_sources, src, next);
271 rte_spinlock_unlock(&intr_lock);
/*
 * Enable an interrupt for the given handle.
 *
 * VDEV handles are accepted without fd validation (their fds may be
 * unset); all other handles require a valid fd and uio_cfg_fd. The
 * ALARM and DEV_EVENT cases are currently no-ops on FreeBSD; any other
 * type is reported as unknown.
 *
 * NOTE(review): the return statements inside each case are elided in
 * this listing.
 */
277 rte_intr_enable(const struct rte_intr_handle *intr_handle)
279 if (intr_handle && intr_handle->type == RTE_INTR_HANDLE_VDEV)
282 if (!intr_handle || intr_handle->fd < 0 || intr_handle->uio_cfg_fd < 0)
285 switch (intr_handle->type) {
286 /* not used at this moment */
287 case RTE_INTR_HANDLE_ALARM:
289 /* not used at this moment */
290 case RTE_INTR_HANDLE_DEV_EVENT:
292 /* unknown handle type */
295 "Unknown handle type of fd %d\n",
/*
 * Disable an interrupt for the given handle.
 *
 * Mirrors rte_intr_enable(): VDEV handles skip fd validation, ALARM and
 * DEV_EVENT are currently no-ops on FreeBSD, and any other type is
 * reported as unknown.
 *
 * NOTE(review): the return statements inside each case are elided in
 * this listing.
 */
304 rte_intr_disable(const struct rte_intr_handle *intr_handle)
306 if (intr_handle && intr_handle->type == RTE_INTR_HANDLE_VDEV)
309 if (!intr_handle || intr_handle->fd < 0 || intr_handle->uio_cfg_fd < 0)
312 switch (intr_handle->type) {
313 /* not used at this moment */
314 case RTE_INTR_HANDLE_ALARM:
316 /* not used at this moment */
317 case RTE_INTR_HANDLE_DEV_EVENT:
319 /* unknown handle type */
322 "Unknown handle type of fd %d\n",
/*
 * Dispatch the kevents returned by kevent() to the registered callbacks.
 *
 * For each event: look up the source by fd under intr_lock, drain the fd
 * (read length depends on handle type), then invoke every callback. Each
 * callback is copied and called with intr_lock released, so callbacks may
 * themselves (un)register interrupts; the lock is re-taken between calls.
 *
 * @param events  array of events returned by kevent()
 * @param nfds    number of valid entries in events
 *
 * NOTE(review): several lines (the 'active' marking, per-type bytes_read
 * values, continue/break statements) are elided in this listing.
 */
331 eal_intr_process_interrupts(struct kevent *events, int nfds)
333 struct rte_intr_callback active_cb;
334 union rte_intr_read_buffer buf;
335 struct rte_intr_callback *cb;
336 struct rte_intr_source *src;
340 for (n = 0; n < nfds; n++) {
341 int event_fd = events[n].ident;
343 rte_spinlock_lock(&intr_lock);
344 TAILQ_FOREACH(src, &intr_sources, next)
345 if (src->intr_handle.fd == event_fd)
/* no source found for this fd: skip the event */
348 rte_spinlock_unlock(&intr_lock);
352 /* mark this interrupt source as active and release the lock. */
354 rte_spinlock_unlock(&intr_lock);
356 /* set the length to be read for different handle type */
357 switch (src->intr_handle.type) {
358 case RTE_INTR_HANDLE_ALARM:
362 case RTE_INTR_HANDLE_VDEV:
363 case RTE_INTR_HANDLE_EXT:
367 case RTE_INTR_HANDLE_DEV_EVENT:
376 if (bytes_read > 0) {
378 * read out to clear the ready-to-be-read flag
381 bytes_read = read(event_fd, &buf, bytes_read);
382 if (bytes_read < 0) {
/* transient read errors are tolerated; others are logged */
383 if (errno == EINTR || errno == EWOULDBLOCK)
386 RTE_LOG(ERR, EAL, "Error reading from file "
387 "descriptor %d: %s\n",
390 } else if (bytes_read == 0)
391 RTE_LOG(ERR, EAL, "Read nothing from file "
392 "descriptor %d\n", event_fd);
397 /* grab a lock, again to call callbacks and update status. */
398 rte_spinlock_lock(&intr_lock);
401 /* Finally, call all callbacks. */
402 TAILQ_FOREACH(cb, &src->callbacks, next) {
404 /* make a copy and unlock. */
406 rte_spinlock_unlock(&intr_lock);
408 /* call the actual callback */
409 active_cb.cb_fn(active_cb.cb_arg);
411 /* take the lock back. */
412 rte_spinlock_lock(&intr_lock);
416 /* we are done with that interrupt source, release it. */
418 rte_spinlock_unlock(&intr_lock);
/*
 * Main loop of the interrupt handling thread.
 *
 * Blocks forever on kevent() (no timeout) and hands every batch of ready
 * events to eal_intr_process_interrupts(). kevent failures are logged and
 * the loop continues.
 *
 * NOTE(review): the loop construct, error-branch condition and return are
 * elided in this listing.
 */
423 eal_intr_thread_main(void *arg __rte_unused)
425 struct kevent events[MAX_INTR_EVENTS];
428 /* host thread, never break out */
430 /* do not change anything, just wait */
431 nfds = kevent(kq, NULL, 0, events, MAX_INTR_EVENTS, NULL);
438 "kevent returns with fail\n");
441 /* kevent timeout, will never happen here */
445 /* kevent has at least one fd ready to read */
446 eal_intr_process_interrupts(events, nfds);
/*
 * Initialize the EAL interrupt subsystem.
 *
 * Initializes the global source list, creates the kqueue, and spawns the
 * control thread that waits on it (eal_intr_thread_main).
 *
 * NOTE(review): the kqueue() call itself, the error-path cleanup, and the
 * return statements are elided in this listing.
 */
454 rte_eal_intr_init(void)
458 /* init the global interrupt source head */
459 TAILQ_INIT(&intr_sources);
463 RTE_LOG(ERR, EAL, "Cannot create kqueue instance\n");
467 /* create the host thread to wait/handle the interrupt */
468 ret = rte_ctrl_thread_create(&intr_thread, "eal-intr-thread", NULL,
469 eal_intr_thread_main, NULL);
473 "Failed to create thread for interrupt handling\n");
/*
 * Data-plane interrupt (Rx event fd / epoll) API stubs.
 *
 * These calls are part of the public rte_interrupts API but are not
 * implemented on FreeBSD; each parameter is explicitly marked unused.
 *
 * NOTE(review): the stubs' return statements and closing braces are
 * elided in this listing; presumably they return failure / neutral
 * values — confirm against the full file.
 */
480 rte_intr_rx_ctl(struct rte_intr_handle *intr_handle,
481 int epfd, int op, unsigned int vec, void *data)
483 RTE_SET_USED(intr_handle);
/* per-queue event fds are not supported on FreeBSD */
493 rte_intr_efd_enable(struct rte_intr_handle *intr_handle, uint32_t nb_efd)
495 RTE_SET_USED(intr_handle);
496 RTE_SET_USED(nb_efd);
502 rte_intr_efd_disable(struct rte_intr_handle *intr_handle)
504 RTE_SET_USED(intr_handle);
/* datapath interrupt handling is never enabled here */
508 rte_intr_dp_is_en(struct rte_intr_handle *intr_handle)
510 RTE_SET_USED(intr_handle);
515 rte_intr_allow_others(struct rte_intr_handle *intr_handle)
517 RTE_SET_USED(intr_handle);
522 rte_intr_cap_multiple(struct rte_intr_handle *intr_handle)
524 RTE_SET_USED(intr_handle);
/* epoll emulation is likewise unsupported on FreeBSD */
529 rte_epoll_wait(int epfd, struct rte_epoll_event *events,
530 int maxevents, int timeout)
533 RTE_SET_USED(events);
534 RTE_SET_USED(maxevents);
535 RTE_SET_USED(timeout);
541 rte_epoll_ctl(int epfd, int op, int fd, struct rte_epoll_event *event)
552 rte_intr_tls_epfd(void)
558 rte_intr_free_epoll_fd(struct rte_intr_handle *intr_handle)
560 RTE_SET_USED(intr_handle);