/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2018 Intel Corporation
 */

#include <errno.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <pthread.h>
#include <sys/types.h>
#include <sys/event.h>
#include <sys/queue.h>
#include <sys/time.h>

#include <rte_errno.h>
#include <rte_lcore.h>
#include <rte_spinlock.h>
#include <rte_common.h>
#include <rte_interrupts.h>
#include <rte_eal_trace.h>

#include "eal_private.h"
#include "eal_alarm_private.h"

#define MAX_INTR_EVENTS 16

/**
 * union buffer for reading on different devices
 */
union rte_intr_read_buffer {
	char charbuf[16]; /* for others */
};

TAILQ_HEAD(rte_intr_cb_list, rte_intr_callback);
TAILQ_HEAD(rte_intr_source_list, rte_intr_source);
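
/* Each interrupt source (one per file descriptor) owns a list of user
 * callbacks; the sources themselves sit on a global list protected by
 * intr_lock below.
 */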

struct rte_intr_callback {
	TAILQ_ENTRY(rte_intr_callback) next;
	rte_intr_callback_fn cb_fn; /**< callback address */
	void *cb_arg; /**< parameter for callback */
	uint8_t pending_delete; /**< delete after callback is called */
	rte_intr_unregister_callback_fn ucb_fn; /**< fn to call before cb is deleted */
};

struct rte_intr_source {
	TAILQ_ENTRY(rte_intr_source) next;
	struct rte_intr_handle intr_handle; /**< interrupt handle */
	struct rte_intr_cb_list callbacks; /**< user callbacks */
	uint32_t active; /**< non-zero while the interrupt thread runs its callbacks */
};

/* global spinlock for interrupt data operations */
static rte_spinlock_t intr_lock = RTE_SPINLOCK_INITIALIZER;

/* interrupt sources list */
static struct rte_intr_source_list intr_sources;

/* interrupt handling thread */
static pthread_t intr_thread;
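
/* kqueue instance used by the interrupt thread; -1 until created in
 * rte_eal_intr_init().
 */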
static volatile int kq = -1;

static int
intr_source_to_kevent(const struct rte_intr_handle *ih, struct kevent *ke)
{
	/* alarm callbacks are a special case */
	if (ih->type == RTE_INTR_HANDLE_ALARM) {
		uint64_t timeout_ns;

		/* get the soonest alarm timeout */
		if (eal_alarm_get_timeout_ns(&timeout_ns) < 0)
			return -1;

		ke->filter = EVFILT_TIMER;
		/* timers are one-shot */
		ke->flags |= EV_ONESHOT;
		ke->fflags = NOTE_NSECONDS;
		ke->data = timeout_ns;
	} else {
		ke->filter = EVFILT_READ;
	}
	ke->ident = ih->fd;

	return 0;
}
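
/*
 * Illustrative sketch (not part of the original sources): the conversion
 * above is equivalent to filling the kevent in by hand with EV_SET(). For
 * an alarm handle whose soonest deadline is, say, 5 ms away (a made-up
 * value), and for an ordinary fd-backed handle, respectively:
 *
 *	struct kevent ke;
 *
 *	EV_SET(&ke, ih->fd, EVFILT_TIMER, EV_ADD | EV_ONESHOT,
 *		NOTE_NSECONDS, 5 * 1000 * 1000, NULL);
 *
 *	EV_SET(&ke, ih->fd, EVFILT_READ, EV_ADD, 0, 0, NULL);
 */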

int
rte_intr_callback_register(const struct rte_intr_handle *intr_handle,
		rte_intr_callback_fn cb, void *cb_arg)
{
	struct rte_intr_callback *callback;
	struct rte_intr_source *src;
	int ret = 0, add_event = 0;

	/* first do parameter checking */
	if (intr_handle == NULL || intr_handle->fd < 0 || cb == NULL) {
		RTE_LOG(ERR, EAL,
			"Registering with invalid input parameter\n");
		return -EINVAL;
	}
	if (kq < 0) {
		RTE_LOG(ERR, EAL, "Kqueue is not active: %d\n", kq);
		return -ENODEV;
	}

	rte_spinlock_lock(&intr_lock);

	/* find the source for this intr_handle */
	TAILQ_FOREACH(src, &intr_sources, next) {
		if (src->intr_handle.fd == intr_handle->fd)
			break;
	}

	/* if this is an alarm interrupt and it already has a callback,
	 * then we don't want to create a new callback because the only
	 * thing on the list should be eal_alarm_callback() and we may
	 * be called just to reset the timer.
	 */
	if (src != NULL && src->intr_handle.type == RTE_INTR_HANDLE_ALARM &&
			!TAILQ_EMPTY(&src->callbacks)) {
		callback = NULL;
	} else {
		/* allocate a new interrupt callback entity */
		callback = calloc(1, sizeof(*callback));
		if (callback == NULL) {
			RTE_LOG(ERR, EAL, "Cannot allocate memory\n");
			ret = -ENOMEM;
			goto fail;
		}
		callback->cb_fn = cb;
		callback->cb_arg = cb_arg;
		callback->pending_delete = 0;
		callback->ucb_fn = NULL;

		if (src == NULL) {
			src = calloc(1, sizeof(*src));
			if (src == NULL) {
				RTE_LOG(ERR, EAL, "Cannot allocate memory\n");
				ret = -ENOMEM;
				goto fail;
			}
			src->intr_handle = *intr_handle;
			TAILQ_INIT(&src->callbacks);
			TAILQ_INSERT_TAIL(&intr_sources, src, next);
		}

		/* the source had no callbacks yet, so its event is not
		 * in the kqueue
		 */
		if (TAILQ_EMPTY(&src->callbacks))
			add_event = 1;

		TAILQ_INSERT_TAIL(&(src->callbacks), callback, next);
	}

	/* add events to the queue. timer events are special as we need to
	 * re-set the timer.
	 */
	if (add_event || src->intr_handle.type == RTE_INTR_HANDLE_ALARM) {
		struct kevent ke;

		memset(&ke, 0, sizeof(ke));
		ke.flags = EV_ADD; /* mark for addition to the queue */

		if (intr_source_to_kevent(intr_handle, &ke) < 0) {
			RTE_LOG(ERR, EAL, "Cannot convert interrupt handle to kevent\n");
			ret = -ENODEV;
			goto fail;
		}

		/**
		 * add the intr file descriptor into the wait list.
		 */
		if (kevent(kq, &ke, 1, NULL, 0, NULL) < 0) {
			/* currently, nic_uio does not support interrupts, so
			 * this error will always be triggered and reported to
			 * the user; don't output it unless the debug log level
			 * is set.
			 */
			if (errno == ENODEV)
				RTE_LOG(DEBUG, EAL, "Interrupt handle %d not supported\n",
					src->intr_handle.fd);
			else
				RTE_LOG(ERR, EAL, "Error adding fd %d "
					"kevent, %s\n",
					src->intr_handle.fd,
					strerror(errno));
			ret = -errno;
			goto fail;
		}
	}

	rte_eal_trace_intr_callback_register(intr_handle, cb, cb_arg, ret);
	rte_spinlock_unlock(&intr_lock);

	return ret;

fail:
	/* clean up on error; src may still be NULL here */
	if (src != NULL) {
		if (callback != NULL)
			TAILQ_REMOVE(&(src->callbacks), callback, next);
		if (TAILQ_EMPTY(&(src->callbacks))) {
			TAILQ_REMOVE(&intr_sources, src, next);
			free(src);
		}
	}
	free(callback);
	rte_eal_trace_intr_callback_register(intr_handle, cb, cb_arg, ret);
	rte_spinlock_unlock(&intr_lock);
	return ret;
}
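
/*
 * Usage sketch (illustrative only; my_dev and my_intr_handler are
 * hypothetical): a driver registers one callback per interrupt fd, and the
 * interrupt thread invokes it whenever the fd becomes readable:
 *
 *	static void
 *	my_intr_handler(void *arg)
 *	{
 *		struct my_dev *dev = arg;
 *
 *		// acknowledge and service the device here
 *	}
 *
 *	if (rte_intr_callback_register(&dev->intr_handle,
 *			my_intr_handler, dev) < 0)
 *		rte_panic("cannot register interrupt callback\n");
 */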

int
rte_intr_callback_unregister_pending(const struct rte_intr_handle *intr_handle,
		rte_intr_callback_fn cb_fn, void *cb_arg,
		rte_intr_unregister_callback_fn ucb_fn)
{
	int ret;
	struct rte_intr_source *src;
	struct rte_intr_callback *cb, *next;

	/* do parameter checking first */
	if (intr_handle == NULL || intr_handle->fd < 0) {
		RTE_LOG(ERR, EAL,
			"Unregistering with invalid input parameter\n");
		return -EINVAL;
	}
	if (kq < 0) {
		RTE_LOG(ERR, EAL, "Kqueue is not active\n");
		return -ENODEV;
	}

	rte_spinlock_lock(&intr_lock);

	/* check if an interrupt source exists for the fd */
	TAILQ_FOREACH(src, &intr_sources, next)
		if (src->intr_handle.fd == intr_handle->fd)
			break;

	/* No interrupt source registered for the fd */
	if (src == NULL) {
		ret = -ENOENT;

	/* only usable if the source is active */
	} else if (src->active == 0) {
		ret = -EAGAIN;
	} else {
		ret = 0;

		/* walk through the callbacks and mark all that match. */
		for (cb = TAILQ_FIRST(&src->callbacks); cb != NULL; cb = next) {
			next = TAILQ_NEXT(cb, next);
			if (cb->cb_fn == cb_fn && (cb_arg == (void *)-1 ||
					cb->cb_arg == cb_arg)) {
				cb->pending_delete = 1;
				cb->ucb_fn = ucb_fn;
				ret++;
			}
		}
	}

	rte_spinlock_unlock(&intr_lock);

	return ret;
}
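
/*
 * The pending variant exists because a callback cannot synchronously
 * unregister itself: while the interrupt thread is dispatching callbacks
 * for a source, src->active is non-zero and rte_intr_callback_unregister()
 * returns -EAGAIN. Marking the entry pending_delete instead lets the
 * interrupt thread remove it once dispatch finishes, invoking ucb_fn
 * (if non-NULL) just before each matching callback is freed.
 */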

int
rte_intr_callback_unregister(const struct rte_intr_handle *intr_handle,
		rte_intr_callback_fn cb_fn, void *cb_arg)
{
	int ret;
	struct rte_intr_source *src;
	struct rte_intr_callback *cb, *next;

	/* do parameter checking first */
	if (intr_handle == NULL || intr_handle->fd < 0) {
		RTE_LOG(ERR, EAL,
			"Unregistering with invalid input parameter\n");
		return -EINVAL;
	}
	if (kq < 0) {
		RTE_LOG(ERR, EAL, "Kqueue is not active\n");
		return -ENODEV;
	}

	rte_spinlock_lock(&intr_lock);

	/* check if an interrupt source exists for the fd */
	TAILQ_FOREACH(src, &intr_sources, next)
		if (src->intr_handle.fd == intr_handle->fd)
			break;

	/* No interrupt source registered for the fd */
	if (src == NULL) {
		ret = -ENOENT;

	/* interrupt source has some active callbacks right now. */
	} else if (src->active != 0) {
		ret = -EAGAIN;
	} else {
		struct kevent ke;

		ret = 0;

		/* remove it from the kqueue */
		memset(&ke, 0, sizeof(ke));
		ke.flags = EV_DELETE; /* mark for deletion from the queue */

		if (intr_source_to_kevent(intr_handle, &ke) < 0) {
			RTE_LOG(ERR, EAL, "Cannot convert to kevent\n");
			ret = -ENODEV;
			goto out;
		}

		/**
		 * remove the intr file descriptor from the wait list.
		 */
		if (kevent(kq, &ke, 1, NULL, 0, NULL) < 0) {
			RTE_LOG(ERR, EAL, "Error removing fd %d kevent, %s\n",
				src->intr_handle.fd, strerror(errno));
			/* removing a non-existent event is an expected
			 * condition in some circumstances
			 * (e.g. oneshot events).
			 */
		}

		/* walk through the callbacks and remove all that match. */
		for (cb = TAILQ_FIRST(&src->callbacks); cb != NULL; cb = next) {
			next = TAILQ_NEXT(cb, next);
			if (cb->cb_fn == cb_fn && (cb_arg == (void *)-1 ||
					cb->cb_arg == cb_arg)) {
				TAILQ_REMOVE(&src->callbacks, cb, next);
				free(cb);
				ret++;
			}
		}

		/* all callbacks for that source are removed. */
		if (TAILQ_EMPTY(&src->callbacks)) {
			TAILQ_REMOVE(&intr_sources, src, next);
			free(src);
		}
	}
out:
	rte_eal_trace_intr_callback_unregister(intr_handle, cb_fn, cb_arg,
		ret);
	rte_spinlock_unlock(&intr_lock);

	return ret;
}

int
rte_intr_callback_unregister_sync(const struct rte_intr_handle *intr_handle,
		rte_intr_callback_fn cb_fn, void *cb_arg)
{
	int ret = 0;

	/* spin until the interrupt thread finishes running any callbacks
	 * for this source
	 */
	while ((ret = rte_intr_callback_unregister(intr_handle, cb_fn, cb_arg)) == -EAGAIN)
		rte_pause();

	return ret;
}

int
rte_intr_enable(const struct rte_intr_handle *intr_handle)
{
	int rc = 0;

	if (intr_handle == NULL)
		return -1;

	if (intr_handle->type == RTE_INTR_HANDLE_VDEV) {
		rc = 0;
		goto out;
	}

	if (intr_handle->fd < 0 || intr_handle->uio_cfg_fd < 0) {
		rc = -1;
		goto out;
	}

	switch (intr_handle->type) {
	/* not used at this moment */
	case RTE_INTR_HANDLE_ALARM:
		rc = -1;
		break;
	/* not used at this moment */
	case RTE_INTR_HANDLE_DEV_EVENT:
		rc = -1;
		break;
	/* unknown handle type */
	default:
		RTE_LOG(ERR, EAL,
			"Unknown handle type of fd %d\n",
			intr_handle->fd);
		rc = -1;
		break;
	}

out:
	rte_eal_trace_intr_enable(intr_handle, rc);
	return rc;
}

int
rte_intr_disable(const struct rte_intr_handle *intr_handle)
{
	int rc = 0;

	if (intr_handle == NULL)
		return -1;

	if (intr_handle->type == RTE_INTR_HANDLE_VDEV) {
		rc = 0;
		goto out;
	}

	if (intr_handle->fd < 0 || intr_handle->uio_cfg_fd < 0) {
		rc = -1;
		goto out;
	}

	switch (intr_handle->type) {
	/* not used at this moment */
	case RTE_INTR_HANDLE_ALARM:
		rc = -1;
		break;
	/* not used at this moment */
	case RTE_INTR_HANDLE_DEV_EVENT:
		rc = -1;
		break;
	/* unknown handle type */
	default:
		RTE_LOG(ERR, EAL,
			"Unknown handle type of fd %d\n",
			intr_handle->fd);
		rc = -1;
		break;
	}

out:
	rte_eal_trace_intr_disable(intr_handle, rc);
	return rc;
}

int
rte_intr_ack(const struct rte_intr_handle *intr_handle)
{
	if (intr_handle && intr_handle->type == RTE_INTR_HANDLE_VDEV)
		return 0;

	return -1;
}

static void
eal_intr_process_interrupts(struct kevent *events, int nfds)
{
	struct rte_intr_callback active_cb;
	union rte_intr_read_buffer buf;
	struct rte_intr_callback *cb, *next;
	struct rte_intr_source *src;
	struct kevent ke;
	int n, bytes_read;

	for (n = 0; n < nfds; n++) {
		bool call = false;
		int event_fd = events[n].ident;

		rte_spinlock_lock(&intr_lock);
		TAILQ_FOREACH(src, &intr_sources, next)
			if (src->intr_handle.fd == event_fd)
				break;
		if (src == NULL) {
			rte_spinlock_unlock(&intr_lock);
			continue;
		}

		/* mark this interrupt source as active and release the lock. */
		src->active = 1;
		rte_spinlock_unlock(&intr_lock);

		/* set the length to be read for each handle type */
		switch (src->intr_handle.type) {
		case RTE_INTR_HANDLE_ALARM:
		case RTE_INTR_HANDLE_VDEV:
		case RTE_INTR_HANDLE_EXT:
		case RTE_INTR_HANDLE_DEV_EVENT:
			/* nothing to read from an fd; just run the callbacks */
			bytes_read = 0;
			call = true;
			break;
		default:
			bytes_read = 1;
			break;
		}

		if (bytes_read > 0) {
			/**
			 * read out to clear the ready-to-be-read flag.
			 */
			bytes_read = read(event_fd, &buf, bytes_read);
			if (bytes_read < 0) {
				if (errno == EINTR || errno == EWOULDBLOCK)
					continue;

				RTE_LOG(ERR, EAL, "Error reading from file "
					"descriptor %d: %s\n",
					event_fd, strerror(errno));
			} else if (bytes_read == 0)
				RTE_LOG(ERR, EAL, "Read nothing from file "
					"descriptor %d\n", event_fd);
			else
				call = true;
		}

		/* grab the lock again to call callbacks and update status. */
		rte_spinlock_lock(&intr_lock);

		if (call) {
			/* Finally, call all callbacks. */
			TAILQ_FOREACH(cb, &src->callbacks, next) {
				/* make a copy and unlock: the callback runs
				 * without intr_lock held, so it may safely
				 * call back into the interrupt API.
				 */
				active_cb = *cb;
				rte_spinlock_unlock(&intr_lock);

				/* call the actual callback */
				active_cb.cb_fn(active_cb.cb_arg);

				/* get the lock back. */
				rte_spinlock_lock(&intr_lock);
			}
		}

		/* we are done with this interrupt source, release it. */
		src->active = 0;

		/* check if any callbacks are supposed to be removed */
		for (cb = TAILQ_FIRST(&src->callbacks); cb != NULL; cb = next) {
			next = TAILQ_NEXT(cb, next);
			if (cb->pending_delete) {
				/* remove it from the kqueue */
				memset(&ke, 0, sizeof(ke));
				/* mark for deletion from the queue */
				ke.flags = EV_DELETE;

				if (intr_source_to_kevent(&src->intr_handle, &ke) < 0) {
					RTE_LOG(ERR, EAL, "Cannot convert to kevent\n");
					rte_spinlock_unlock(&intr_lock);
					return;
				}

				/**
				 * remove the intr file descriptor from the wait list.
				 */
				if (kevent(kq, &ke, 1, NULL, 0, NULL) < 0) {
					RTE_LOG(ERR, EAL, "Error removing fd %d kevent, "
						"%s\n", src->intr_handle.fd,
						strerror(errno));
					/* removing a non-existent event is an
					 * expected condition in some circumstances
					 * (e.g. oneshot events).
					 */
				}

				TAILQ_REMOVE(&src->callbacks, cb, next);
				if (cb->ucb_fn)
					cb->ucb_fn(&src->intr_handle, cb->cb_arg);
				free(cb);
			}
		}

		/* all callbacks for that source are removed. */
		if (TAILQ_EMPTY(&src->callbacks)) {
			TAILQ_REMOVE(&intr_sources, src, next);
			free(src);
		}

		rte_spinlock_unlock(&intr_lock);
	}
}

static void *
eal_intr_thread_main(void *arg __rte_unused)
{
	struct kevent events[MAX_INTR_EVENTS];
	int nfds;

	/* host thread, never breaks out */
	for (;;) {
		/* do not change anything, just wait */
		nfds = kevent(kq, NULL, 0, events, MAX_INTR_EVENTS, NULL);

		/* kevent failed */
		if (nfds < 0) {
			if (errno == EINTR)
				continue;
			RTE_LOG(ERR, EAL,
				"kevent wait failed\n");
			break;
		}
		/* kevent timeout, will never happen here */
		else if (nfds == 0)
			continue;

		/* kevent has at least one fd ready to read */
		eal_intr_process_interrupts(events, nfds);
	}
	close(kq);
	kq = -1;
	return NULL;
}
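
/*
 * Note that with a NULL timeout the kevent() call above blocks until at
 * least one event fires, so the nfds == 0 branch is effectively
 * unreachable; the thread alternates between waiting in kevent() and
 * dispatching in eal_intr_process_interrupts() until kevent() fails.
 */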

int
rte_eal_intr_init(void)
{
	int ret = 0;

	/* init the global interrupt source head */
	TAILQ_INIT(&intr_sources);

	kq = kqueue();
	if (kq < 0) {
		RTE_LOG(ERR, EAL, "Cannot create kqueue instance\n");
		return -1;
	}

	/* create the host thread to wait for and handle interrupts */
	ret = rte_ctrl_thread_create(&intr_thread, "eal-intr-thread", NULL,
			eal_intr_thread_main, NULL);
	if (ret != 0) {
		rte_errno = -ret;
		RTE_LOG(ERR, EAL,
			"Failed to create thread for interrupt handling\n");
	}

	return ret;
}

int
rte_intr_rx_ctl(struct rte_intr_handle *intr_handle,
		int epfd, int op, unsigned int vec, void *data)
{
	RTE_SET_USED(intr_handle);
	RTE_SET_USED(epfd);
	RTE_SET_USED(op);
	RTE_SET_USED(vec);
	RTE_SET_USED(data);

	return -ENOTSUP;
}

int
rte_intr_efd_enable(struct rte_intr_handle *intr_handle, uint32_t nb_efd)
{
	RTE_SET_USED(intr_handle);
	RTE_SET_USED(nb_efd);

	return 0;
}

void
rte_intr_efd_disable(struct rte_intr_handle *intr_handle)
{
	RTE_SET_USED(intr_handle);
}

int
rte_intr_dp_is_en(struct rte_intr_handle *intr_handle)
{
	RTE_SET_USED(intr_handle);

	return 0;
}

int
rte_intr_allow_others(struct rte_intr_handle *intr_handle)
{
	RTE_SET_USED(intr_handle);

	return 1;
}

int
rte_intr_cap_multiple(struct rte_intr_handle *intr_handle)
{
	RTE_SET_USED(intr_handle);

	return 0;
}

int
rte_epoll_wait(int epfd, struct rte_epoll_event *events,
		int maxevents, int timeout)
{
	RTE_SET_USED(epfd);
	RTE_SET_USED(events);
	RTE_SET_USED(maxevents);
	RTE_SET_USED(timeout);

	return -ENOTSUP;
}

int
rte_epoll_wait_interruptible(int epfd, struct rte_epoll_event *events,
		int maxevents, int timeout)
{
	RTE_SET_USED(epfd);
	RTE_SET_USED(events);
	RTE_SET_USED(maxevents);
	RTE_SET_USED(timeout);

	return -ENOTSUP;
}

int
rte_epoll_ctl(int epfd, int op, int fd, struct rte_epoll_event *event)
{
	RTE_SET_USED(epfd);
	RTE_SET_USED(op);
	RTE_SET_USED(fd);
	RTE_SET_USED(event);

	return -ENOTSUP;
}

int
rte_intr_tls_epfd(void)
{
	return -ENOTSUP;
}

void
rte_intr_free_epoll_fd(struct rte_intr_handle *intr_handle)
{
	RTE_SET_USED(intr_handle);
}

int rte_thread_is_intr(void)
{
	return pthread_equal(intr_thread, pthread_self());
}