1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2018 Intel Corporation
11 #include <rte_errno.h>
12 #include <rte_lcore.h>
13 #include <rte_spinlock.h>
14 #include <rte_common.h>
15 #include <rte_interrupts.h>
17 #include "eal_private.h"
18 #include "eal_alarm_private.h"
20 #define MAX_INTR_EVENTS 16
23 * union buffer for reading on different devices
25 union rte_intr_read_buffer {
26 char charbuf[16]; /* for others */
/* List-head types (BSD queue(3) macros): the per-source list of user
 * callbacks, and the global list of interrupt sources. */
29 TAILQ_HEAD(rte_intr_cb_list, rte_intr_callback);
30 TAILQ_HEAD(rte_intr_source_list, rte_intr_source);
/* One user callback registered against an interrupt source.
 * NOTE(review): the closing brace of this struct is elided in this
 * fragment — there may be further members not visible here. */
32 struct rte_intr_callback {
33 	TAILQ_ENTRY(rte_intr_callback) next;
34 	rte_intr_callback_fn cb_fn; /**< callback address */
35 	void *cb_arg; /**< parameter for callback */
36 	uint8_t pending_delete; /**< delete after callback is called */
37 	rte_intr_unregister_callback_fn ucb_fn; /**< fn to call before cb is deleted */
/* An interrupt source: one interrupt handle (fd) plus every callback
 * registered for it.  Linked into the global intr_sources list. */
40 struct rte_intr_source {
41 	TAILQ_ENTRY(rte_intr_source) next;
42 	struct rte_intr_handle intr_handle; /**< interrupt handle */
43 	struct rte_intr_cb_list callbacks; /**< user callbacks */
/* NOTE(review): the 'active' member referenced by the unregister and
 * dispatch code below is declared on lines elided from this fragment. */
47 /* global spinlock for interrupt data operation */
48 static rte_spinlock_t intr_lock = RTE_SPINLOCK_INITIALIZER;
50 /* interrupt sources list, protected by intr_lock */
51 static struct rte_intr_source_list intr_sources;
53 /* interrupt handling thread */
54 static pthread_t intr_thread;
/* kqueue instance the interrupt thread waits on; -1 until created by
 * rte_eal_intr_init().  NOTE(review): 'volatile' is used here for a
 * value shared across threads — volatile is not a synchronization
 * primitive; presumably the -1 sentinel checks tolerate this. */
56 static volatile int kq = -1;
/*
 * Convert an interrupt handle into a struct kevent suitable for
 * (de)registration with the kqueue.  Alarm handles become a one-shot
 * EVFILT_TIMER armed with the soonest pending alarm timeout in
 * nanoseconds (NOTE_NSECONDS); every other handle becomes an
 * EVFILT_READ watch on its file descriptor.
 * NOTE(review): the return statements and parts of both branches are
 * elided in this fragment — the error contract (e.g. when
 * eal_alarm_get_timeout_ns() fails) cannot be confirmed from here.
 */
59 intr_source_to_kevent(const struct rte_intr_handle *ih, struct kevent *ke)
61 	/* alarm callbacks are special case */
62 	if (ih->type == RTE_INTR_HANDLE_ALARM) {
65 		/* get soonest alarm timeout */
66 		if (eal_alarm_get_timeout_ns(&timeout_ns) < 0)
69 		ke->filter = EVFILT_TIMER;
70 		/* timers are one shot */
71 		ke->flags |= EV_ONESHOT;
72 		ke->fflags = NOTE_NSECONDS;
73 		ke->data = timeout_ns;
75 		ke->filter = EVFILT_READ;
/*
 * Register a user callback (cb, cb_arg) for the interrupt described by
 * intr_handle.  Looks up — or allocates — the rte_intr_source for the
 * fd, appends the callback to its list, and, for a brand-new source or
 * an alarm handle, adds a kevent to the global kqueue so the interrupt
 * thread starts waiting on it.  All list manipulation happens under
 * intr_lock.
 * NOTE(review): several lines (returns, goto labels, the error exit)
 * are elided in this fragment; the cleanup at the bottom is presumably
 * reached via a 'fail'-style label — confirm against the full file.
 */
83 rte_intr_callback_register(const struct rte_intr_handle *intr_handle,
84 		rte_intr_callback_fn cb, void *cb_arg)
86 	struct rte_intr_callback *callback;
87 	struct rte_intr_source *src;
88 	int ret, add_event = 0;
90 	/* first do parameter checking */
91 	if (intr_handle == NULL || intr_handle->fd < 0 || cb == NULL) {
93 			"Registering with invalid input parameter\n");
97 		RTE_LOG(ERR, EAL, "Kqueue is not active: %d\n", kq);
101 	rte_spinlock_lock(&intr_lock);
103 	/* find the source for this intr_handle */
104 	TAILQ_FOREACH(src, &intr_sources, next) {
105 		if (src->intr_handle.fd == intr_handle->fd)
109 	/* if this is an alarm interrupt and it already has a callback,
110 	 * then we don't want to create a new callback because the only
111 	 * thing on the list should be eal_alarm_callback() and we may
112 	 * be called just to reset the timer.
114 	if (src != NULL && src->intr_handle.type == RTE_INTR_HANDLE_ALARM &&
115 			!TAILQ_EMPTY(&src->callbacks)) {
118 	/* allocate a new interrupt callback entity */
119 	callback = calloc(1, sizeof(*callback));
120 	if (callback == NULL) {
121 		RTE_LOG(ERR, EAL, "Can not allocate memory\n");
125 	callback->cb_fn = cb;
126 	callback->cb_arg = cb_arg;
127 	callback->pending_delete = 0;
128 	callback->ucb_fn = NULL;
/* no existing source for this fd: create and enqueue one */
131 		src = calloc(1, sizeof(*src));
133 			RTE_LOG(ERR, EAL, "Can not allocate memory\n");
137 			src->intr_handle = *intr_handle;
138 			TAILQ_INIT(&src->callbacks);
139 			TAILQ_INSERT_TAIL(&intr_sources, src, next);
143 	/* we had no interrupts for this */
144 	if (TAILQ_EMPTY(&src->callbacks))
147 	TAILQ_INSERT_TAIL(&(src->callbacks), callback, next);
150 	/* add events to the queue. timer events are special as we need to
153 	if (add_event || src->intr_handle.type == RTE_INTR_HANDLE_ALARM) {
156 		memset(&ke, 0, sizeof(ke));
157 		ke.flags = EV_ADD; /* mark for addition to the queue */
159 		if (intr_source_to_kevent(intr_handle, &ke) < 0) {
160 			RTE_LOG(ERR, EAL, "Cannot convert interrupt handle to kevent\n");
166 		 * add the intr file descriptor into wait list.
168 		if (kevent(kq, &ke, 1, NULL, 0, NULL) < 0) {
169 			/* currently, nic_uio does not support interrupts, so
170 			 * this error will always be triggered and output to the
171 			 * user. so, don't output it unless debug log level set.
174 				RTE_LOG(DEBUG, EAL, "Interrupt handle %d not supported\n",
175 					src->intr_handle.fd);
177 				RTE_LOG(ERR, EAL, "Error adding fd %d "
185 	rte_spinlock_unlock(&intr_lock);
/* error path: undo the partial registration done above */
191 	if (callback != NULL)
192 		TAILQ_REMOVE(&(src->callbacks), callback, next);
193 	if (TAILQ_EMPTY(&(src->callbacks))) {
194 		TAILQ_REMOVE(&intr_sources, src, next);
199 	rte_spinlock_unlock(&intr_lock);
/*
 * Mark matching callbacks of intr_handle for deferred deletion instead
 * of removing them immediately — used when the source is currently
 * 'active' (its callbacks are being dispatched), so the interrupt
 * thread performs the actual removal later and invokes ucb_fn first.
 * A cb_arg of (void *)-1 matches any argument.
 * NOTE(review): return statements and some branch bodies are elided in
 * this fragment.
 */
204 rte_intr_callback_unregister_pending(const struct rte_intr_handle *intr_handle,
205 				rte_intr_callback_fn cb_fn, void *cb_arg,
206 				rte_intr_unregister_callback_fn ucb_fn)
209 	struct rte_intr_source *src;
210 	struct rte_intr_callback *cb, *next;
212 	/* do parameter checking first */
213 	if (intr_handle == NULL || intr_handle->fd < 0) {
215 			"Unregistering with invalid input parameter\n");
220 		RTE_LOG(ERR, EAL, "Kqueue is not active\n");
224 	rte_spinlock_lock(&intr_lock);
226 	/* check if the interrupt source for the fd is existent */
227 	TAILQ_FOREACH(src, &intr_sources, next)
228 		if (src->intr_handle.fd == intr_handle->fd)
231 	/* No interrupt source registered for the fd */
235 	/* only usable if the source is active */
236 	} else if (src->active == 0) {
242 		/* walk through the callbacks and mark all that match. */
243 		for (cb = TAILQ_FIRST(&src->callbacks); cb != NULL; cb = next) {
244 			next = TAILQ_NEXT(cb, next);
245 			if (cb->cb_fn == cb_fn && (cb_arg == (void *)-1 ||
246 					cb->cb_arg == cb_arg)) {
247 				cb->pending_delete = 1;
254 	rte_spinlock_unlock(&intr_lock);
/*
 * Immediately unregister callbacks of intr_handle whose cb_fn matches
 * and whose cb_arg matches (a cb_arg of (void *)-1 matches any).
 * Fails while the source is 'active' (being dispatched) — callers are
 * then expected to use rte_intr_callback_unregister_pending().  Also
 * deletes the fd's kevent from the kqueue and frees the source once
 * its callback list becomes empty.
 * NOTE(review): return statements and goto labels are elided in this
 * fragment.
 */
260 rte_intr_callback_unregister(const struct rte_intr_handle *intr_handle,
261 		rte_intr_callback_fn cb_fn, void *cb_arg)
264 	struct rte_intr_source *src;
265 	struct rte_intr_callback *cb, *next;
267 	/* do parameter checking first */
268 	if (intr_handle == NULL || intr_handle->fd < 0) {
270 			"Unregistering with invalid input parameter\n");
274 		RTE_LOG(ERR, EAL, "Kqueue is not active\n");
278 	rte_spinlock_lock(&intr_lock);
280 	/* check if the interrupt source for the fd is existent */
281 	TAILQ_FOREACH(src, &intr_sources, next)
282 		if (src->intr_handle.fd == intr_handle->fd)
285 	/* No interrupt source registered for the fd */
289 	/* interrupt source has some active callbacks right now. */
290 	} else if (src->active != 0) {
299 		/* remove it from the kqueue */
300 		memset(&ke, 0, sizeof(ke));
301 		ke.flags = EV_DELETE; /* mark for deletion from the queue */
303 		if (intr_source_to_kevent(intr_handle, &ke) < 0) {
304 			RTE_LOG(ERR, EAL, "Cannot convert to kevent\n");
310 		 * remove intr file descriptor from wait list.
312 		if (kevent(kq, &ke, 1, NULL, 0, NULL) < 0) {
313 			RTE_LOG(ERR, EAL, "Error removing fd %d kevent, %s\n",
314 				src->intr_handle.fd, strerror(errno));
315 			/* removing non-existent event is an expected condition
316 			 * in some circumstances (e.g. oneshot events).
320 		/* walk through the callbacks and remove all that match. */
321 		for (cb = TAILQ_FIRST(&src->callbacks); cb != NULL; cb = next) {
322 			next = TAILQ_NEXT(cb, next);
323 			if (cb->cb_fn == cb_fn && (cb_arg == (void *)-1 ||
324 					cb->cb_arg == cb_arg)) {
325 				TAILQ_REMOVE(&src->callbacks, cb, next);
331 	/* all callbacks for that source are removed. */
332 	if (TAILQ_EMPTY(&src->callbacks)) {
333 		TAILQ_REMOVE(&intr_sources, src, next);
338 	rte_spinlock_unlock(&intr_lock);
/*
 * Enable the interrupt for the given handle.  VDEV handles are accepted
 * without touching any fd (their fd may be -1); other handle types are
 * validated and dispatched by type.
 * NOTE(review): return statements and per-case bodies are elided in
 * this fragment — ALARM and DEV_EVENT fall through to case bodies not
 * visible here.
 */
344 rte_intr_enable(const struct rte_intr_handle *intr_handle)
346 	if (intr_handle && intr_handle->type == RTE_INTR_HANDLE_VDEV)
349 	if (!intr_handle || intr_handle->fd < 0 || intr_handle->uio_cfg_fd < 0)
352 	switch (intr_handle->type) {
353 	/* not used at this moment */
354 	case RTE_INTR_HANDLE_ALARM:
356 	/* not used at this moment */
357 	case RTE_INTR_HANDLE_DEV_EVENT:
359 	/* unknown handle type */
362 			"Unknown handle type of fd %d\n",
/*
 * Disable the interrupt for the given handle.  Mirrors
 * rte_intr_enable(): VDEV handles short-circuit, invalid fds are
 * rejected, then dispatch by handle type.
 * NOTE(review): return statements and per-case bodies are elided in
 * this fragment.
 */
371 rte_intr_disable(const struct rte_intr_handle *intr_handle)
373 	if (intr_handle && intr_handle->type == RTE_INTR_HANDLE_VDEV)
376 	if (!intr_handle || intr_handle->fd < 0 || intr_handle->uio_cfg_fd < 0)
379 	switch (intr_handle->type) {
380 	/* not used at this moment */
381 	case RTE_INTR_HANDLE_ALARM:
383 	/* not used at this moment */
384 	case RTE_INTR_HANDLE_DEV_EVENT:
386 	/* unknown handle type */
389 			"Unknown handle type of fd %d\n",
/*
 * Acknowledge the interrupt so it can fire again.  Only the VDEV check
 * is visible in this fragment; the remaining body (and return paths)
 * are elided — NOTE(review): confirm against the full file.
 */
398 rte_intr_ack(const struct rte_intr_handle *intr_handle)
400 	if (intr_handle && intr_handle->type == RTE_INTR_HANDLE_VDEV)
/*
 * Dispatch loop body run by the interrupt thread: for each kevent
 * returned by kevent(), find the matching source, drain the fd where
 * the handle type requires it, invoke every registered callback with
 * the lock dropped (each callback is copied to active_cb first, since
 * the list may change while unlocked), then process deferred deletions
 * (pending_delete) and reap sources whose callback list became empty.
 * NOTE(review): many lines (active-flag updates, bytes_read
 * assignments, continue/break statements) are elided in this fragment.
 */
407 eal_intr_process_interrupts(struct kevent *events, int nfds)
409 	struct rte_intr_callback active_cb;
410 	union rte_intr_read_buffer buf;
411 	struct rte_intr_callback *cb, *next;
412 	struct rte_intr_source *src;
417 	for (n = 0; n < nfds; n++) {
418 		int event_fd = events[n].ident;
420 		rte_spinlock_lock(&intr_lock);
421 		TAILQ_FOREACH(src, &intr_sources, next)
422 			if (src->intr_handle.fd == event_fd)
425 			rte_spinlock_unlock(&intr_lock);
429 		/* mark this interrupt source as active and release the lock. */
431 		rte_spinlock_unlock(&intr_lock);
433 		/* set the length to be read for different handle type */
434 		switch (src->intr_handle.type) {
435 		case RTE_INTR_HANDLE_ALARM:
439 		case RTE_INTR_HANDLE_VDEV:
440 		case RTE_INTR_HANDLE_EXT:
444 		case RTE_INTR_HANDLE_DEV_EVENT:
453 		if (bytes_read > 0) {
455 			 * read out to clear the ready-to-be-read flag
458 			bytes_read = read(event_fd, &buf, bytes_read);
459 			if (bytes_read < 0) {
460 				if (errno == EINTR || errno == EWOULDBLOCK)
463 				RTE_LOG(ERR, EAL, "Error reading from file "
464 					"descriptor %d: %s\n",
467 			} else if (bytes_read == 0)
468 				RTE_LOG(ERR, EAL, "Read nothing from file "
469 					"descriptor %d\n", event_fd);
474 		/* grab a lock, again to call callbacks and update status. */
475 		rte_spinlock_lock(&intr_lock);
478 		/* Finally, call all callbacks. */
479 		TAILQ_FOREACH(cb, &src->callbacks, next) {
481 			/* make a copy and unlock. */
483 			rte_spinlock_unlock(&intr_lock);
485 			/* call the actual callback */
486 			active_cb.cb_fn(active_cb.cb_arg);
488 			/*get the lock back. */
489 			rte_spinlock_lock(&intr_lock);
493 		/* we done with that interrupt source, release it. */
496 		/* check if any callback are supposed to be removed */
497 		for (cb = TAILQ_FIRST(&src->callbacks); cb != NULL; cb = next) {
498 			next = TAILQ_NEXT(cb, next);
499 			if (cb->pending_delete) {
500 				/* remove it from the kqueue */
501 				memset(&ke, 0, sizeof(ke));
502 				/* mark for deletion from the queue */
503 				ke.flags = EV_DELETE;
505 				if (intr_source_to_kevent(&src->intr_handle, &ke) < 0) {
506 					RTE_LOG(ERR, EAL, "Cannot convert to kevent\n");
507 					rte_spinlock_unlock(&intr_lock);
512 				 * remove intr file descriptor from wait list.
514 				if (kevent(kq, &ke, 1, NULL, 0, NULL) < 0) {
515 					RTE_LOG(ERR, EAL, "Error removing fd %d kevent, "
516 						"%s\n", src->intr_handle.fd,
518 					/* removing non-existent event is an expected
519 					 * condition in some circumstances
520 					 * (e.g. oneshot events).
524 				TAILQ_REMOVE(&src->callbacks, cb, next);
/* notify the owner before freeing the callback */
526 					cb->ucb_fn(&src->intr_handle, cb->cb_arg);
531 		/* all callbacks for that source are removed. */
532 		if (TAILQ_EMPTY(&src->callbacks)) {
533 			TAILQ_REMOVE(&intr_sources, src, next);
537 		rte_spinlock_unlock(&intr_lock);
/*
 * Entry point of the dedicated interrupt thread: block forever in
 * kevent() on the global kq and hand each batch of ready events to
 * eal_intr_process_interrupts().  A NULL timeout means kevent() never
 * times out here.
 * NOTE(review): the enclosing loop construct and the error-handling
 * branches are elided in this fragment.
 */
542 eal_intr_thread_main(void *arg __rte_unused)
544 	struct kevent events[MAX_INTR_EVENTS];
547 	/* host thread, never break out */
549 		/* do not change anything, just wait */
550 		nfds = kevent(kq, NULL, 0, events, MAX_INTR_EVENTS, NULL);
557 				"kevent returns with fail\n");
560 		/* kevent timeout, will never happen here */
564 		/* kevent has at least one fd ready to read */
565 		eal_intr_process_interrupts(events, nfds);
/*
 * One-time EAL interrupt subsystem init: initialize the global source
 * list, create the kqueue instance (stored in kq — the creation call
 * itself is elided in this fragment), and spawn the control thread
 * running eal_intr_thread_main().
 * NOTE(review): return statements and the kqueue() call are elided
 * here — error semantics cannot be confirmed from this fragment.
 */
573 rte_eal_intr_init(void)
577 	/* init the global interrupt source head */
578 	TAILQ_INIT(&intr_sources);
582 		RTE_LOG(ERR, EAL, "Cannot create kqueue instance\n");
586 	/* create the host thread to wait/handle the interrupt */
587 	ret = rte_ctrl_thread_create(&intr_thread, "eal-intr-thread", NULL,
588 			eal_intr_thread_main, NULL);
592 			"Failed to create thread for interrupt handling\n");
/*
 * The following functions form the Rx-interrupt / epoll / event-fd API,
 * which is Linux-specific.  On this (FreeBSD/kqueue) implementation
 * they are stubs that only consume their arguments via RTE_SET_USED;
 * their return statements are elided in this fragment — presumably they
 * report "not supported" to callers (TODO confirm against full file).
 */
599 rte_intr_rx_ctl(struct rte_intr_handle *intr_handle,
600 		int epfd, int op, unsigned int vec, void *data)
602 	RTE_SET_USED(intr_handle);
/* stub: event fds are not supported on this platform */
612 rte_intr_efd_enable(struct rte_intr_handle *intr_handle, uint32_t nb_efd)
614 	RTE_SET_USED(intr_handle);
615 	RTE_SET_USED(nb_efd);
/* stub: nothing to tear down */
621 rte_intr_efd_disable(struct rte_intr_handle *intr_handle)
623 	RTE_SET_USED(intr_handle);
/* stub: datapath interrupts are never enabled here */
627 rte_intr_dp_is_en(struct rte_intr_handle *intr_handle)
629 	RTE_SET_USED(intr_handle);
/* stub */
634 rte_intr_allow_others(struct rte_intr_handle *intr_handle)
636 	RTE_SET_USED(intr_handle);
/* stub */
641 rte_intr_cap_multiple(struct rte_intr_handle *intr_handle)
643 	RTE_SET_USED(intr_handle);
/* stub: epoll emulation is not provided */
648 rte_epoll_wait(int epfd, struct rte_epoll_event *events,
649 	       int maxevents, int timeout)
652 	RTE_SET_USED(events);
653 	RTE_SET_USED(maxevents);
654 	RTE_SET_USED(timeout);
/* stub */
660 rte_epoll_ctl(int epfd, int op, int fd, struct rte_epoll_event *event)
/* stub: no per-thread epoll fd exists on this platform */
671 rte_intr_tls_epfd(void)
/* stub */
677 rte_intr_free_epoll_fd(struct rte_intr_handle *intr_handle)
679 	RTE_SET_USED(intr_handle);
/* Return non-zero iff the calling thread is the EAL interrupt thread
 * created by rte_eal_intr_init(). */
682 int rte_thread_is_intr(void)
684 	return pthread_equal(intr_thread, pthread_self());