1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2018 Intel Corporation
11 #include <rte_errno.h>
12 #include <rte_lcore.h>
13 #include <rte_spinlock.h>
14 #include <rte_common.h>
15 #include <rte_interrupts.h>
17 #include "eal_private.h"
18 #include "eal_alarm_private.h"
20 #define MAX_INTR_EVENTS 16
/* NOTE(review): this view of the file is elided -- several original lines
 * (closing braces, remaining union members, etc.) are not visible here.
 * Code tokens are left byte-identical; only comments were added/edited. */
23 * union buffer for reading on different devices
25 union rte_intr_read_buffer {
26 char charbuf[16]; /* for generic byte-stream handle types */
/* Intrusive list heads for callback and interrupt-source lists (BSD TAILQ). */
29 TAILQ_HEAD(rte_intr_cb_list, rte_intr_callback);
30 TAILQ_HEAD(rte_intr_source_list, rte_intr_source);
/* One user-registered callback attached to an interrupt source. */
32 struct rte_intr_callback {
33 TAILQ_ENTRY(rte_intr_callback) next;
34 rte_intr_callback_fn cb_fn; /**< callback address */
35 void *cb_arg; /**< parameter for callback */
36 uint8_t pending_delete; /**< delete after callback is called */
37 rte_intr_unregister_callback_fn ucb_fn; /**< fn to call before cb is deleted */
/* One interrupt source (an fd/handle) plus all callbacks registered on it. */
40 struct rte_intr_source {
41 TAILQ_ENTRY(rte_intr_source) next;
42 struct rte_intr_handle intr_handle; /**< interrupt handle */
43 struct rte_intr_cb_list callbacks; /**< user callbacks */
47 /* global spinlock protecting intr_sources and per-source callback lists */
48 static rte_spinlock_t intr_lock = RTE_SPINLOCK_INITIALIZER;
50 /* interrupt sources list */
51 static struct rte_intr_source_list intr_sources;
53 /* interrupt handling thread */
54 static pthread_t intr_thread;
/* kqueue descriptor used for all interrupt waiting; -1 until initialized
 * (presumably in rte_eal_intr_init() -- the kqueue() call is elided here). */
56 static volatile int kq = -1;
/*
 * Translate an interrupt handle into a struct kevent suitable for
 * registration with the kqueue.
 *
 * Alarm handles become EVFILT_TIMER events: one-shot (EV_ONESHOT),
 * nanosecond units (NOTE_NSECONDS), armed with the soonest pending alarm
 * timeout from eal_alarm_get_timeout_ns().  All other handle types become
 * EVFILT_READ events on the handle's fd.
 * NOTE(review): the error-return path and closing braces are elided in
 * this view -- presumably returns < 0 when no alarm timeout is pending.
 */
59 intr_source_to_kevent(const struct rte_intr_handle *ih, struct kevent *ke)
61 /* alarm callbacks are special case */
62 if (ih->type == RTE_INTR_HANDLE_ALARM) {
65 /* get soonest alarm timeout */
66 if (eal_alarm_get_timeout_ns(&timeout_ns) < 0)
69 ke->filter = EVFILT_TIMER;
70 /* timers are one shot */
71 ke->flags |= EV_ONESHOT;
72 ke->fflags = NOTE_NSECONDS;
73 ke->data = timeout_ns;
/* non-alarm handles: level-triggered read-readiness on the fd */
75 ke->filter = EVFILT_READ;
/*
 * Register a user callback (cb, cb_arg) on the interrupt described by
 * intr_handle.
 *
 * Allocates a callback node and, under intr_lock, attaches it to the
 * existing source matching intr_handle->fd, or creates a new source if
 * none exists.  For new sources (and always for alarm handles, whose
 * one-shot timers must be re-armed) the corresponding kevent is added to
 * the kqueue; on kevent failure the callback (and an empty source) are
 * rolled back.  Requires kq to be initialized (>= 0).
 */
83 rte_intr_callback_register(const struct rte_intr_handle *intr_handle,
84 rte_intr_callback_fn cb, void *cb_arg)
86 struct rte_intr_callback *callback = NULL;
87 struct rte_intr_source *src = NULL;
90 /* first do parameter checking */
91 if (intr_handle == NULL || intr_handle->fd < 0 || cb == NULL) {
93 "Registering with invalid input parameter\n");
97 RTE_LOG(ERR, EAL, "Kqueue is not active: %d\n", kq);
101 /* allocate a new interrupt callback entity */
102 callback = calloc(1, sizeof(*callback));
103 if (callback == NULL) {
104 RTE_LOG(ERR, EAL, "Can not allocate memory\n");
107 callback->cb_fn = cb;
108 callback->cb_arg = cb_arg;
109 callback->pending_delete = 0;
110 callback->ucb_fn = NULL;
112 rte_spinlock_lock(&intr_lock);
114 /* check if there is at least one callback registered for the fd */
115 TAILQ_FOREACH(src, &intr_sources, next) {
116 if (src->intr_handle.fd == intr_handle->fd) {
117 /* we had no interrupts for this */
118 if (TAILQ_EMPTY(&src->callbacks))
121 TAILQ_INSERT_TAIL(&(src->callbacks), callback, next);
127 /* no existing callbacks for this - add new source */
129 src = calloc(1, sizeof(*src));
131 RTE_LOG(ERR, EAL, "Can not allocate memory\n");
135 src->intr_handle = *intr_handle;
136 TAILQ_INIT(&src->callbacks);
137 TAILQ_INSERT_TAIL(&(src->callbacks), callback, next);
138 TAILQ_INSERT_TAIL(&intr_sources, src, next);
144 /* add events to the queue. timer events are special as we need to
147 if (add_event || src->intr_handle.type == RTE_INTR_HANDLE_ALARM) {
150 memset(&ke, 0, sizeof(ke));
151 ke.flags = EV_ADD; /* mark for addition to the queue */
153 if (intr_source_to_kevent(intr_handle, &ke) < 0) {
154 RTE_LOG(ERR, EAL, "Cannot convert interrupt handle to kevent\n");
160 * add the intr file descriptor into wait list.
162 if (kevent(kq, &ke, 1, NULL, 0, NULL) < 0) {
163 /* currently, nic_uio does not support interrupts, so
164 * this error will always be triggered and output to the
165 * user. so, don't output it unless debug log level set.
168 RTE_LOG(DEBUG, EAL, "Interrupt handle %d not supported\n",
169 src->intr_handle.fd);
171 RTE_LOG(ERR, EAL, "Error adding fd %d "
/* error rollback: detach the callback just added; if the source is now
 * empty it was created by this call, so unlink and free it too */
179 rte_spinlock_unlock(&intr_lock);
185 TAILQ_REMOVE(&(src->callbacks), callback, next);
186 if (TAILQ_EMPTY(&(src->callbacks))) {
187 TAILQ_REMOVE(&intr_sources, src, next);
192 rte_spinlock_unlock(&intr_lock);
/*
 * Mark matching callbacks on intr_handle->fd for deferred deletion.
 *
 * Used when unregistering from within an interrupt callback: because the
 * source is active (being serviced by the interrupt thread), callbacks
 * cannot be freed immediately.  Instead each callback whose cb_fn matches
 * (and whose cb_arg matches, unless cb_arg is the (void *)-1 wildcard) is
 * flagged pending_delete; the interrupt thread removes and frees it after
 * servicing, invoking ucb_fn first if supplied.  Fails unless the source
 * exists and is currently active.
 */
197 rte_intr_callback_unregister_pending(const struct rte_intr_handle *intr_handle,
198 rte_intr_callback_fn cb_fn, void *cb_arg,
199 rte_intr_unregister_callback_fn ucb_fn)
202 struct rte_intr_source *src;
203 struct rte_intr_callback *cb, *next;
205 /* do parameter checking first */
206 if (intr_handle == NULL || intr_handle->fd < 0) {
208 "Unregistering with invalid input parameter\n");
213 RTE_LOG(ERR, EAL, "Kqueue is not active\n");
217 rte_spinlock_lock(&intr_lock);
219 /* check if the interrupt source for the fd exists */
220 TAILQ_FOREACH(src, &intr_sources, next)
221 if (src->intr_handle.fd == intr_handle->fd)
224 /* No interrupt source registered for the fd */
228 /* only usable if the source is active */
229 } else if (src->active == 0) {
235 /* walk through the callbacks and mark all that match. */
236 for (cb = TAILQ_FIRST(&src->callbacks); cb != NULL; cb = next) {
237 next = TAILQ_NEXT(cb, next);
238 if (cb->cb_fn == cb_fn && (cb_arg == (void *)-1 ||
239 cb->cb_arg == cb_arg)) {
240 cb->pending_delete = 1;
247 rte_spinlock_unlock(&intr_lock);
/*
 * Immediately unregister callbacks matching (cb_fn, cb_arg) from the
 * source for intr_handle->fd.  cb_arg == (void *)-1 acts as a wildcard
 * matching any argument.
 *
 * Fails if the source is currently active (being serviced) -- use
 * rte_intr_callback_unregister_pending() from within a callback instead.
 * Removes the source's kevent from the kqueue; a kevent() failure here is
 * logged but tolerated (e.g. an already-fired oneshot timer no longer
 * exists in the queue).  If the last callback is removed, the source
 * itself is unlinked.  All under intr_lock.
 */
253 rte_intr_callback_unregister(const struct rte_intr_handle *intr_handle,
254 rte_intr_callback_fn cb_fn, void *cb_arg)
257 struct rte_intr_source *src;
258 struct rte_intr_callback *cb, *next;
260 /* do parameter checking first */
261 if (intr_handle == NULL || intr_handle->fd < 0) {
263 "Unregistering with invalid input parameter\n");
267 RTE_LOG(ERR, EAL, "Kqueue is not active\n");
271 rte_spinlock_lock(&intr_lock);
273 /* check if the interrupt source for the fd exists */
274 TAILQ_FOREACH(src, &intr_sources, next)
275 if (src->intr_handle.fd == intr_handle->fd)
278 /* No interrupt source registered for the fd */
282 /* interrupt source has some active callbacks right now. */
283 } else if (src->active != 0) {
292 /* remove it from the kqueue */
293 memset(&ke, 0, sizeof(ke));
294 ke.flags = EV_DELETE; /* mark for deletion from the queue */
296 if (intr_source_to_kevent(intr_handle, &ke) < 0) {
297 RTE_LOG(ERR, EAL, "Cannot convert to kevent\n");
303 * remove intr file descriptor from wait list.
305 if (kevent(kq, &ke, 1, NULL, 0, NULL) < 0) {
306 RTE_LOG(ERR, EAL, "Error removing fd %d kevent, %s\n",
307 src->intr_handle.fd, strerror(errno));
308 /* removing a non-existent event is an expected condition
309 * in some circumstances (e.g. oneshot events).
313 /* walk through the callbacks and remove all that match. */
314 for (cb = TAILQ_FIRST(&src->callbacks); cb != NULL; cb = next) {
315 next = TAILQ_NEXT(cb, next);
316 if (cb->cb_fn == cb_fn && (cb_arg == (void *)-1 ||
317 cb->cb_arg == cb_arg)) {
318 TAILQ_REMOVE(&src->callbacks, cb, next);
324 /* all callbacks for that source are removed. */
325 if (TAILQ_EMPTY(&src->callbacks)) {
326 TAILQ_REMOVE(&intr_sources, src, next);
331 rte_spinlock_unlock(&intr_lock);
/*
 * Enable the interrupt for the given handle.
 *
 * VDEV handles are accepted even without a valid fd (their fd may be -1).
 * For other types both fd and uio_cfg_fd must be valid.  On this platform
 * alarm and device-event handles need no enable action; any other type is
 * reported as unknown.  NOTE(review): the return statements between the
 * visible lines are elided in this view.
 */
337 rte_intr_enable(const struct rte_intr_handle *intr_handle)
339 if (intr_handle && intr_handle->type == RTE_INTR_HANDLE_VDEV)
342 if (!intr_handle || intr_handle->fd < 0 || intr_handle->uio_cfg_fd < 0)
345 switch (intr_handle->type) {
346 /* not used at this moment */
347 case RTE_INTR_HANDLE_ALARM:
349 /* not used at this moment */
350 case RTE_INTR_HANDLE_DEV_EVENT:
352 /* unknown handle type */
355 "Unknown handle type of fd %d\n",
/*
 * Disable the interrupt for the given handle.
 *
 * Mirrors rte_intr_enable(): VDEV handles are accepted without a valid
 * fd; alarm and device-event handles require no disable action on this
 * platform; unknown types are reported.  NOTE(review): the return
 * statements between the visible lines are elided in this view.
 */
364 rte_intr_disable(const struct rte_intr_handle *intr_handle)
366 if (intr_handle && intr_handle->type == RTE_INTR_HANDLE_VDEV)
369 if (!intr_handle || intr_handle->fd < 0 || intr_handle->uio_cfg_fd < 0)
372 switch (intr_handle->type) {
373 /* not used at this moment */
374 case RTE_INTR_HANDLE_ALARM:
376 /* not used at this moment */
377 case RTE_INTR_HANDLE_DEV_EVENT:
379 /* unknown handle type */
382 "Unknown handle type of fd %d\n",
/*
 * Acknowledge an interrupt so the next one can fire.  Only the VDEV
 * special-case check is visible here; the remainder of the body is
 * elided in this view.
 */
391 rte_intr_ack(const struct rte_intr_handle *intr_handle)
393 if (intr_handle && intr_handle->type == RTE_INTR_HANDLE_VDEV)
/*
 * Service a batch of kevents returned by the interrupt thread's kevent()
 * wait.
 *
 * For each event: locate the source for the event's fd (under intr_lock),
 * mark it active, drain the fd (read length depends on handle type),
 * then invoke every registered callback.  Each callback is copied and
 * invoked with intr_lock RELEASED so callbacks may themselves call the
 * (un)register APIs; the lock is re-taken afterwards -- this ordering is
 * intentional and order-sensitive.  Finally, callbacks flagged
 * pending_delete by rte_intr_callback_unregister_pending() are removed
 * from the kqueue and freed (calling ucb_fn first if set), and an
 * empty source is unlinked.
 */
400 eal_intr_process_interrupts(struct kevent *events, int nfds)
402 struct rte_intr_callback active_cb;
403 union rte_intr_read_buffer buf;
404 struct rte_intr_callback *cb, *next;
405 struct rte_intr_source *src;
410 for (n = 0; n < nfds; n++) {
411 int event_fd = events[n].ident;
413 rte_spinlock_lock(&intr_lock);
414 TAILQ_FOREACH(src, &intr_sources, next)
415 if (src->intr_handle.fd == event_fd)
418 rte_spinlock_unlock(&intr_lock);
422 /* mark this interrupt source as active and release the lock. */
424 rte_spinlock_unlock(&intr_lock);
426 /* set the length to be read for the different handle types */
427 switch (src->intr_handle.type) {
428 case RTE_INTR_HANDLE_ALARM:
432 case RTE_INTR_HANDLE_VDEV:
433 case RTE_INTR_HANDLE_EXT:
437 case RTE_INTR_HANDLE_DEV_EVENT:
446 if (bytes_read > 0) {
448 * read out to clear the ready-to-be-read flag
451 bytes_read = read(event_fd, &buf, bytes_read);
452 if (bytes_read < 0) {
453 if (errno == EINTR || errno == EWOULDBLOCK)
456 RTE_LOG(ERR, EAL, "Error reading from file "
457 "descriptor %d: %s\n",
460 } else if (bytes_read == 0)
461 RTE_LOG(ERR, EAL, "Read nothing from file "
462 "descriptor %d\n", event_fd);
467 /* grab a lock, again to call callbacks and update status. */
468 rte_spinlock_lock(&intr_lock);
471 /* Finally, call all callbacks. */
472 TAILQ_FOREACH(cb, &src->callbacks, next) {
474 /* make a copy and unlock: callbacks may re-enter this module */
476 rte_spinlock_unlock(&intr_lock);
478 /* call the actual callback */
479 active_cb.cb_fn(active_cb.cb_arg);
481 /* get the lock back. */
482 rte_spinlock_lock(&intr_lock);
486 /* we are done with that interrupt source, release it. */
489 /* check if any callbacks are supposed to be removed */
490 for (cb = TAILQ_FIRST(&src->callbacks); cb != NULL; cb = next) {
491 next = TAILQ_NEXT(cb, next);
492 if (cb->pending_delete) {
493 /* remove it from the kqueue */
494 memset(&ke, 0, sizeof(ke));
495 /* mark for deletion from the queue */
496 ke.flags = EV_DELETE;
498 if (intr_source_to_kevent(&src->intr_handle, &ke) < 0) {
499 RTE_LOG(ERR, EAL, "Cannot convert to kevent\n");
500 rte_spinlock_unlock(&intr_lock);
505 * remove intr file descriptor from wait list.
507 if (kevent(kq, &ke, 1, NULL, 0, NULL) < 0) {
508 RTE_LOG(ERR, EAL, "Error removing fd %d kevent, "
509 "%s\n", src->intr_handle.fd,
511 /* removing a non-existent event is an expected
512 * condition in some circumstances
513 * (e.g. oneshot events).
517 TAILQ_REMOVE(&src->callbacks, cb, next);
519 cb->ucb_fn(&src->intr_handle, cb->cb_arg);
524 /* all callbacks for that source are removed. */
525 if (TAILQ_EMPTY(&src->callbacks)) {
526 TAILQ_REMOVE(&intr_sources, src, next);
530 rte_spinlock_unlock(&intr_lock);
/*
 * Main loop of the EAL interrupt-handling thread.
 *
 * Blocks indefinitely on kevent() (no timeout, so a timeout return should
 * never happen), then dispatches any ready events through
 * eal_intr_process_interrupts().  Runs forever; never returns in normal
 * operation.
 */
535 eal_intr_thread_main(void *arg __rte_unused)
537 struct kevent events[MAX_INTR_EVENTS];
540 /* host thread, never break out */
542 /* do not change anything, just wait */
543 nfds = kevent(kq, NULL, 0, events, MAX_INTR_EVENTS, NULL);
550 "kevent returns with fail\n");
553 /* kevent timeout, will never happen here */
557 /* kevent has at least one fd ready to read */
558 eal_intr_process_interrupts(events, nfds);
/*
 * Initialize the EAL interrupt subsystem: set up the source list, create
 * the kqueue (stored in the file-scope 'kq'; the kqueue() call itself is
 * elided in this view), and spawn the control thread that runs
 * eal_intr_thread_main().  Returns non-zero on failure.
 */
566 rte_eal_intr_init(void)
570 /* init the global interrupt source head */
571 TAILQ_INIT(&intr_sources);
575 RTE_LOG(ERR, EAL, "Cannot create kqueue instance\n");
579 /* create the host thread to wait/handle the interrupt */
580 ret = rte_ctrl_thread_create(&intr_thread, "eal-intr-thread", NULL,
581 eal_intr_thread_main, NULL);
585 "Failed to create thread for interrupt handling\n");
/*
 * Stub implementations of the epoll/event-fd style interrupt APIs.
 * These facilities are not supported on this platform; each stub merely
 * marks its parameters as used (RTE_SET_USED) to silence warnings.  The
 * return statements are elided in this view -- presumably they report
 * failure/not-supported to callers.
 */
592 rte_intr_rx_ctl(struct rte_intr_handle *intr_handle,
593 int epfd, int op, unsigned int vec, void *data)
595 RTE_SET_USED(intr_handle);
/* event fds for Rx interrupts: unsupported */
605 rte_intr_efd_enable(struct rte_intr_handle *intr_handle, uint32_t nb_efd)
607 RTE_SET_USED(intr_handle);
608 RTE_SET_USED(nb_efd);
614 rte_intr_efd_disable(struct rte_intr_handle *intr_handle)
616 RTE_SET_USED(intr_handle);
/* datapath interrupt capability queries: unsupported */
620 rte_intr_dp_is_en(struct rte_intr_handle *intr_handle)
622 RTE_SET_USED(intr_handle);
627 rte_intr_allow_others(struct rte_intr_handle *intr_handle)
629 RTE_SET_USED(intr_handle);
634 rte_intr_cap_multiple(struct rte_intr_handle *intr_handle)
636 RTE_SET_USED(intr_handle);
/* epoll emulation: unsupported */
641 rte_epoll_wait(int epfd, struct rte_epoll_event *events,
642 int maxevents, int timeout)
645 RTE_SET_USED(events);
646 RTE_SET_USED(maxevents);
647 RTE_SET_USED(timeout);
653 rte_epoll_ctl(int epfd, int op, int fd, struct rte_epoll_event *event)
664 rte_intr_tls_epfd(void)
670 rte_intr_free_epoll_fd(struct rte_intr_handle *intr_handle)
672 RTE_SET_USED(intr_handle);