/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2018 Intel Corporation
 */

#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <pthread.h>
#include <sys/types.h>
#include <sys/event.h>
#include <sys/queue.h>

#include <rte_errno.h>
#include <rte_lcore.h>
#include <rte_spinlock.h>
#include <rte_common.h>
#include <rte_interrupts.h>

#include "eal_private.h"

#define MAX_INTR_EVENTS 16
/**
 * union buffer for reading on different devices
 */
union rte_intr_read_buffer {
	char charbuf[16];	/* for others */
};
TAILQ_HEAD(rte_intr_cb_list, rte_intr_callback);
TAILQ_HEAD(rte_intr_source_list, rte_intr_source);
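
/* Both heads come from <sys/queue.h>: rte_intr_cb_list holds the callbacks
 * registered on a single source, and rte_intr_source_list chains every
 * known interrupt source.
 */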

struct rte_intr_callback {
	TAILQ_ENTRY(rte_intr_callback) next;
	rte_intr_callback_fn cb_fn;	/**< callback address */
	void *cb_arg;			/**< parameter for callback */
};

struct rte_intr_source {
	TAILQ_ENTRY(rte_intr_source) next;
	struct rte_intr_handle intr_handle;	/**< interrupt handle */
	struct rte_intr_cb_list callbacks;	/**< user callbacks */
	uint32_t active;	/**< nonzero while callbacks are running */
};

/* global spinlock for interrupt data operation */
static rte_spinlock_t intr_lock = RTE_SPINLOCK_INITIALIZER;

/* interrupt sources list */
static struct rte_intr_source_list intr_sources;

/* interrupt handling thread */
static pthread_t intr_thread;

static volatile int kq = -1;
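
/* kq is the kqueue(2) descriptor the interrupt thread waits on; -1 means
 * the interrupt subsystem is not (or no longer) initialized.
 */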

static int
intr_source_to_kevent(const struct rte_intr_handle *ih, struct kevent *ke)
{
	/* wait for the source fd to become readable */
	ke->filter = EVFILT_READ;
	ke->ident = ih->fd;

	return 0;
}

int
rte_intr_callback_register(const struct rte_intr_handle *intr_handle,
		rte_intr_callback_fn cb, void *cb_arg)
{
	struct rte_intr_callback *callback = NULL;
	struct rte_intr_source *src = NULL;
	int ret = 0, add_event = 0;

	/* first do parameter checking */
	if (intr_handle == NULL || intr_handle->fd < 0 || cb == NULL) {
		RTE_LOG(ERR, EAL,
			"Registering with invalid input parameter\n");
		return -EINVAL;
	}
	if (kq < 0) {
		RTE_LOG(ERR, EAL, "Kqueue is not active: %d\n", kq);
		return -ENODEV;
	}

	/* allocate a new interrupt callback entity */
	callback = calloc(1, sizeof(*callback));
	if (callback == NULL) {
		RTE_LOG(ERR, EAL, "Cannot allocate memory\n");
		return -ENOMEM;
	}
	callback->cb_fn = cb;
	callback->cb_arg = cb_arg;

	rte_spinlock_lock(&intr_lock);

	/* check if there is at least one callback registered for the fd */
	TAILQ_FOREACH(src, &intr_sources, next) {
		if (src->intr_handle.fd == intr_handle->fd) {
			/* we had no interrupts for this */
			if (TAILQ_EMPTY(&src->callbacks))
				add_event = 1;

			TAILQ_INSERT_TAIL(&(src->callbacks), callback, next);
			break;
		}
	}

	/* no existing callbacks for this - add new source */
	if (src == NULL) {
		src = calloc(1, sizeof(*src));
		if (src == NULL) {
			RTE_LOG(ERR, EAL, "Cannot allocate memory\n");
			ret = -ENOMEM;
			goto fail;
		}
		src->intr_handle = *intr_handle;
		TAILQ_INIT(&src->callbacks);
		TAILQ_INSERT_TAIL(&(src->callbacks), callback, next);
		TAILQ_INSERT_TAIL(&intr_sources, src, next);
		add_event = 1;
	}

	/* if the fd was not already being watched, add it to the kqueue */
	if (add_event) {
		struct kevent ke;

		memset(&ke, 0, sizeof(ke));
		ke.flags = EV_ADD; /* mark for addition to the queue */

		if (intr_source_to_kevent(intr_handle, &ke) < 0) {
			RTE_LOG(ERR, EAL,
				"Cannot convert interrupt handle to kevent\n");
			ret = -ENODEV;
			goto fail;
		}

		/* add the intr file descriptor into the wait list. */
		if (kevent(kq, &ke, 1, NULL, 0, NULL) < 0) {
			RTE_LOG(ERR, EAL, "Error adding fd %d kevent, %s\n",
				src->intr_handle.fd, strerror(errno));
			ret = -errno;
			goto fail;
		}
	}
	rte_spinlock_unlock(&intr_lock);

	return ret;

fail:
	/* roll back: drop the new callback, and the source if it is empty */
	if (src != NULL) {
		TAILQ_REMOVE(&(src->callbacks), callback, next);
		if (TAILQ_EMPTY(&(src->callbacks))) {
			TAILQ_REMOVE(&intr_sources, src, next);
			free(src);
		}
	}
	free(callback);
	rte_spinlock_unlock(&intr_lock);

	return ret;
}
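
/* Illustrative usage (hypothetical names, not part of this file): a driver
 * registers one handler per interrupt fd and passes its own context, e.g.:
 *
 *	static void my_intr_handler(void *arg)
 *	{
 *		struct my_dev *dev = arg;
 *		... service the device ...
 *	}
 *
 *	rte_intr_callback_register(&dev->intr_handle, my_intr_handler, dev);
 */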

int
rte_intr_callback_unregister(const struct rte_intr_handle *intr_handle,
		rte_intr_callback_fn cb_fn, void *cb_arg)
{
	int ret = 0;
	struct rte_intr_source *src;
	struct rte_intr_callback *cb, *next;

	/* do parameter checking first */
	if (intr_handle == NULL || intr_handle->fd < 0) {
		RTE_LOG(ERR, EAL,
			"Unregistering with invalid input parameter\n");
		return -EINVAL;
	}
	if (kq < 0) {
		RTE_LOG(ERR, EAL, "Kqueue is not active\n");
		return -ENODEV;
	}

	rte_spinlock_lock(&intr_lock);

	/* check whether an interrupt source exists for the fd */
	TAILQ_FOREACH(src, &intr_sources, next)
		if (src->intr_handle.fd == intr_handle->fd)
			break;

	/* No interrupt source registered for the fd */
	if (src == NULL) {
		ret = -ENOENT;

	/* interrupt source has some active callbacks right now. */
	} else if (src->active != 0) {
		ret = -EAGAIN;

	/* ok to remove. */
	} else {
		struct kevent ke;

		/* remove it from the kqueue */
		memset(&ke, 0, sizeof(ke));
		ke.flags = EV_DELETE; /* mark for deletion from the queue */

		if (intr_source_to_kevent(intr_handle, &ke) < 0) {
			RTE_LOG(ERR, EAL, "Cannot convert to kevent\n");
			ret = -ENODEV;
			goto out;
		}

		/* remove the intr file descriptor from the wait list. */
		if (kevent(kq, &ke, 1, NULL, 0, NULL) < 0) {
			RTE_LOG(ERR, EAL, "Error removing fd %d kevent, %s\n",
				src->intr_handle.fd, strerror(errno));
			/* not fatal: the event may already be gone */
		}

		/* walk through the callbacks and remove all that match. */
		for (cb = TAILQ_FIRST(&src->callbacks); cb != NULL; cb = next) {
			next = TAILQ_NEXT(cb, next);
			if (cb->cb_fn == cb_fn && (cb_arg == (void *)-1 ||
					cb->cb_arg == cb_arg)) {
				TAILQ_REMOVE(&src->callbacks, cb, next);
				free(cb);
				ret++;
			}
		}

		/* all callbacks for that source are removed. */
		if (TAILQ_EMPTY(&src->callbacks)) {
			TAILQ_REMOVE(&intr_sources, src, next);
			free(src);
		}
	}

out:
	rte_spinlock_unlock(&intr_lock);

	return ret;
}
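
/* Note on the matching rule above: a cb_arg of (void *)-1 acts as a
 * wildcard, so the following call (hypothetical names) removes every
 * callback registered with my_intr_handler on that fd, whatever its
 * argument:
 *
 *	rte_intr_callback_unregister(&dev->intr_handle, my_intr_handler,
 *			(void *)-1);
 */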

int
rte_intr_enable(const struct rte_intr_handle *intr_handle)
{
	/* vdev interrupts are handled in software; nothing to enable */
	if (intr_handle && intr_handle->type == RTE_INTR_HANDLE_VDEV)
		return 0;

	if (!intr_handle || intr_handle->fd < 0 || intr_handle->uio_cfg_fd < 0)
		return -1;

	switch (intr_handle->type) {
	/* not used at this moment */
	case RTE_INTR_HANDLE_ALARM:
		return -1;
	/* not used at this moment */
	case RTE_INTR_HANDLE_DEV_EVENT:
		return -1;
	/* unknown handle type */
	default:
		RTE_LOG(ERR, EAL,
			"Unknown handle type of fd %d\n",
			intr_handle->fd);
		return -1;
	}
}

int
rte_intr_disable(const struct rte_intr_handle *intr_handle)
{
	/* vdev interrupts are handled in software; nothing to disable */
	if (intr_handle && intr_handle->type == RTE_INTR_HANDLE_VDEV)
		return 0;

	if (!intr_handle || intr_handle->fd < 0 || intr_handle->uio_cfg_fd < 0)
		return -1;

	switch (intr_handle->type) {
	/* not used at this moment */
	case RTE_INTR_HANDLE_ALARM:
		return -1;
	/* not used at this moment */
	case RTE_INTR_HANDLE_DEV_EVENT:
		return -1;
	/* unknown handle type */
	default:
		RTE_LOG(ERR, EAL,
			"Unknown handle type of fd %d\n",
			intr_handle->fd);
		return -1;
	}
}

static void
eal_intr_process_interrupts(struct kevent *events, int nfds)
{
	struct rte_intr_callback active_cb;
	union rte_intr_read_buffer buf;
	struct rte_intr_callback *cb;
	struct rte_intr_source *src;
	int n, bytes_read;

	for (n = 0; n < nfds; n++) {
		int event_fd = events[n].ident;

		/* find the source for this event under the lock */
		rte_spinlock_lock(&intr_lock);
		TAILQ_FOREACH(src, &intr_sources, next)
			if (src->intr_handle.fd == event_fd)
				break;
		if (src == NULL) {
			rte_spinlock_unlock(&intr_lock);
			continue;
		}

		/* mark this interrupt source as active and release the lock. */
		src->active = 1;
		rte_spinlock_unlock(&intr_lock);

		/* set the length to be read for different handle types */
		switch (src->intr_handle.type) {
		case RTE_INTR_HANDLE_ALARM:
			bytes_read = 0;
			break;
		case RTE_INTR_HANDLE_VDEV:
		case RTE_INTR_HANDLE_EXT:
			bytes_read = 0;
			break;
		case RTE_INTR_HANDLE_DEV_EVENT:
			bytes_read = 0;
			break;
		default:
			bytes_read = 1;
			break;
		}

		if (bytes_read > 0) {
			/**
			 * read out to clear the ready-to-be-read flag
			 * on the file descriptor.
			 */
			bytes_read = read(event_fd, &buf, bytes_read);
			if (bytes_read < 0) {
				if (errno == EINTR || errno == EWOULDBLOCK)
					continue;

				RTE_LOG(ERR, EAL, "Error reading from file "
					"descriptor %d: %s\n",
					event_fd, strerror(errno));
			} else if (bytes_read == 0)
				RTE_LOG(ERR, EAL, "Read nothing from file "
					"descriptor %d\n", event_fd);
		}

		/* grab the lock again to call callbacks and update status. */
		rte_spinlock_lock(&intr_lock);

		/* Finally, call all callbacks. */
		TAILQ_FOREACH(cb, &src->callbacks, next) {

			/* make a copy and unlock, so the callback can itself
			 * take the lock, e.g. to unregister callbacks.
			 */
			active_cb = *cb;
			rte_spinlock_unlock(&intr_lock);

			/* call the actual callback */
			active_cb.cb_fn(active_cb.cb_arg);

			/* get the lock back. */
			rte_spinlock_lock(&intr_lock);
		}

		/* we are done with that interrupt source, release it. */
		src->active = 0;
		rte_spinlock_unlock(&intr_lock);
	}
}

static void *
eal_intr_thread_main(void *arg __rte_unused)
{
	struct kevent events[MAX_INTR_EVENTS];
	int nfds;

	/* host thread, never breaks out */
	for (;;) {
		/* do not change anything, just wait */
		nfds = kevent(kq, NULL, 0, events, MAX_INTR_EVENTS, NULL);

		/* kevent failed */
		if (nfds < 0) {
			if (errno == EINTR)
				continue;
			RTE_LOG(ERR, EAL,
				"kevent returned with failure\n");
			break;
		}
		/* kevent timeout, will never happen here */
		if (nfds == 0)
			continue;

		/* kevent has at least one fd ready to read */
		eal_intr_process_interrupts(events, nfds);
	}

	return NULL;
}

int
rte_eal_intr_init(void)
{
	int ret = 0;

	/* init the global interrupt source head */
	TAILQ_INIT(&intr_sources);

	kq = kqueue();
	if (kq < 0) {
		RTE_LOG(ERR, EAL, "Cannot create kqueue instance\n");
		return -1;
	}

	/* create the host thread to wait for/handle interrupts */
	ret = rte_ctrl_thread_create(&intr_thread, "eal-intr-thread", NULL,
			eal_intr_thread_main, NULL);
	if (ret != 0) {
		rte_errno = -ret;
		RTE_LOG(ERR, EAL,
			"Failed to create thread for interrupt handling\n");
	}

	return ret;
}

int
rte_intr_rx_ctl(struct rte_intr_handle *intr_handle,
		int epfd, int op, unsigned int vec, void *data)
{
	RTE_SET_USED(intr_handle);
	RTE_SET_USED(epfd);
	RTE_SET_USED(op);
	RTE_SET_USED(vec);
	RTE_SET_USED(data);

	return -ENOTSUP;
}

int
rte_intr_efd_enable(struct rte_intr_handle *intr_handle, uint32_t nb_efd)
{
	RTE_SET_USED(intr_handle);
	RTE_SET_USED(nb_efd);

	return 0;
}

void
rte_intr_efd_disable(struct rte_intr_handle *intr_handle)
{
	RTE_SET_USED(intr_handle);
}

int
rte_intr_dp_is_en(struct rte_intr_handle *intr_handle)
{
	RTE_SET_USED(intr_handle);
	return 0;
}

int
rte_intr_allow_others(struct rte_intr_handle *intr_handle)
{
	RTE_SET_USED(intr_handle);
	return 1;
}

int
rte_intr_cap_multiple(struct rte_intr_handle *intr_handle)
{
	RTE_SET_USED(intr_handle);
	return 0;
}

int
rte_epoll_wait(int epfd, struct rte_epoll_event *events,
		int maxevents, int timeout)
{
	RTE_SET_USED(epfd);
	RTE_SET_USED(events);
	RTE_SET_USED(maxevents);
	RTE_SET_USED(timeout);

	return -ENOTSUP;
}

int
rte_epoll_ctl(int epfd, int op, int fd, struct rte_epoll_event *event)
{
	RTE_SET_USED(epfd);
	RTE_SET_USED(op);
	RTE_SET_USED(fd);
	RTE_SET_USED(event);

	return -ENOTSUP;
}

int
rte_intr_tls_epfd(void)
{
	return -ENOTSUP;
}

void
rte_intr_free_epoll_fd(struct rte_intr_handle *intr_handle)
{
	RTE_SET_USED(intr_handle);
}