/*-
 *   Copyright(c) 2010-2013 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdio.h>
#include <stdint.h>
#include <pthread.h>
#include <unistd.h>
#include <string.h>
#include <errno.h>
#include <sys/queue.h>
#include <sys/epoll.h>
#include <sys/signalfd.h>

#include <rte_common.h>
#include <rte_interrupts.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_tailq.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_errno.h>
#include <rte_spinlock.h>

#include "eal_private.h"

#define EAL_INTR_EPOLL_WAIT_FOREVER (-1)
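
/**
 * Union for pipe fds. A minimal sketch of the definition assumed by
 * intr_pipe below: the read/write ends alias pipefd[0]/pipefd[1]
 * through anonymous structs (a GNU C extension used throughout DPDK).
 */
union intr_pipefds {
	struct {
		int pipefd[2];	/* as filled in by pipe() */
	};
	struct {
		int readfd;	/* pipefd[0]: the epoll loop waits on this */
		int writefd;	/* pipefd[1]: written to trigger a rebuild */
	};
};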
/**
 * union buffer for reading on different devices
 */
union rte_intr_read_buffer {
	int uio_intr_count;       /* for uio device */
	uint64_t timerfd_num;     /* for timerfd */
	char charbuf[16];         /* for others */
};

TAILQ_HEAD(rte_intr_cb_list, rte_intr_callback);
TAILQ_HEAD(rte_intr_source_list, rte_intr_source);
struct rte_intr_callback {
	TAILQ_ENTRY(rte_intr_callback) next;
	rte_intr_callback_fn cb_fn;  /**< callback address */
	void *cb_arg;                /**< parameter for callback */
};

struct rte_intr_source {
	TAILQ_ENTRY(rte_intr_source) next;
	struct rte_intr_handle intr_handle; /**< interrupt handle */
	struct rte_intr_cb_list callbacks;  /**< user callbacks */
};
/* global spinlock for interrupt data operations */
static rte_spinlock_t intr_lock = RTE_SPINLOCK_INITIALIZER;

/* pipe fds used to notify the interrupt thread of wait-list changes */
static union intr_pipefds intr_pipe;

/* interrupt sources list */
static struct rte_intr_source_list intr_sources;

/* interrupt handling thread */
static pthread_t intr_thread;
int
rte_intr_callback_register(struct rte_intr_handle *intr_handle,
			rte_intr_callback_fn cb, void *cb_arg)
{
	int ret = -1;
	int wake_thread = 0;
	struct rte_intr_source *src;

	/* first do parameter checking */
	if (intr_handle == NULL || intr_handle->fd < 0 || cb == NULL) {
		RTE_LOG(ERR, EAL,
			"Registering with invalid input parameter\n");
		return -EINVAL;
	}

	/* allocate a new interrupt callback entity */
	struct rte_intr_callback *callback =
		rte_zmalloc("interrupt callback list",
				sizeof(*callback), 0);
	if (callback == NULL) {
		RTE_LOG(ERR, EAL, "Can not allocate memory\n");
		return -ENOMEM;
	}
	callback->cb_fn = cb;
	callback->cb_arg = cb_arg;

	rte_spinlock_lock(&intr_lock);

	/* check if there is at least one callback registered for the fd */
	TAILQ_FOREACH(src, &intr_sources, next)
		if (src->intr_handle.fd == intr_handle->fd) {
			if (src->callbacks.tqh_first == NULL)
				/* we had no interrupts for this before */
				wake_thread = 1;

			TAILQ_INSERT_TAIL(&(src->callbacks), callback, next);
			ret = 0;
			break;
		}

	/* no existing callbacks for this fd - add a new source */
	if (src == NULL) {
		src = rte_zmalloc("interrupt source list", sizeof(*src), 0);
		if (src == NULL) {
			RTE_LOG(ERR, EAL, "Can not allocate memory\n");
			rte_free(callback);
			rte_spinlock_unlock(&intr_lock);
			return -ENOMEM;
		}
		src->intr_handle = *intr_handle;
		TAILQ_INIT(&src->callbacks);

		TAILQ_INSERT_TAIL(&intr_sources, src, next);
		TAILQ_INSERT_TAIL(&(src->callbacks), callback, next);
		wake_thread = 1;
		ret = 0;
	}

	rte_spinlock_unlock(&intr_lock);

	/*
	 * check if we need to notify the pipe fd waited on by epoll_wait
	 * to rebuild the wait list.
	 */
	if (wake_thread)
		if (write(intr_pipe.writefd, "1", 1) < 0)
			return -EPIPE;

	return ret;
}
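
/*
 * Usage sketch (hypothetical driver code, not part of this file): a
 * handler and its registration against a device's interrupt handle.
 * example_intr_handler, example_dev and dev are illustrative names.
 *
 *	static void
 *	example_intr_handler(struct rte_intr_handle *handle, void *arg)
 *	{
 *		struct example_dev *dev = arg;
 *
 *		example_dev_handle_event(dev);
 *		rte_intr_enable(handle);	// re-arm the interrupt
 *	}
 *
 *	rte_intr_callback_register(&dev->intr_handle,
 *			example_intr_handler, dev);
 */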
int
rte_intr_callback_unregister(struct rte_intr_handle *intr_handle,
			rte_intr_callback_fn cb_fn, void *cb_arg)
{
	int ret = 0;
	struct rte_intr_source *src;
	struct rte_intr_callback *cb, *next;

	/* do parameter checking first */
	if (intr_handle == NULL || intr_handle->fd < 0) {
		RTE_LOG(ERR, EAL,
			"Unregistering with invalid input parameter\n");
		return -EINVAL;
	}

	rte_spinlock_lock(&intr_lock);

	/* check if an interrupt source exists for the fd */
	TAILQ_FOREACH(src, &intr_sources, next)
		if (src->intr_handle.fd == intr_handle->fd)
			break;

	/* no interrupt source registered for the fd */
	if (src == NULL) {
		rte_spinlock_unlock(&intr_lock);
		return -ENOENT;
	}

	/* iterate safely: matching entries are removed and freed */
	for (cb = TAILQ_FIRST(&src->callbacks); cb != NULL; cb = next) {
		next = TAILQ_NEXT(cb, next);
		if (cb->cb_fn != cb_fn)
			continue;
		/* a cb_arg of (void *)-1 matches any registered argument */
		if (cb_arg == (void *)-1 || cb->cb_arg == cb_arg) {
			TAILQ_REMOVE(&src->callbacks, cb, next);
			rte_free(cb);
			ret++;
		}
	}

	/* remove the source once its last callback is gone */
	if (src->callbacks.tqh_first == NULL) {
		TAILQ_REMOVE(&intr_sources, src, next);
		rte_free(src);
	}

	/* notify the pipe fd waited on by epoll_wait to rebuild the wait list */
	if (write(intr_pipe.writefd, "1", 1) < 0)
		ret = -EPIPE;

	rte_spinlock_unlock(&intr_lock);

	return ret;
}
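
/*
 * Note: passing (void *)-1 as cb_arg acts as a wildcard, e.g.
 * (hypothetical call, matching the register sketch earlier):
 *
 *	rte_intr_callback_unregister(&dev->intr_handle,
 *			example_intr_handler, (void *)-1);
 *
 * removes every registration of example_intr_handler on that handle,
 * whatever argument each was registered with.
 */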
int
rte_intr_enable(struct rte_intr_handle *intr_handle)
{
	const int value = 1;

	if (!intr_handle || intr_handle->fd < 0)
		return -1;

	switch (intr_handle->type) {
	/* write to the uio fd to enable the interrupt */
	case RTE_INTR_HANDLE_UIO:
		if (write(intr_handle->fd, &value, sizeof(value)) < 0) {
			RTE_LOG(ERR, EAL,
				"Error enabling interrupts for fd %d\n",
				intr_handle->fd);
			return -1;
		}
		break;
	/* not used at this moment */
	case RTE_INTR_HANDLE_ALARM:
		return -1;
	/* unknown handle type */
	default:
		RTE_LOG(ERR, EAL,
			"Unknown handle type of fd %d\n",
			intr_handle->fd);
		return -1;
	}

	return 0;
}
int
rte_intr_disable(struct rte_intr_handle *intr_handle)
{
	const int value = 0;

	if (!intr_handle || intr_handle->fd < 0)
		return -1;

	switch (intr_handle->type) {
	/* write to the uio fd to disable the interrupt */
	case RTE_INTR_HANDLE_UIO:
		if (write(intr_handle->fd, &value, sizeof(value)) < 0) {
			RTE_LOG(ERR, EAL,
				"Error disabling interrupts for fd %d\n",
				intr_handle->fd);
			return -1;
		}
		break;
	/* not used at this moment */
	case RTE_INTR_HANDLE_ALARM:
		return -1;
	/* unknown handle type */
	default:
		RTE_LOG(ERR, EAL,
			"Unknown handle type of fd %d\n",
			intr_handle->fd);
		return -1;
	}

	return 0;
}
static int
eal_intr_process_interrupts(struct epoll_event *events, int nfds)
{
	int n, i, active_cb, bytes_read;
	struct rte_intr_source *src;
	struct rte_intr_callback *cb;
	union rte_intr_read_buffer buf;
	struct rte_intr_callback active_cbs[32];

	for (n = 0; n < nfds; n++) {
		/*
		 * if the pipe fd is ready to read, return out to
		 * rebuild the wait list.
		 */
		if (events[n].data.fd == intr_pipe.readfd) {
			int r = read(intr_pipe.readfd, buf.charbuf,
					sizeof(buf.charbuf));
			RTE_SET_USED(r);
			return -1;
		}
		rte_spinlock_lock(&intr_lock);
		TAILQ_FOREACH(src, &intr_sources, next)
			if (src->intr_handle.fd == events[n].data.fd)
				break;
		if (src == NULL) {
			rte_spinlock_unlock(&intr_lock);
			continue;
		}

		/* for this source, make a copy of all the callbacks,
		 * then unlock the lock, so the callbacks can
		 * themselves manipulate the list for future instances.
		 */
		active_cb = 0;
		memset(active_cbs, 0, sizeof(active_cbs));
		TAILQ_FOREACH(cb, &src->callbacks, next) {
			/* the copy is capped at 32 callbacks per source */
			if (active_cb >=
					(int)(sizeof(active_cbs) /
						sizeof(active_cbs[0])))
				break;
			active_cbs[active_cb++] = *cb;
		}
		rte_spinlock_unlock(&intr_lock);

		/* set the length to be read for the different handle types */
		switch (src->intr_handle.type) {
		case RTE_INTR_HANDLE_UIO:
			bytes_read = sizeof(buf.uio_intr_count);
			break;
		case RTE_INTR_HANDLE_ALARM:
			bytes_read = sizeof(uint64_t);
			break;
		default:
			bytes_read = 1;
			break;
		}

		/* read out to clear the ready-to-be-read flag for epoll_wait */
		bytes_read = read(events[n].data.fd, &buf, bytes_read);
		if (bytes_read < 0) {
			RTE_LOG(ERR, EAL, "Error reading from file descriptor"
				" %d, error: %d\n", events[n].data.fd, errno);
		}
		else if (bytes_read == 0) {
			RTE_LOG(ERR, EAL,
				"Read nothing from file descriptor %d.\n",
				events[n].data.fd);
		}

		/* finally, call all the callbacks from the copy we made */
		for (i = 0; i < active_cb; i++) {
			if (active_cbs[i].cb_fn == NULL)
				continue;
			active_cbs[i].cb_fn(&src->intr_handle,
					active_cbs[i].cb_arg);
		}
	}

	return 0;
}
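
/*
 * The return value of eal_intr_process_interrupts() doubles as a control
 * signal: -1 means the notification pipe fired and the epoll wait list
 * must be rebuilt; 0 means every ready fd was an ordinary interrupt
 * source and the caller may keep waiting on the same epoll fd.
 */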
/**
 * It handles all the interrupts.
 *
 * @param pfd
 *  epoll file descriptor.
 * @param totalfds
 *  The number of file descriptors added in epoll.
 *
 * @return
 *  void
 */
static void
eal_intr_handle_interrupts(int pfd, unsigned totalfds)
{
	struct epoll_event events[totalfds];
	int nfds = 0;

	for (;;) {
		nfds = epoll_wait(pfd, events, totalfds,
			EAL_INTR_EPOLL_WAIT_FOREVER);
		/* epoll_wait failed */
		if (nfds < 0) {
			if (errno == EINTR)
				continue;
			RTE_LOG(ERR, EAL,
				"epoll_wait returned with failure\n");
			return;
		}
		/* epoll_wait timeout, will never happen here */
		else if (nfds == 0)
			continue;
		/* epoll_wait has at least one fd ready to read */
		if (eal_intr_process_interrupts(events, nfds) < 0)
			return;
	}
}
/**
 * It builds/rebuilds up the epoll file descriptor with all the
 * file descriptors being waited on. Then handles the interrupts.
 *
 * @param arg
 *  unused.
 *
 * @return
 *  never returns.
 */
static __attribute__((noreturn)) void *
eal_intr_thread_main(__rte_unused void *arg)
{
	struct epoll_event ev;

	/* host thread, never break out */
	for (;;) {
		/* build up the epoll fd with all descriptors we are to
		 * wait on, then pass it to the handle_interrupts function
		 */
		static struct epoll_event pipe_event = {
			.events = EPOLLIN | EPOLLPRI,
		};
		struct rte_intr_source *src;
		unsigned numfds = 0;

		/* create epoll fd */
		int pfd = epoll_create(1);
		if (pfd < 0)
			rte_panic("Cannot create epoll instance\n");

		pipe_event.data.fd = intr_pipe.readfd;
		/*
		 * add pipe fd into wait list; this pipe is used to
		 * rebuild the wait list.
		 */
		if (epoll_ctl(pfd, EPOLL_CTL_ADD, intr_pipe.readfd,
				&pipe_event) < 0) {
			rte_panic("Error adding fd %d to epoll_ctl, %s\n",
					intr_pipe.readfd, strerror(errno));
		}
		numfds++;

		rte_spinlock_lock(&intr_lock);

		TAILQ_FOREACH(src, &intr_sources, next) {
			if (src->callbacks.tqh_first == NULL)
				continue; /* skip those with no callbacks */
			ev.events = EPOLLIN | EPOLLPRI;
			ev.data.fd = src->intr_handle.fd;

			/*
			 * add all the device file descriptors into
			 * the wait list.
			 */
			if (epoll_ctl(pfd, EPOLL_CTL_ADD,
					src->intr_handle.fd, &ev) < 0) {
				rte_panic("Error adding fd %d epoll_ctl, %s\n",
					src->intr_handle.fd, strerror(errno));
			}
			else
				numfds++;
		}
		rte_spinlock_unlock(&intr_lock);

		/* serve the interrupt */
		eal_intr_handle_interrupts(pfd, numfds);

		/*
		 * when we return, we need to rebuild the
		 * list of fds to monitor.
		 */
		close(pfd);
	}
}
int
rte_eal_intr_init(void)
{
	int ret = 0;

	/* init the global interrupt source head */
	TAILQ_INIT(&intr_sources);

	/*
	 * create a pipe which will be waited on by epoll and written to
	 * in order to rebuild the epoll wait list.
	 */
	if (pipe(intr_pipe.pipefd) < 0)
		return -1;

	/* create the host thread to wait for/handle interrupts */
	ret = pthread_create(&intr_thread, NULL,
			eal_intr_thread_main, NULL);
	if (ret != 0)
		RTE_LOG(ERR, EAL,
			"Failed to create thread for interrupt handling\n");

	/* pthread_create returns a positive errno value on failure */
	return -ret;
}