lib/librte_eal/bsdapp/eal/eal_interrupts.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2018 Intel Corporation
 */

#include <errno.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <sys/event.h>
#include <sys/queue.h>
#include <unistd.h>

#include <rte_errno.h>
#include <rte_lcore.h>
#include <rte_spinlock.h>
#include <rte_common.h>
#include <rte_interrupts.h>

#include "eal_private.h"
#include "eal_alarm_private.h"

#define MAX_INTR_EVENTS 16

/**
 * union buffer for reading on different devices; on FreeBSD only a generic
 * character buffer is needed, used to drain readable file descriptors.
 */
union rte_intr_read_buffer {
        char charbuf[16];                /* generic drain buffer */
};

TAILQ_HEAD(rte_intr_cb_list, rte_intr_callback);
TAILQ_HEAD(rte_intr_source_list, rte_intr_source);

struct rte_intr_callback {
        TAILQ_ENTRY(rte_intr_callback) next;
        rte_intr_callback_fn cb_fn;  /**< callback address */
        void *cb_arg;                /**< parameter for callback */
};

struct rte_intr_source {
        TAILQ_ENTRY(rte_intr_source) next;
        struct rte_intr_handle intr_handle; /**< interrupt handle */
        struct rte_intr_cb_list callbacks;  /**< user callbacks */
        uint32_t active;
};

/* global spinlock for interrupt data operations */
static rte_spinlock_t intr_lock = RTE_SPINLOCK_INITIALIZER;

/* interrupt sources list */
static struct rte_intr_source_list intr_sources;

/* interrupt handling thread */
static pthread_t intr_thread;

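/* kqueue instance shared by all interrupt sources: -1 until
 * rte_eal_intr_init() creates it, and reset to -1 if the interrupt
 * thread ever exits.
 */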
static volatile int kq = -1;

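/* Fill in a kevent for the given interrupt handle. Alarm handles are armed
 * as one-shot EVFILT_TIMER events set to the soonest pending alarm timeout;
 * all other handle types are watched for readability via EVFILT_READ.
 */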
static int
intr_source_to_kevent(const struct rte_intr_handle *ih, struct kevent *ke)
{
        /* alarm callbacks are a special case */
        if (ih->type == RTE_INTR_HANDLE_ALARM) {
                uint64_t timeout_ns;

                /* get the soonest alarm timeout */
                if (eal_alarm_get_timeout_ns(&timeout_ns) < 0)
                        return -1;

                ke->filter = EVFILT_TIMER;
                /* timers are one shot */
                ke->flags |= EV_ONESHOT;
                ke->fflags = NOTE_NSECONDS;
                ke->data = timeout_ns;
        } else {
                ke->filter = EVFILT_READ;
        }
        ke->ident = ih->fd;

        return 0;
}

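/* Register a callback for the given interrupt handle. A new interrupt
 * source is created for the fd on first registration, and the fd is added
 * to the kqueue. Alarm handles are re-added on every registration because
 * their one-shot timer event must be re-armed.
 */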
int
rte_intr_callback_register(const struct rte_intr_handle *intr_handle,
                rte_intr_callback_fn cb, void *cb_arg)
{
        struct rte_intr_callback *callback = NULL;
        struct rte_intr_source *src = NULL;
        int ret = 0, add_event = 0;

        /* first do parameter checking */
        if (intr_handle == NULL || intr_handle->fd < 0 || cb == NULL) {
                RTE_LOG(ERR, EAL,
                        "Registering with invalid input parameter\n");
                return -EINVAL;
        }
        if (kq < 0) {
                RTE_LOG(ERR, EAL, "Kqueue is not active: %d\n", kq);
                return -ENODEV;
        }

        /* allocate a new interrupt callback entity */
        callback = calloc(1, sizeof(*callback));
        if (callback == NULL) {
                RTE_LOG(ERR, EAL, "Can not allocate memory\n");
                return -ENOMEM;
        }
        callback->cb_fn = cb;
        callback->cb_arg = cb_arg;

        rte_spinlock_lock(&intr_lock);

        /* check if there is at least one callback registered for the fd */
        TAILQ_FOREACH(src, &intr_sources, next) {
                if (src->intr_handle.fd == intr_handle->fd) {
                        /* no event was registered for this source yet */
                        if (TAILQ_EMPTY(&src->callbacks))
                                add_event = 1;

                        TAILQ_INSERT_TAIL(&(src->callbacks), callback, next);
                        break;
                }
        }

        /* no existing callbacks for this fd - add a new source */
        if (src == NULL) {
                src = calloc(1, sizeof(*src));
                if (src == NULL) {
                        RTE_LOG(ERR, EAL, "Can not allocate memory\n");
                        ret = -ENOMEM;
                        goto fail;
                }
                src->intr_handle = *intr_handle;
                TAILQ_INIT(&src->callbacks);
                TAILQ_INSERT_TAIL(&(src->callbacks), callback, next);
                TAILQ_INSERT_TAIL(&intr_sources, src, next);
                add_event = 1;
        }

        /* add events to the queue. timer events are special, as their
         * one-shot event expires after firing and therefore must be
         * re-armed on every registration.
         */
        if (add_event || src->intr_handle.type == RTE_INTR_HANDLE_ALARM) {
                struct kevent ke;

                memset(&ke, 0, sizeof(ke));
                ke.flags = EV_ADD; /* mark for addition to the queue */

                if (intr_source_to_kevent(intr_handle, &ke) < 0) {
                        RTE_LOG(ERR, EAL, "Cannot convert interrupt handle to kevent\n");
                        ret = -ENODEV;
                        goto fail;
                }

                /**
                 * add the intr file descriptor to the wait list.
                 */
                if (kevent(kq, &ke, 1, NULL, 0, NULL) < 0) {
                        RTE_LOG(ERR, EAL, "Error adding fd %d kevent, %s\n",
                                src->intr_handle.fd, strerror(errno));
                        ret = -errno;
                        goto fail;
                }
        }
        rte_spinlock_unlock(&intr_lock);

        return ret;
fail:
        /* clean up */
        if (src != NULL) {
                TAILQ_REMOVE(&(src->callbacks), callback, next);
                if (TAILQ_EMPTY(&(src->callbacks))) {
                        TAILQ_REMOVE(&intr_sources, src, next);
                        free(src);
                }
        }
        free(callback);
        rte_spinlock_unlock(&intr_lock);
        return ret;
}

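/* Unregister callbacks matching cb_fn for the given handle. A cb_arg of
 * (void *)-1 acts as a wildcard and matches any registered argument.
 * Returns the number of callbacks removed, or a negative errno value;
 * -EAGAIN is returned while the source is busy servicing callbacks.
 */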
int
rte_intr_callback_unregister(const struct rte_intr_handle *intr_handle,
                rte_intr_callback_fn cb_fn, void *cb_arg)
{
        int ret;
        struct rte_intr_source *src;
        struct rte_intr_callback *cb, *next;

        /* do parameter checking first */
        if (intr_handle == NULL || intr_handle->fd < 0) {
                RTE_LOG(ERR, EAL,
                        "Unregistering with invalid input parameter\n");
                return -EINVAL;
        }
        if (kq < 0) {
                RTE_LOG(ERR, EAL, "Kqueue is not active\n");
                return -ENODEV;
        }

        rte_spinlock_lock(&intr_lock);

        /* check if an interrupt source exists for the fd */
        TAILQ_FOREACH(src, &intr_sources, next)
                if (src->intr_handle.fd == intr_handle->fd)
                        break;

        /* no interrupt source registered for the fd */
        if (src == NULL) {
                ret = -ENOENT;

        /* interrupt source has some active callbacks right now. */
        } else if (src->active != 0) {
                ret = -EAGAIN;

        /* ok to remove. */
        } else {
                struct kevent ke;

                ret = 0;

                /* remove it from the kqueue */
                memset(&ke, 0, sizeof(ke));
                ke.flags = EV_DELETE; /* mark for deletion from the queue */

                if (intr_source_to_kevent(intr_handle, &ke) < 0) {
                        RTE_LOG(ERR, EAL, "Cannot convert to kevent\n");
                        ret = -ENODEV;
                        goto out;
                }

                /**
                 * remove the intr file descriptor from the wait list.
                 */
                if (kevent(kq, &ke, 1, NULL, 0, NULL) < 0) {
                        RTE_LOG(ERR, EAL, "Error removing fd %d kevent, %s\n",
                                src->intr_handle.fd, strerror(errno));
                        /* removing a non-existent event is an expected
                         * condition in some circumstances
                         * (e.g. oneshot events).
                         */
                }

                /* walk through the callbacks and remove all that match. */
                for (cb = TAILQ_FIRST(&src->callbacks); cb != NULL; cb = next) {
                        next = TAILQ_NEXT(cb, next);
                        if (cb->cb_fn == cb_fn && (cb_arg == (void *)-1 ||
                                        cb->cb_arg == cb_arg)) {
                                TAILQ_REMOVE(&src->callbacks, cb, next);
                                free(cb);
                                ret++;
                        }
                }

                /* all callbacks for that source are removed. */
                if (TAILQ_EMPTY(&src->callbacks)) {
                        TAILQ_REMOVE(&intr_sources, src, next);
                        free(src);
                }
        }
out:
        rte_spinlock_unlock(&intr_lock);

        return ret;
}

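/* On FreeBSD, enabling and disabling interrupts is only meaningful for
 * virtual device handles; every other handle type is rejected below.
 */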
int
rte_intr_enable(const struct rte_intr_handle *intr_handle)
{
        if (intr_handle && intr_handle->type == RTE_INTR_HANDLE_VDEV)
                return 0;

        if (!intr_handle || intr_handle->fd < 0 || intr_handle->uio_cfg_fd < 0)
                return -1;

        switch (intr_handle->type) {
        /* not used at this moment */
        case RTE_INTR_HANDLE_ALARM:
                return -1;
        /* not used at this moment */
        case RTE_INTR_HANDLE_DEV_EVENT:
                return -1;
        /* unknown handle type */
        default:
                RTE_LOG(ERR, EAL,
                        "Unknown handle type of fd %d\n",
                        intr_handle->fd);
                return -1;
        }

        return 0;
}

int
rte_intr_disable(const struct rte_intr_handle *intr_handle)
{
        if (intr_handle && intr_handle->type == RTE_INTR_HANDLE_VDEV)
                return 0;

        if (!intr_handle || intr_handle->fd < 0 || intr_handle->uio_cfg_fd < 0)
                return -1;

        switch (intr_handle->type) {
        /* not used at this moment */
        case RTE_INTR_HANDLE_ALARM:
                return -1;
        /* not used at this moment */
        case RTE_INTR_HANDLE_DEV_EVENT:
                return -1;
        /* unknown handle type */
        default:
                RTE_LOG(ERR, EAL,
                        "Unknown handle type of fd %d\n",
                        intr_handle->fd);
                return -1;
        }

        return 0;
}

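/* Service one batch of kevents returned by the wait loop: find the source
 * for each triggered fd, mark it active, drain the fd where needed, and
 * invoke the registered callbacks. The lock is dropped around each callback
 * invocation (a copy of the entry is used) so that callbacks may themselves
 * register or unregister interrupts.
 */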
static void
eal_intr_process_interrupts(struct kevent *events, int nfds)
{
        struct rte_intr_callback active_cb;
        union rte_intr_read_buffer buf;
        struct rte_intr_callback *cb;
        struct rte_intr_source *src;
        bool call;
        int n, bytes_read;

        for (n = 0; n < nfds; n++) {
                int event_fd = events[n].ident;

                call = false;

                rte_spinlock_lock(&intr_lock);
                TAILQ_FOREACH(src, &intr_sources, next)
                        if (src->intr_handle.fd == event_fd)
                                break;
                if (src == NULL) {
                        rte_spinlock_unlock(&intr_lock);
                        continue;
                }

                /* mark this interrupt source as active and release the lock. */
                src->active = 1;
                rte_spinlock_unlock(&intr_lock);

                /* set the length to be read for the different handle types */
                switch (src->intr_handle.type) {
                case RTE_INTR_HANDLE_ALARM:
                        bytes_read = 0;
                        call = true;
                        break;
                case RTE_INTR_HANDLE_VDEV:
                case RTE_INTR_HANDLE_EXT:
                        bytes_read = 0;
                        call = true;
                        break;
                case RTE_INTR_HANDLE_DEV_EVENT:
                        bytes_read = 0;
                        call = true;
                        break;
                default:
                        bytes_read = 1;
                        break;
                }

                if (bytes_read > 0) {
                        /**
                         * read out to clear the ready-to-be-read flag,
                         * so that kevent does not report the fd again.
                         */
                        bytes_read = read(event_fd, &buf, bytes_read);
                        if (bytes_read < 0) {
                                if (errno == EINTR || errno == EWOULDBLOCK) {
                                        /* do not leave the source marked
                                         * active, or it could never be
                                         * unregistered.
                                         */
                                        rte_spinlock_lock(&intr_lock);
                                        src->active = 0;
                                        rte_spinlock_unlock(&intr_lock);
                                        continue;
                                }

                                RTE_LOG(ERR, EAL, "Error reading from file "
                                        "descriptor %d: %s\n",
                                        event_fd,
                                        strerror(errno));
                        } else if (bytes_read == 0)
                                RTE_LOG(ERR, EAL, "Read nothing from file "
                                        "descriptor %d\n", event_fd);
                        else
                                call = true;
                }

                /* grab the lock again to call callbacks and update status. */
                rte_spinlock_lock(&intr_lock);

                if (call) {
                        /* Finally, call all callbacks. */
                        TAILQ_FOREACH(cb, &src->callbacks, next) {

                                /* make a copy and unlock. */
                                active_cb = *cb;
                                rte_spinlock_unlock(&intr_lock);

                                /* call the actual callback */
                                active_cb.cb_fn(active_cb.cb_arg);

                                /* get the lock back. */
                                rte_spinlock_lock(&intr_lock);
                        }
                }

                /* we are done with this interrupt source, release it. */
                src->active = 0;
                rte_spinlock_unlock(&intr_lock);
        }
}

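/* Entry point of the dedicated interrupt handling thread: block in kevent()
 * without a timeout and dispatch whatever events arrive.
 */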
static void *
eal_intr_thread_main(void *arg __rte_unused)
{
        struct kevent events[MAX_INTR_EVENTS];
        int nfds;

        /* host thread, never break out */
        for (;;) {
                /* do not change anything, just wait */
                nfds = kevent(kq, NULL, 0, events, MAX_INTR_EVENTS, NULL);

                /* kevent failed */
                if (nfds < 0) {
                        if (errno == EINTR)
                                continue;
                        RTE_LOG(ERR, EAL,
                                "kevent returned with error\n");
                        break;
                }
                /* kevent timeout; cannot happen since the timeout is NULL */
                else if (nfds == 0)
                        continue;

                /* kevent has at least one fd ready to read */
                eal_intr_process_interrupts(events, nfds);
        }
        close(kq);
        kq = -1;
        return NULL;
}

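/* Initialize the interrupt subsystem: create the kqueue and spawn the
 * control thread that waits on it.
 */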
int
rte_eal_intr_init(void)
{
        int ret = 0;

        /* init the global interrupt source head */
        TAILQ_INIT(&intr_sources);

        kq = kqueue();
        if (kq < 0) {
                RTE_LOG(ERR, EAL, "Cannot create kqueue instance\n");
                return -1;
        }

        /* create the host thread to wait for/handle interrupts */
        ret = rte_ctrl_thread_create(&intr_thread, "eal-intr-thread", NULL,
                        eal_intr_thread_main, NULL);
        if (ret != 0) {
                rte_errno = -ret;
                RTE_LOG(ERR, EAL,
                        "Failed to create thread for interrupt handling\n");
        }

        return ret;
}

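/* The functions below are stubs: Rx queue interrupts and the epoll-style
 * event API are not supported on FreeBSD, so they either report -ENOTSUP
 * or succeed as no-ops.
 */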
int
rte_intr_rx_ctl(struct rte_intr_handle *intr_handle,
                int epfd, int op, unsigned int vec, void *data)
{
        RTE_SET_USED(intr_handle);
        RTE_SET_USED(epfd);
        RTE_SET_USED(op);
        RTE_SET_USED(vec);
        RTE_SET_USED(data);

        return -ENOTSUP;
}

int
rte_intr_efd_enable(struct rte_intr_handle *intr_handle, uint32_t nb_efd)
{
        RTE_SET_USED(intr_handle);
        RTE_SET_USED(nb_efd);

        return 0;
}

void
rte_intr_efd_disable(struct rte_intr_handle *intr_handle)
{
        RTE_SET_USED(intr_handle);
}

int
rte_intr_dp_is_en(struct rte_intr_handle *intr_handle)
{
        RTE_SET_USED(intr_handle);
        return 0;
}

int
rte_intr_allow_others(struct rte_intr_handle *intr_handle)
{
        RTE_SET_USED(intr_handle);
        return 1;
}

int
rte_intr_cap_multiple(struct rte_intr_handle *intr_handle)
{
        RTE_SET_USED(intr_handle);
        return 0;
}

int
rte_epoll_wait(int epfd, struct rte_epoll_event *events,
                int maxevents, int timeout)
{
        RTE_SET_USED(epfd);
        RTE_SET_USED(events);
        RTE_SET_USED(maxevents);
        RTE_SET_USED(timeout);

        return -ENOTSUP;
}

int
rte_epoll_ctl(int epfd, int op, int fd, struct rte_epoll_event *event)
{
        RTE_SET_USED(epfd);
        RTE_SET_USED(op);
        RTE_SET_USED(fd);
        RTE_SET_USED(event);

        return -ENOTSUP;
}

int
rte_intr_tls_epfd(void)
{
        return -ENOTSUP;
}

void
rte_intr_free_epoll_fd(struct rte_intr_handle *intr_handle)
{
        RTE_SET_USED(intr_handle);
}