eal/bsd: add interrupt thread
lib/librte_eal/bsdapp/eal/eal_interrupts.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2018 Intel Corporation
 */

#include <errno.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <sys/event.h>
#include <sys/queue.h>
#include <unistd.h>

#include <rte_errno.h>
#include <rte_lcore.h>
#include <rte_spinlock.h>
#include <rte_common.h>
#include <rte_interrupts.h>

#include "eal_private.h"

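/*
 * FreeBSD implementation of the EAL interrupt thread: instead of the
 * epoll(7) interface used on Linux, a kqueue(2) instance collects read
 * events for every registered interrupt file descriptor, and a dedicated
 * control thread dispatches the registered callbacks.
 *
 * Illustrative usage from a driver's point of view (a minimal sketch only;
 * "my_dev" and "my_irq_handler" are hypothetical names, not part of this
 * file):
 *
 *	static void my_irq_handler(void *arg)
 *	{
 *		struct my_dev *dev = arg;
 *		... acknowledge and service the interrupt ...
 *	}
 *
 *	ret = rte_intr_callback_register(&dev->intr_handle,
 *			my_irq_handler, dev);
 */
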
#define MAX_INTR_EVENTS 16

/**
 * union buffer for reading on different devices
 */
union rte_intr_read_buffer {
	char charbuf[16];                /* for others */
};

TAILQ_HEAD(rte_intr_cb_list, rte_intr_callback);
TAILQ_HEAD(rte_intr_source_list, rte_intr_source);

struct rte_intr_callback {
	TAILQ_ENTRY(rte_intr_callback) next;
	rte_intr_callback_fn cb_fn;  /**< callback address */
	void *cb_arg;                /**< parameter for callback */
};

struct rte_intr_source {
	TAILQ_ENTRY(rte_intr_source) next;
	struct rte_intr_handle intr_handle; /**< interrupt handle */
	struct rte_intr_cb_list callbacks;  /**< user callbacks */
	uint32_t active;
};

/* global spinlock for interrupt data operations */
static rte_spinlock_t intr_lock = RTE_SPINLOCK_INITIALIZER;

/* interrupt sources list */
static struct rte_intr_source_list intr_sources;

/* interrupt handling thread */
static pthread_t intr_thread;

static volatile int kq = -1;

static int
intr_source_to_kevent(const struct rte_intr_handle *ih, struct kevent *ke)
{
	ke->filter = EVFILT_READ;
	ke->ident = ih->fd;

	return 0;
}
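
/*
 * Note that intr_source_to_kevent() only fills in the event filter and the
 * identifier (the interrupt fd); callers pre-set ke->flags before the
 * conversion. A sketch of the register path below:
 *
 *	struct kevent ke;
 *
 *	memset(&ke, 0, sizeof(ke));
 *	ke.flags = EV_ADD;
 *	intr_source_to_kevent(intr_handle, &ke);
 *	kevent(kq, &ke, 1, NULL, 0, NULL);
 *
 * The unregister path is identical except for ke.flags = EV_DELETE.
 */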

int
rte_intr_callback_register(const struct rte_intr_handle *intr_handle,
		rte_intr_callback_fn cb, void *cb_arg)
{
	struct rte_intr_callback *callback = NULL;
	struct rte_intr_source *src = NULL;
	int ret = 0, add_event = 0;

	/* first do parameter checking */
	if (intr_handle == NULL || intr_handle->fd < 0 || cb == NULL) {
		RTE_LOG(ERR, EAL,
			"Registering with invalid input parameter\n");
		return -EINVAL;
	}
	if (kq < 0) {
		RTE_LOG(ERR, EAL, "Kqueue is not active: %d\n", kq);
		return -ENODEV;
	}

	/* allocate a new interrupt callback entity */
	callback = calloc(1, sizeof(*callback));
	if (callback == NULL) {
		RTE_LOG(ERR, EAL, "Can not allocate memory\n");
		return -ENOMEM;
	}
	callback->cb_fn = cb;
	callback->cb_arg = cb_arg;

	rte_spinlock_lock(&intr_lock);

	/* check if there is at least one callback registered for the fd */
	TAILQ_FOREACH(src, &intr_sources, next) {
		if (src->intr_handle.fd == intr_handle->fd) {
			/* we had no interrupts for this */
			if (TAILQ_EMPTY(&src->callbacks))
				add_event = 1;

			TAILQ_INSERT_TAIL(&(src->callbacks), callback, next);
			ret = 0;
			break;
		}
	}

	/* no existing callbacks for this - add new source */
	if (src == NULL) {
		src = calloc(1, sizeof(*src));
		if (src == NULL) {
			RTE_LOG(ERR, EAL, "Can not allocate memory\n");
			ret = -ENOMEM;
			goto fail;
		} else {
			src->intr_handle = *intr_handle;
			TAILQ_INIT(&src->callbacks);
			TAILQ_INSERT_TAIL(&(src->callbacks), callback, next);
			TAILQ_INSERT_TAIL(&intr_sources, src, next);
			add_event = 1;
			ret = 0;
		}
	}

	/* add events to the queue */
	if (add_event) {
		struct kevent ke;

		memset(&ke, 0, sizeof(ke));
		ke.flags = EV_ADD; /* mark for addition to the queue */

		if (intr_source_to_kevent(intr_handle, &ke) < 0) {
			RTE_LOG(ERR, EAL, "Cannot convert interrupt handle to kevent\n");
			ret = -ENODEV;
			goto fail;
		}

		/**
		 * add the intr file descriptor into wait list.
		 */
		if (kevent(kq, &ke, 1, NULL, 0, NULL) < 0) {
			RTE_LOG(ERR, EAL, "Error adding fd %d kevent, %s\n",
				src->intr_handle.fd, strerror(errno));
			ret = -errno;
			goto fail;
		}
	}
	rte_spinlock_unlock(&intr_lock);

	return ret;
fail:
	/* clean up */
	if (src != NULL) {
		TAILQ_REMOVE(&(src->callbacks), callback, next);
		if (TAILQ_EMPTY(&(src->callbacks))) {
			TAILQ_REMOVE(&intr_sources, src, next);
			free(src);
		}
	}
	free(callback);
	rte_spinlock_unlock(&intr_lock);
	return ret;
}

int
rte_intr_callback_unregister(const struct rte_intr_handle *intr_handle,
		rte_intr_callback_fn cb_fn, void *cb_arg)
{
	int ret;
	struct rte_intr_source *src;
	struct rte_intr_callback *cb, *next;

	/* do parameter checking first */
	if (intr_handle == NULL || intr_handle->fd < 0) {
		RTE_LOG(ERR, EAL,
			"Unregistering with invalid input parameter\n");
		return -EINVAL;
	}
	if (kq < 0) {
		RTE_LOG(ERR, EAL, "Kqueue is not active\n");
		return -ENODEV;
	}

	rte_spinlock_lock(&intr_lock);

	/* check if an interrupt source exists for the fd */
	TAILQ_FOREACH(src, &intr_sources, next)
		if (src->intr_handle.fd == intr_handle->fd)
			break;

	/* No interrupt source registered for the fd */
	if (src == NULL) {
		ret = -ENOENT;

	/* interrupt source has some active callbacks right now. */
	} else if (src->active != 0) {
		ret = -EAGAIN;

	/* ok to remove. */
	} else {
		struct kevent ke;

		ret = 0;

		/* remove it from the kqueue */
		memset(&ke, 0, sizeof(ke));
		ke.flags = EV_DELETE; /* mark for deletion from the queue */

		if (intr_source_to_kevent(intr_handle, &ke) < 0) {
			RTE_LOG(ERR, EAL, "Cannot convert to kevent\n");
			ret = -ENODEV;
			goto out;
		}

		/**
		 * remove intr file descriptor from wait list.
		 */
		if (kevent(kq, &ke, 1, NULL, 0, NULL) < 0) {
			RTE_LOG(ERR, EAL, "Error removing fd %d kevent, %s\n",
				src->intr_handle.fd, strerror(errno));
			ret = -errno;
			goto out;
		}

		/* walk through the callbacks and remove all that match. */
		for (cb = TAILQ_FIRST(&src->callbacks); cb != NULL; cb = next) {
			next = TAILQ_NEXT(cb, next);
			if (cb->cb_fn == cb_fn && (cb_arg == (void *)-1 ||
					cb->cb_arg == cb_arg)) {
				TAILQ_REMOVE(&src->callbacks, cb, next);
				free(cb);
				ret++;
			}
		}

		/* all callbacks for that source are removed. */
		if (TAILQ_EMPTY(&src->callbacks)) {
			TAILQ_REMOVE(&intr_sources, src, next);
			free(src);
		}
	}
out:
	rte_spinlock_unlock(&intr_lock);

	return ret;
}
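
/*
 * Usage note: a cb_arg of (void *)-1 acts as a wildcard above, removing
 * every callback registered with cb_fn regardless of its argument, and on
 * success the return value is the number of callbacks removed. A sketch
 * ("handle" and "my_irq_handler" are hypothetical names):
 *
 *	int nb = rte_intr_callback_unregister(&handle, my_irq_handler,
 *			(void *)-1);
 */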

int
rte_intr_enable(const struct rte_intr_handle *intr_handle)
{
	if (intr_handle && intr_handle->type == RTE_INTR_HANDLE_VDEV)
		return 0;

	if (!intr_handle || intr_handle->fd < 0 || intr_handle->uio_cfg_fd < 0)
		return -1;

	switch (intr_handle->type) {
	/* not used at this moment */
	case RTE_INTR_HANDLE_ALARM:
		return -1;
	/* not used at this moment */
	case RTE_INTR_HANDLE_DEV_EVENT:
		return -1;
	/* unknown handle type */
	default:
		RTE_LOG(ERR, EAL,
			"Unknown handle type of fd %d\n",
			intr_handle->fd);
		return -1;
	}

	return 0;
}

int
rte_intr_disable(const struct rte_intr_handle *intr_handle)
{
	if (intr_handle && intr_handle->type == RTE_INTR_HANDLE_VDEV)
		return 0;

	if (!intr_handle || intr_handle->fd < 0 || intr_handle->uio_cfg_fd < 0)
		return -1;

	switch (intr_handle->type) {
	/* not used at this moment */
	case RTE_INTR_HANDLE_ALARM:
		return -1;
	/* not used at this moment */
	case RTE_INTR_HANDLE_DEV_EVENT:
		return -1;
	/* unknown handle type */
	default:
		RTE_LOG(ERR, EAL,
			"Unknown handle type of fd %d\n",
			intr_handle->fd);
		return -1;
	}

	return 0;
}
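
/*
 * Note: at this stage only RTE_INTR_HANDLE_VDEV handles succeed in
 * rte_intr_enable()/rte_intr_disable(); every other handle type falls
 * through to the error paths above, as device-level interrupt masking is
 * not yet wired up on FreeBSD.
 */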

static void
eal_intr_process_interrupts(struct kevent *events, int nfds)
{
	struct rte_intr_callback active_cb;
	union rte_intr_read_buffer buf;
	struct rte_intr_callback *cb;
	struct rte_intr_source *src;
	int n, bytes_read;

	for (n = 0; n < nfds; n++) {
		int event_fd = events[n].ident;
		bool call = false;

		rte_spinlock_lock(&intr_lock);
		TAILQ_FOREACH(src, &intr_sources, next)
			if (src->intr_handle.fd == event_fd)
				break;
		if (src == NULL) {
			rte_spinlock_unlock(&intr_lock);
			continue;
		}

		/* mark this interrupt source as active and release the lock. */
		src->active = 1;
		rte_spinlock_unlock(&intr_lock);

		/* set the length to be read for different handle type */
		switch (src->intr_handle.type) {
		case RTE_INTR_HANDLE_ALARM:
			bytes_read = 0;
			call = true;
			break;
		case RTE_INTR_HANDLE_VDEV:
		case RTE_INTR_HANDLE_EXT:
			bytes_read = 0;
			call = true;
			break;
		case RTE_INTR_HANDLE_DEV_EVENT:
			bytes_read = 0;
			call = true;
			break;
		default:
			bytes_read = 1;
			break;
		}

		if (bytes_read > 0) {
			/**
			 * read out to clear the ready-to-be-read flag
			 * for the next kevent wait.
			 */
			bytes_read = read(event_fd, &buf, bytes_read);
			if (bytes_read < 0) {
				if (errno == EINTR || errno == EWOULDBLOCK) {
					/* mark the source inactive again
					 * before skipping it, so it can
					 * still be unregistered.
					 */
					rte_spinlock_lock(&intr_lock);
					src->active = 0;
					rte_spinlock_unlock(&intr_lock);
					continue;
				}

				RTE_LOG(ERR, EAL, "Error reading from file "
					"descriptor %d: %s\n",
					event_fd,
					strerror(errno));
			} else if (bytes_read == 0)
				RTE_LOG(ERR, EAL, "Read nothing from file "
					"descriptor %d\n", event_fd);
			else
				call = true;
		}

		/* grab the lock again to call callbacks and update status. */
		rte_spinlock_lock(&intr_lock);

		if (call) {
			/* Finally, call all callbacks. */
			TAILQ_FOREACH(cb, &src->callbacks, next) {

				/* make a copy and unlock. */
				active_cb = *cb;
				rte_spinlock_unlock(&intr_lock);

				/* call the actual callback */
				active_cb.cb_fn(active_cb.cb_arg);

				/* get the lock back. */
				rte_spinlock_lock(&intr_lock);
			}
		}

		/* we are done with this interrupt source, release it. */
		src->active = 0;
		rte_spinlock_unlock(&intr_lock);
	}
}
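
/*
 * The src->active flag set above is what makes unregistration safe: while a
 * source is being serviced, rte_intr_callback_unregister() refuses to free
 * it and returns -EAGAIN, so a callback is never freed out from under the
 * interrupt thread.
 */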

static void *
eal_intr_thread_main(void *arg __rte_unused)
{
	struct kevent events[MAX_INTR_EVENTS];
	int nfds;

	/* host thread, never break out */
	for (;;) {
		/* do not change anything, just wait */
		nfds = kevent(kq, NULL, 0, events, MAX_INTR_EVENTS, NULL);

		/* kevent fail */
		if (nfds < 0) {
			if (errno == EINTR)
				continue;
			RTE_LOG(ERR, EAL,
				"kevent wait failed: %s\n", strerror(errno));
			break;
		}
		/* kevent timeout, will never happen here */
		else if (nfds == 0)
			continue;

		/* kevent has at least one fd ready to read */
		eal_intr_process_interrupts(events, nfds);
	}
	close(kq);
	kq = -1;
	return NULL;
}

int
rte_eal_intr_init(void)
{
	int ret = 0;

	/* init the global interrupt source head */
	TAILQ_INIT(&intr_sources);

	kq = kqueue();
	if (kq < 0) {
		RTE_LOG(ERR, EAL, "Cannot create kqueue instance\n");
		return -1;
	}

	/* create the host thread to wait/handle the interrupt */
	ret = rte_ctrl_thread_create(&intr_thread, "eal-intr-thread", NULL,
			eal_intr_thread_main, NULL);
	if (ret != 0) {
		rte_errno = -ret;
		RTE_LOG(ERR, EAL,
			"Failed to create thread for interrupt handling\n");
	}

	return ret;
}
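
/*
 * Everything below stubs out the packet I/O (Rx) interrupt and epoll-based
 * event APIs, which are only implemented on Linux: the control operations
 * report -ENOTSUP, while the capability queries report no datapath
 * interrupt support.
 */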

int
rte_intr_rx_ctl(struct rte_intr_handle *intr_handle,
		int epfd, int op, unsigned int vec, void *data)
{
	RTE_SET_USED(intr_handle);
	RTE_SET_USED(epfd);
	RTE_SET_USED(op);
	RTE_SET_USED(vec);
	RTE_SET_USED(data);

	return -ENOTSUP;
}

int
rte_intr_efd_enable(struct rte_intr_handle *intr_handle, uint32_t nb_efd)
{
	RTE_SET_USED(intr_handle);
	RTE_SET_USED(nb_efd);

	return 0;
}

void
rte_intr_efd_disable(struct rte_intr_handle *intr_handle)
{
	RTE_SET_USED(intr_handle);
}

int
rte_intr_dp_is_en(struct rte_intr_handle *intr_handle)
{
	RTE_SET_USED(intr_handle);
	return 0;
}

int
rte_intr_allow_others(struct rte_intr_handle *intr_handle)
{
	RTE_SET_USED(intr_handle);
	return 1;
}

int
rte_intr_cap_multiple(struct rte_intr_handle *intr_handle)
{
	RTE_SET_USED(intr_handle);
	return 0;
}

int
rte_epoll_wait(int epfd, struct rte_epoll_event *events,
		int maxevents, int timeout)
{
	RTE_SET_USED(epfd);
	RTE_SET_USED(events);
	RTE_SET_USED(maxevents);
	RTE_SET_USED(timeout);

	return -ENOTSUP;
}

int
rte_epoll_ctl(int epfd, int op, int fd, struct rte_epoll_event *event)
{
	RTE_SET_USED(epfd);
	RTE_SET_USED(op);
	RTE_SET_USED(fd);
	RTE_SET_USED(event);

	return -ENOTSUP;
}

int
rte_intr_tls_epfd(void)
{
	return -ENOTSUP;
}

void
rte_intr_free_epoll_fd(struct rte_intr_handle *intr_handle)
{
	RTE_SET_USED(intr_handle);
}