[dpdk.git] / lib / librte_eal / freebsd / eal / eal_interrupts.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2018 Intel Corporation
 */

#include <errno.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include <pthread.h>
#include <sys/types.h>
#include <sys/event.h>
#include <sys/queue.h>
#include <unistd.h>

#include <rte_errno.h>
#include <rte_lcore.h>
#include <rte_spinlock.h>
#include <rte_common.h>
#include <rte_interrupts.h>

#include "eal_private.h"
#include "eal_alarm_private.h"

#define MAX_INTR_EVENTS 16

/**
 * union buffer for reading on different devices
 */
union rte_intr_read_buffer {
	char charbuf[16];                /* for others */
};

TAILQ_HEAD(rte_intr_cb_list, rte_intr_callback);
TAILQ_HEAD(rte_intr_source_list, rte_intr_source);

struct rte_intr_callback {
	TAILQ_ENTRY(rte_intr_callback) next;
	rte_intr_callback_fn cb_fn;  /**< callback address */
	void *cb_arg;                /**< parameter for callback */
	uint8_t pending_delete;      /**< delete after callback is called */
	rte_intr_unregister_callback_fn ucb_fn; /**< fn to call before cb is deleted */
};

struct rte_intr_source {
	TAILQ_ENTRY(rte_intr_source) next;
	struct rte_intr_handle intr_handle; /**< interrupt handle */
	struct rte_intr_cb_list callbacks;  /**< user callbacks */
	uint32_t active;
};

/* global spinlock for interrupt data operation */
static rte_spinlock_t intr_lock = RTE_SPINLOCK_INITIALIZER;

/* interrupt sources list */
static struct rte_intr_source_list intr_sources;

/* interrupt handling thread */
static pthread_t intr_thread;

static volatile int kq = -1;

static int
intr_source_to_kevent(const struct rte_intr_handle *ih, struct kevent *ke)
{
	/* alarm callbacks are a special case */
	if (ih->type == RTE_INTR_HANDLE_ALARM) {
		uint64_t timeout_ns;

		/* get soonest alarm timeout */
		if (eal_alarm_get_timeout_ns(&timeout_ns) < 0)
			return -1;

		ke->filter = EVFILT_TIMER;
		/* timers are one shot */
		ke->flags |= EV_ONESHOT;
		ke->fflags = NOTE_NSECONDS;
		ke->data = timeout_ns;
	} else {
		ke->filter = EVFILT_READ;
	}
	ke->ident = ih->fd;

	return 0;
}
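
/*
 * Usage sketch (hypothetical handle "ih"): callers zero the kevent and
 * set the base flags themselves; this helper only fills in the filter
 * and ident, plus the one-shot timer fields for alarm handles.
 *
 *	struct kevent ke;
 *
 *	memset(&ke, 0, sizeof(ke));
 *	ke.flags = EV_ADD;
 *	if (intr_source_to_kevent(&ih, &ke) == 0)
 *		kevent(kq, &ke, 1, NULL, 0, NULL);
 */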

int
rte_intr_callback_register(const struct rte_intr_handle *intr_handle,
		rte_intr_callback_fn cb, void *cb_arg)
{
	struct rte_intr_callback *callback = NULL;
	struct rte_intr_source *src = NULL;
	int ret, add_event = 0;

	/* first do parameter checking */
	if (intr_handle == NULL || intr_handle->fd < 0 || cb == NULL) {
		RTE_LOG(ERR, EAL,
			"Registering with invalid input parameter\n");
		return -EINVAL;
	}
	if (kq < 0) {
		RTE_LOG(ERR, EAL, "Kqueue is not active: %d\n", kq);
		return -ENODEV;
	}

	/* allocate a new interrupt callback entity */
	callback = calloc(1, sizeof(*callback));
	if (callback == NULL) {
		RTE_LOG(ERR, EAL, "Cannot allocate memory\n");
		return -ENOMEM;
	}
	callback->cb_fn = cb;
	callback->cb_arg = cb_arg;
	callback->pending_delete = 0;
	callback->ucb_fn = NULL;

	rte_spinlock_lock(&intr_lock);

	/* check if there is at least one callback registered for the fd */
	TAILQ_FOREACH(src, &intr_sources, next) {
		if (src->intr_handle.fd == intr_handle->fd) {
			/* we had no interrupts for this */
			if (TAILQ_EMPTY(&src->callbacks))
				add_event = 1;

			TAILQ_INSERT_TAIL(&(src->callbacks), callback, next);
			ret = 0;
			break;
		}
	}

	/* no existing callbacks for this - add new source */
	if (src == NULL) {
		src = calloc(1, sizeof(*src));
		if (src == NULL) {
			RTE_LOG(ERR, EAL, "Cannot allocate memory\n");
			ret = -ENOMEM;
			goto fail;
		} else {
			src->intr_handle = *intr_handle;
			TAILQ_INIT(&src->callbacks);
			TAILQ_INSERT_TAIL(&(src->callbacks), callback, next);
			TAILQ_INSERT_TAIL(&intr_sources, src, next);
			add_event = 1;
			ret = 0;
		}
	}

	/* add events to the queue. timer events are special as we need to
	 * re-set the timer.
	 */
	if (add_event || src->intr_handle.type == RTE_INTR_HANDLE_ALARM) {
		struct kevent ke;

		memset(&ke, 0, sizeof(ke));
		ke.flags = EV_ADD; /* mark for addition to the queue */

		if (intr_source_to_kevent(intr_handle, &ke) < 0) {
			RTE_LOG(ERR, EAL, "Cannot convert interrupt handle to kevent\n");
			ret = -ENODEV;
			goto fail;
		}

		/**
		 * add the intr file descriptor into wait list.
		 */
		if (kevent(kq, &ke, 1, NULL, 0, NULL) < 0) {
			/* currently, nic_uio does not support interrupts, so
			 * this error will always be triggered and output to the
			 * user. so, don't output it unless debug log level set.
			 */
			if (errno == ENODEV)
				RTE_LOG(DEBUG, EAL, "Interrupt handle %d not supported\n",
					src->intr_handle.fd);
			else
				RTE_LOG(ERR, EAL, "Error adding fd %d "
						"kevent, %s\n",
						src->intr_handle.fd,
						strerror(errno));
			ret = -errno;
			goto fail;
		}
	}
	rte_spinlock_unlock(&intr_lock);

	return ret;
fail:
	/* clean up */
	if (src != NULL) {
		TAILQ_REMOVE(&(src->callbacks), callback, next);
		if (TAILQ_EMPTY(&(src->callbacks))) {
			TAILQ_REMOVE(&intr_sources, src, next);
			free(src);
		}
	}
	free(callback);
	rte_spinlock_unlock(&intr_lock);
	return ret;
}
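
/*
 * Usage sketch (device and handler names hypothetical): a driver
 * registers a handler for its interrupt fd; registering a second
 * callback on the same fd simply appends to the existing source's
 * callback list.
 *
 *	static void
 *	lsc_handler(void *arg)
 *	{
 *		struct my_dev *dev = arg;
 *		...
 *	}
 *
 *	if (rte_intr_callback_register(&dev->intr_handle,
 *			lsc_handler, dev) < 0)
 *		rte_panic("cannot register interrupt callback\n");
 */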

int
rte_intr_callback_unregister_pending(const struct rte_intr_handle *intr_handle,
				rte_intr_callback_fn cb_fn, void *cb_arg,
				rte_intr_unregister_callback_fn ucb_fn)
{
	int ret;
	struct rte_intr_source *src;
	struct rte_intr_callback *cb, *next;

	/* do parameter checking first */
	if (intr_handle == NULL || intr_handle->fd < 0) {
		RTE_LOG(ERR, EAL,
		"Unregistering with invalid input parameter\n");
		return -EINVAL;
	}

	if (kq < 0) {
		RTE_LOG(ERR, EAL, "Kqueue is not active\n");
		return -ENODEV;
	}

	rte_spinlock_lock(&intr_lock);

	/* check if the interrupt source for the fd exists */
	TAILQ_FOREACH(src, &intr_sources, next)
		if (src->intr_handle.fd == intr_handle->fd)
			break;

	/* No interrupt source registered for the fd */
	if (src == NULL) {
		ret = -ENOENT;

	/* only usable if the source is active */
	} else if (src->active == 0) {
		ret = -EAGAIN;

	} else {
		ret = 0;

		/* walk through the callbacks and mark all that match. */
		for (cb = TAILQ_FIRST(&src->callbacks); cb != NULL; cb = next) {
			next = TAILQ_NEXT(cb, next);
			if (cb->cb_fn == cb_fn && (cb_arg == (void *)-1 ||
					cb->cb_arg == cb_arg)) {
				cb->pending_delete = 1;
				cb->ucb_fn = ucb_fn;
				ret++;
			}
		}
	}

	rte_spinlock_unlock(&intr_lock);

	return ret;
}
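
/*
 * Usage sketch (names hypothetical): inside a callback the source is
 * marked active, so rte_intr_callback_unregister() would fail with
 * -EAGAIN. The pending variant marks the callback instead and lets the
 * interrupt thread free it later, calling ucb_fn first so cb_arg can be
 * reclaimed safely.
 *
 *	static void
 *	release(struct rte_intr_handle *ih, void *arg)
 *	{
 *		free(arg);
 *	}
 *
 *	static void
 *	once_cb(void *arg)
 *	{
 *		struct my_ctx *ctx = arg;
 *		...
 *		rte_intr_callback_unregister_pending(&ctx->ih, once_cb,
 *				arg, release);
 *	}
 */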

int
rte_intr_callback_unregister(const struct rte_intr_handle *intr_handle,
		rte_intr_callback_fn cb_fn, void *cb_arg)
{
	int ret;
	struct rte_intr_source *src;
	struct rte_intr_callback *cb, *next;

	/* do parameter checking first */
	if (intr_handle == NULL || intr_handle->fd < 0) {
		RTE_LOG(ERR, EAL,
		"Unregistering with invalid input parameter\n");
		return -EINVAL;
	}
	if (kq < 0) {
		RTE_LOG(ERR, EAL, "Kqueue is not active\n");
		return -ENODEV;
	}

	rte_spinlock_lock(&intr_lock);

	/* check if the interrupt source for the fd exists */
	TAILQ_FOREACH(src, &intr_sources, next)
		if (src->intr_handle.fd == intr_handle->fd)
			break;

	/* No interrupt source registered for the fd */
	if (src == NULL) {
		ret = -ENOENT;

	/* interrupt source has some active callbacks right now. */
	} else if (src->active != 0) {
		ret = -EAGAIN;

	/* ok to remove. */
	} else {
		struct kevent ke;

		ret = 0;

		/* remove it from the kqueue */
		memset(&ke, 0, sizeof(ke));
		ke.flags = EV_DELETE; /* mark for deletion from the queue */

		if (intr_source_to_kevent(intr_handle, &ke) < 0) {
			RTE_LOG(ERR, EAL, "Cannot convert to kevent\n");
			ret = -ENODEV;
			goto out;
		}

		/**
		 * remove intr file descriptor from wait list.
		 */
		if (kevent(kq, &ke, 1, NULL, 0, NULL) < 0) {
			RTE_LOG(ERR, EAL, "Error removing fd %d kevent, %s\n",
				src->intr_handle.fd, strerror(errno));
			/* removing a non-existent event is an expected
			 * condition in some circumstances (e.g. oneshot
			 * events).
			 */
		}

		/* walk through the callbacks and remove all that match. */
		for (cb = TAILQ_FIRST(&src->callbacks); cb != NULL; cb = next) {
			next = TAILQ_NEXT(cb, next);
			if (cb->cb_fn == cb_fn && (cb_arg == (void *)-1 ||
					cb->cb_arg == cb_arg)) {
				TAILQ_REMOVE(&src->callbacks, cb, next);
				free(cb);
				ret++;
			}
		}

		/* all callbacks for that source are removed. */
		if (TAILQ_EMPTY(&src->callbacks)) {
			TAILQ_REMOVE(&intr_sources, src, next);
			free(src);
		}
	}
out:
	rte_spinlock_unlock(&intr_lock);

	return ret;
}
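
/*
 * Usage sketch: passing (void *)-1 as cb_arg acts as a wildcard that
 * matches every callback registered with cb_fn on the handle; the
 * return value is the number of callbacks removed, or a negative errno
 * (handler and device names are the hypothetical ones from the sketch
 * above).
 *
 *	int n = rte_intr_callback_unregister(&dev->intr_handle,
 *			lsc_handler, (void *)-1);
 */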

int
rte_intr_enable(const struct rte_intr_handle *intr_handle)
{
	if (intr_handle && intr_handle->type == RTE_INTR_HANDLE_VDEV)
		return 0;

	if (!intr_handle || intr_handle->fd < 0 || intr_handle->uio_cfg_fd < 0)
		return -1;

	switch (intr_handle->type) {
	/* not used at this moment */
	case RTE_INTR_HANDLE_ALARM:
		return -1;
	/* not used at this moment */
	case RTE_INTR_HANDLE_DEV_EVENT:
		return -1;
	/* unknown handle type */
	default:
		RTE_LOG(ERR, EAL,
			"Unknown handle type of fd %d\n",
					intr_handle->fd);
		return -1;
	}

	return 0;
}

int
rte_intr_disable(const struct rte_intr_handle *intr_handle)
{
	if (intr_handle && intr_handle->type == RTE_INTR_HANDLE_VDEV)
		return 0;

	if (!intr_handle || intr_handle->fd < 0 || intr_handle->uio_cfg_fd < 0)
		return -1;

	switch (intr_handle->type) {
	/* not used at this moment */
	case RTE_INTR_HANDLE_ALARM:
		return -1;
	/* not used at this moment */
	case RTE_INTR_HANDLE_DEV_EVENT:
		return -1;
	/* unknown handle type */
	default:
		RTE_LOG(ERR, EAL,
			"Unknown handle type of fd %d\n",
					intr_handle->fd);
		return -1;
	}

	return 0;
}

int
rte_intr_ack(const struct rte_intr_handle *intr_handle)
{
	if (intr_handle && intr_handle->type == RTE_INTR_HANDLE_VDEV)
		return 0;

	return -1;
}
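
/*
 * Dispatch below follows a copy-then-unlock protocol: a source being
 * serviced is flagged active, each callback is copied out of the list
 * and invoked with intr_lock released, and callbacks flagged
 * pending_delete are reaped only once the source goes idle again. This
 * is why rte_intr_callback_unregister() refuses an active source with
 * -EAGAIN while rte_intr_callback_unregister_pending() requires one.
 */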

static void
eal_intr_process_interrupts(struct kevent *events, int nfds)
{
	struct rte_intr_callback active_cb;
	union rte_intr_read_buffer buf;
	struct rte_intr_callback *cb, *next;
	struct rte_intr_source *src;
	bool call = false;
	int n, bytes_read;
	struct kevent ke;

	for (n = 0; n < nfds; n++) {
		int event_fd = events[n].ident;

		/* reset per event; a failed read must not inherit "call"
		 * from the previous iteration.
		 */
		call = false;

		rte_spinlock_lock(&intr_lock);
		TAILQ_FOREACH(src, &intr_sources, next)
			if (src->intr_handle.fd == event_fd)
				break;
		if (src == NULL) {
			rte_spinlock_unlock(&intr_lock);
			continue;
		}

		/* mark this interrupt source as active and release the lock. */
		src->active = 1;
		rte_spinlock_unlock(&intr_lock);

		/* set the length to be read for different handle types */
		switch (src->intr_handle.type) {
		case RTE_INTR_HANDLE_ALARM:
		case RTE_INTR_HANDLE_VDEV:
		case RTE_INTR_HANDLE_EXT:
		case RTE_INTR_HANDLE_DEV_EVENT:
			bytes_read = 0;
			call = true;
			break;
		default:
			bytes_read = 1;
			break;
		}

		if (bytes_read > 0) {
			/**
			 * read out to clear the ready-to-be-read flag
			 * for the next kevent wait.
			 */
			bytes_read = read(event_fd, &buf, bytes_read);
			if (bytes_read < 0) {
				if (errno == EINTR || errno == EWOULDBLOCK) {
					/* mark the source idle again before
					 * skipping it, or it would stay
					 * active forever.
					 */
					rte_spinlock_lock(&intr_lock);
					src->active = 0;
					rte_spinlock_unlock(&intr_lock);
					continue;
				}

				RTE_LOG(ERR, EAL, "Error reading from file "
					"descriptor %d: %s\n",
					event_fd,
					strerror(errno));
			} else if (bytes_read == 0)
				RTE_LOG(ERR, EAL, "Read nothing from file "
					"descriptor %d\n", event_fd);
			else
				call = true;
		}

		/* grab the lock again to call callbacks and update status. */
		rte_spinlock_lock(&intr_lock);

		if (call) {
			/* Finally, call all callbacks. */
			TAILQ_FOREACH(cb, &src->callbacks, next) {

				/* make a copy and unlock. */
				active_cb = *cb;
				rte_spinlock_unlock(&intr_lock);

				/* call the actual callback */
				active_cb.cb_fn(active_cb.cb_arg);

				/* get the lock back. */
				rte_spinlock_lock(&intr_lock);
			}
		}

		/* we are done with that interrupt source, release it. */
		src->active = 0;

		/* check if any callbacks are supposed to be removed */
		for (cb = TAILQ_FIRST(&src->callbacks); cb != NULL; cb = next) {
			next = TAILQ_NEXT(cb, next);
			if (cb->pending_delete) {
				/* remove it from the kqueue */
				memset(&ke, 0, sizeof(ke));
				/* mark for deletion from the queue */
				ke.flags = EV_DELETE;

				if (intr_source_to_kevent(&src->intr_handle, &ke) < 0) {
					RTE_LOG(ERR, EAL, "Cannot convert to kevent\n");
					rte_spinlock_unlock(&intr_lock);
					return;
				}

				/**
				 * remove intr file descriptor from wait list.
				 */
				if (kevent(kq, &ke, 1, NULL, 0, NULL) < 0) {
					RTE_LOG(ERR, EAL, "Error removing fd %d kevent, "
						"%s\n", src->intr_handle.fd,
						strerror(errno));
					/* removing a non-existent event is an
					 * expected condition in some
					 * circumstances (e.g. oneshot events).
					 */
				}

				TAILQ_REMOVE(&src->callbacks, cb, next);
				if (cb->ucb_fn)
					cb->ucb_fn(&src->intr_handle, cb->cb_arg);
				free(cb);
			}
		}

		/* all callbacks for that source are removed. */
		if (TAILQ_EMPTY(&src->callbacks)) {
			TAILQ_REMOVE(&intr_sources, src, next);
			free(src);
		}

		rte_spinlock_unlock(&intr_lock);
	}
}

static void *
eal_intr_thread_main(void *arg __rte_unused)
{
	struct kevent events[MAX_INTR_EVENTS];
	int nfds;

	/* host thread, never break out */
	for (;;) {
		/* do not change anything, just wait */
		nfds = kevent(kq, NULL, 0, events, MAX_INTR_EVENTS, NULL);

		/* kevent failed */
		if (nfds < 0) {
			if (errno == EINTR)
				continue;
			RTE_LOG(ERR, EAL,
				"kevent wait failed\n");
			break;
		}
		/* kevent timeout, will never happen here */
		else if (nfds == 0)
			continue;

		/* kevent has at least one fd ready to read */
		eal_intr_process_interrupts(events, nfds);
	}
	close(kq);
	kq = -1;
	return NULL;
}

int
rte_eal_intr_init(void)
{
	int ret = 0;

	/* init the global interrupt source head */
	TAILQ_INIT(&intr_sources);

	kq = kqueue();
	if (kq < 0) {
		RTE_LOG(ERR, EAL, "Cannot create kqueue instance\n");
		return -1;
	}

	/* create the host thread to wait/handle the interrupt */
	ret = rte_ctrl_thread_create(&intr_thread, "eal-intr-thread", NULL,
			eal_intr_thread_main, NULL);
	if (ret != 0) {
		rte_errno = -ret;
		RTE_LOG(ERR, EAL,
			"Failed to create thread for interrupt handling\n");
	}

	return ret;
}
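
/*
 * rte_eal_intr_init() is invoked once during EAL startup; a failure
 * returns nonzero (rte_errno is set on thread-creation failure). A
 * hypothetical standalone check could read:
 *
 *	if (rte_eal_intr_init() != 0)
 *		RTE_LOG(ERR, EAL, "intr init failed: %s\n",
 *			rte_strerror(rte_errno));
 */

/*
 * The epoll/eventfd-style RX interrupt control APIs below are
 * Linux-only; the FreeBSD EAL provides stubs that report -ENOTSUP or
 * succeed as no-ops.
 */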

int
rte_intr_rx_ctl(struct rte_intr_handle *intr_handle,
		int epfd, int op, unsigned int vec, void *data)
{
	RTE_SET_USED(intr_handle);
	RTE_SET_USED(epfd);
	RTE_SET_USED(op);
	RTE_SET_USED(vec);
	RTE_SET_USED(data);

	return -ENOTSUP;
}

int
rte_intr_efd_enable(struct rte_intr_handle *intr_handle, uint32_t nb_efd)
{
	RTE_SET_USED(intr_handle);
	RTE_SET_USED(nb_efd);

	return 0;
}

void
rte_intr_efd_disable(struct rte_intr_handle *intr_handle)
{
	RTE_SET_USED(intr_handle);
}

int
rte_intr_dp_is_en(struct rte_intr_handle *intr_handle)
{
	RTE_SET_USED(intr_handle);
	return 0;
}

int
rte_intr_allow_others(struct rte_intr_handle *intr_handle)
{
	RTE_SET_USED(intr_handle);
	return 1;
}

int
rte_intr_cap_multiple(struct rte_intr_handle *intr_handle)
{
	RTE_SET_USED(intr_handle);
	return 0;
}

int
rte_epoll_wait(int epfd, struct rte_epoll_event *events,
		int maxevents, int timeout)
{
	RTE_SET_USED(epfd);
	RTE_SET_USED(events);
	RTE_SET_USED(maxevents);
	RTE_SET_USED(timeout);

	return -ENOTSUP;
}

int
rte_epoll_ctl(int epfd, int op, int fd, struct rte_epoll_event *event)
{
	RTE_SET_USED(epfd);
	RTE_SET_USED(op);
	RTE_SET_USED(fd);
	RTE_SET_USED(event);

	return -ENOTSUP;
}

int
rte_intr_tls_epfd(void)
{
	return -ENOTSUP;
}

void
rte_intr_free_epoll_fd(struct rte_intr_handle *intr_handle)
{
	RTE_SET_USED(intr_handle);
}

int
rte_thread_is_intr(void)
{
	return pthread_equal(intr_thread, pthread_self());
}
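
/*
 * Usage sketch: interrupt callbacks run in the eal-intr-thread created
 * above, so shared code can avoid blocking there by branching on the
 * execution context (defer_work/do_work are hypothetical helpers):
 *
 *	if (rte_thread_is_intr())
 *		defer_work(dev);
 *	else
 *		do_work(dev);
 */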