/* Source: dpdk.git - lib/librte_eal/freebsd/eal/eal_interrupts.c
 * (snapshot 00991f26a94d57134a1d0d7272b5e7e3565b51b2)
 */
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2018 Intel Corporation
3  */
4
5 #include <string.h>
6 #include <sys/types.h>
7 #include <sys/event.h>
8 #include <sys/queue.h>
9 #include <unistd.h>
10
11 #include <rte_errno.h>
12 #include <rte_lcore.h>
13 #include <rte_spinlock.h>
14 #include <rte_common.h>
15 #include <rte_interrupts.h>
16
17 #include "eal_private.h"
18 #include "eal_alarm_private.h"
19
/* maximum number of kevents fetched by one kevent() call in the
 * interrupt thread
 */
#define MAX_INTR_EVENTS 16

/**
 * union buffer for reading on different devices
 */
union rte_intr_read_buffer {
	char charbuf[16];                /* for others */
};

TAILQ_HEAD(rte_intr_cb_list, rte_intr_callback);
TAILQ_HEAD(rte_intr_source_list, rte_intr_source);

/* one user callback registered against an interrupt source */
struct rte_intr_callback {
	TAILQ_ENTRY(rte_intr_callback) next;
	rte_intr_callback_fn cb_fn;  /**< callback address */
	void *cb_arg;                /**< parameter for callback */
	uint8_t pending_delete;      /**< delete after callback is called */
	rte_intr_unregister_callback_fn ucb_fn; /**< fn to call before cb is deleted */
};

/* per-fd interrupt source: the handle plus its list of callbacks */
struct rte_intr_source {
	TAILQ_ENTRY(rte_intr_source) next;
	struct rte_intr_handle intr_handle; /**< interrupt handle */
	struct rte_intr_cb_list callbacks;  /**< user callbacks */
	uint32_t active; /**< nonzero while callbacks are being serviced */
};

/* global spinlock for interrupt data operation */
static rte_spinlock_t intr_lock = RTE_SPINLOCK_INITIALIZER;

/* interrupt sources list */
static struct rte_intr_source_list intr_sources;

/* interrupt handling thread */
static pthread_t intr_thread;

/* kqueue descriptor polled by the interrupt thread; -1 when inactive */
static volatile int kq = -1;
57
58 static int
59 intr_source_to_kevent(const struct rte_intr_handle *ih, struct kevent *ke)
60 {
61         /* alarm callbacks are special case */
62         if (ih->type == RTE_INTR_HANDLE_ALARM) {
63                 uint64_t timeout_ns;
64
65                 /* get soonest alarm timeout */
66                 if (eal_alarm_get_timeout_ns(&timeout_ns) < 0)
67                         return -1;
68
69                 ke->filter = EVFILT_TIMER;
70                 /* timers are one shot */
71                 ke->flags |= EV_ONESHOT;
72                 ke->fflags = NOTE_NSECONDS;
73                 ke->data = timeout_ns;
74         } else {
75                 ke->filter = EVFILT_READ;
76         }
77         ke->ident = ih->fd;
78
79         return 0;
80 }
81
/*
 * Register a callback for the given interrupt handle and make sure the
 * fd (or alarm timer) is present in the kqueue watched by the interrupt
 * thread.
 *
 * For alarm handles that already have a callback, no new callback is
 * created (the list should only ever hold eal_alarm_callback()); the
 * call then merely re-arms the one-shot timer.
 *
 * Returns 0 on success; -EINVAL on bad parameters, -ENODEV if the
 * kqueue is not active or the handle cannot be converted/added,
 * -ENOMEM on allocation failure, or -errno from kevent().
 */
int
rte_intr_callback_register(const struct rte_intr_handle *intr_handle,
		rte_intr_callback_fn cb, void *cb_arg)
{
	struct rte_intr_callback *callback;
	struct rte_intr_source *src;
	int ret, add_event = 0;

	/* first do parameter checking */
	if (intr_handle == NULL || intr_handle->fd < 0 || cb == NULL) {
		RTE_LOG(ERR, EAL,
			"Registering with invalid input parameter\n");
		return -EINVAL;
	}
	if (kq < 0) {
		RTE_LOG(ERR, EAL, "Kqueue is not active: %d\n", kq);
		return -ENODEV;
	}

	rte_spinlock_lock(&intr_lock);

	/* find the source for this intr_handle */
	TAILQ_FOREACH(src, &intr_sources, next) {
		if (src->intr_handle.fd == intr_handle->fd)
			break;
	}

	/* if this is an alarm interrupt and it already has a callback,
	 * then we don't want to create a new callback because the only
	 * thing on the list should be eal_alarm_callback() and we may
	 * be called just to reset the timer.
	 */
	if (src != NULL && src->intr_handle.type == RTE_INTR_HANDLE_ALARM &&
		 !TAILQ_EMPTY(&src->callbacks)) {
		callback = NULL;
	} else {
		/* allocate a new interrupt callback entity */
		callback = calloc(1, sizeof(*callback));
		if (callback == NULL) {
			RTE_LOG(ERR, EAL, "Can not allocate memory\n");
			ret = -ENOMEM;
			goto fail;
		}
		callback->cb_fn = cb;
		callback->cb_arg = cb_arg;
		callback->pending_delete = 0;
		callback->ucb_fn = NULL;

		/* no source for this fd yet: create and link one */
		if (src == NULL) {
			src = calloc(1, sizeof(*src));
			if (src == NULL) {
				RTE_LOG(ERR, EAL, "Can not allocate memory\n");
				ret = -ENOMEM;
				goto fail;
			} else {
				src->intr_handle = *intr_handle;
				TAILQ_INIT(&src->callbacks);
				TAILQ_INSERT_TAIL(&intr_sources, src, next);
			}
		}

		/* we had no interrupts for this */
		if (TAILQ_EMPTY(&src->callbacks))
			add_event = 1;

		TAILQ_INSERT_TAIL(&(src->callbacks), callback, next);
	}

	/* add events to the queue. timer events are special as we need to
	 * re-set the timer.
	 */
	if (add_event || src->intr_handle.type == RTE_INTR_HANDLE_ALARM) {
		struct kevent ke;

		memset(&ke, 0, sizeof(ke));
		ke.flags = EV_ADD; /* mark for addition to the queue */

		if (intr_source_to_kevent(intr_handle, &ke) < 0) {
			RTE_LOG(ERR, EAL, "Cannot convert interrupt handle to kevent\n");
			ret = -ENODEV;
			goto fail;
		}

		/**
		 * add the intr file descriptor into wait list.
		 */
		if (kevent(kq, &ke, 1, NULL, 0, NULL) < 0) {
			/* currently, nic_uio does not support interrupts, so
			 * this error will always be triggered and output to the
			 * user. so, don't output it unless debug log level set.
			 */
			if (errno == ENODEV)
				RTE_LOG(DEBUG, EAL, "Interrupt handle %d not supported\n",
					src->intr_handle.fd);
			else
				RTE_LOG(ERR, EAL, "Error adding fd %d "
						"kevent, %s\n",
						src->intr_handle.fd,
						strerror(errno));
			ret = -errno;
			goto fail;
		}
	}
	rte_spinlock_unlock(&intr_lock);

	return 0;
fail:
	/* clean up: unlink whatever this call created (callback is NULL on
	 * the alarm re-arm path, so only the source may need removal then).
	 */
	if (src != NULL) {
		if (callback != NULL)
			TAILQ_REMOVE(&(src->callbacks), callback, next);
		if (TAILQ_EMPTY(&(src->callbacks))) {
			TAILQ_REMOVE(&intr_sources, src, next);
			free(src);
		}
	}
	free(callback);
	rte_spinlock_unlock(&intr_lock);
	return ret;
}
202
203 int
204 rte_intr_callback_unregister_pending(const struct rte_intr_handle *intr_handle,
205                                 rte_intr_callback_fn cb_fn, void *cb_arg,
206                                 rte_intr_unregister_callback_fn ucb_fn)
207 {
208         int ret;
209         struct rte_intr_source *src;
210         struct rte_intr_callback *cb, *next;
211
212         /* do parameter checking first */
213         if (intr_handle == NULL || intr_handle->fd < 0) {
214                 RTE_LOG(ERR, EAL,
215                 "Unregistering with invalid input parameter\n");
216                 return -EINVAL;
217         }
218
219         if (kq < 0) {
220                 RTE_LOG(ERR, EAL, "Kqueue is not active\n");
221                 return -ENODEV;
222         }
223
224         rte_spinlock_lock(&intr_lock);
225
226         /* check if the insterrupt source for the fd is existent */
227         TAILQ_FOREACH(src, &intr_sources, next)
228                 if (src->intr_handle.fd == intr_handle->fd)
229                         break;
230
231         /* No interrupt source registered for the fd */
232         if (src == NULL) {
233                 ret = -ENOENT;
234
235         /* only usable if the source is active */
236         } else if (src->active == 0) {
237                 ret = -EAGAIN;
238
239         } else {
240                 ret = 0;
241
242                 /* walk through the callbacks and mark all that match. */
243                 for (cb = TAILQ_FIRST(&src->callbacks); cb != NULL; cb = next) {
244                         next = TAILQ_NEXT(cb, next);
245                         if (cb->cb_fn == cb_fn && (cb_arg == (void *)-1 ||
246                                         cb->cb_arg == cb_arg)) {
247                                 cb->pending_delete = 1;
248                                 cb->ucb_fn = ucb_fn;
249                                 ret++;
250                         }
251                 }
252         }
253
254         rte_spinlock_unlock(&intr_lock);
255
256         return ret;
257 }
258
/*
 * Immediately remove all callbacks matching cb_fn/cb_arg for the given
 * handle (cb_arg == (void *)-1 is a wildcard) and, once the callback
 * list is empty, drop the source and its kqueue registration.
 *
 * Returns the number of callbacks removed, or a negative error:
 * -EINVAL on bad parameters, -ENODEV if the kqueue is inactive or the
 * handle cannot be converted, -ENOENT if no source matches the fd,
 * -EAGAIN if the source is currently being serviced (use
 * rte_intr_callback_unregister_pending() in that case).
 */
int
rte_intr_callback_unregister(const struct rte_intr_handle *intr_handle,
		rte_intr_callback_fn cb_fn, void *cb_arg)
{
	int ret;
	struct rte_intr_source *src;
	struct rte_intr_callback *cb, *next;

	/* do parameter checking first */
	if (intr_handle == NULL || intr_handle->fd < 0) {
		RTE_LOG(ERR, EAL,
		"Unregistering with invalid input parameter\n");
		return -EINVAL;
	}
	if (kq < 0) {
		RTE_LOG(ERR, EAL, "Kqueue is not active\n");
		return -ENODEV;
	}

	rte_spinlock_lock(&intr_lock);

	/* check if an interrupt source exists for the fd */
	TAILQ_FOREACH(src, &intr_sources, next)
		if (src->intr_handle.fd == intr_handle->fd)
			break;

	/* No interrupt source registered for the fd */
	if (src == NULL) {
		ret = -ENOENT;

	/* interrupt source has some active callbacks right now. */
	} else if (src->active != 0) {
		ret = -EAGAIN;

	/* ok to remove. */
	} else {
		struct kevent ke;

		ret = 0;

		/* remove it from the kqueue */
		memset(&ke, 0, sizeof(ke));
		ke.flags = EV_DELETE; /* mark for deletion from the queue */

		if (intr_source_to_kevent(intr_handle, &ke) < 0) {
			RTE_LOG(ERR, EAL, "Cannot convert to kevent\n");
			ret = -ENODEV;
			goto out;
		}

		/**
		 * remove intr file descriptor from wait list.
		 */
		if (kevent(kq, &ke, 1, NULL, 0, NULL) < 0) {
			RTE_LOG(ERR, EAL, "Error removing fd %d kevent, %s\n",
				src->intr_handle.fd, strerror(errno));
			/* removing a non-existent event is an expected
			 * condition in some circumstances (e.g. oneshot
			 * events), so this is deliberately non-fatal.
			 */
		}

		/* walk through the callbacks and remove all that match. */
		for (cb = TAILQ_FIRST(&src->callbacks); cb != NULL; cb = next) {
			next = TAILQ_NEXT(cb, next);
			if (cb->cb_fn == cb_fn && (cb_arg == (void *)-1 ||
					cb->cb_arg == cb_arg)) {
				TAILQ_REMOVE(&src->callbacks, cb, next);
				free(cb);
				ret++;
			}
		}

		/* all callbacks for that source are removed. */
		if (TAILQ_EMPTY(&src->callbacks)) {
			TAILQ_REMOVE(&intr_sources, src, next);
			free(src);
		}
	}
out:
	rte_spinlock_unlock(&intr_lock);

	return ret;
}
342
343 int
344 rte_intr_enable(const struct rte_intr_handle *intr_handle)
345 {
346         if (intr_handle && intr_handle->type == RTE_INTR_HANDLE_VDEV)
347                 return 0;
348
349         if (!intr_handle || intr_handle->fd < 0 || intr_handle->uio_cfg_fd < 0)
350                 return -1;
351
352         switch (intr_handle->type) {
353         /* not used at this moment */
354         case RTE_INTR_HANDLE_ALARM:
355                 return -1;
356         /* not used at this moment */
357         case RTE_INTR_HANDLE_DEV_EVENT:
358                 return -1;
359         /* unknown handle type */
360         default:
361                 RTE_LOG(ERR, EAL,
362                         "Unknown handle type of fd %d\n",
363                                         intr_handle->fd);
364                 return -1;
365         }
366
367         return 0;
368 }
369
370 int
371 rte_intr_disable(const struct rte_intr_handle *intr_handle)
372 {
373         if (intr_handle && intr_handle->type == RTE_INTR_HANDLE_VDEV)
374                 return 0;
375
376         if (!intr_handle || intr_handle->fd < 0 || intr_handle->uio_cfg_fd < 0)
377                 return -1;
378
379         switch (intr_handle->type) {
380         /* not used at this moment */
381         case RTE_INTR_HANDLE_ALARM:
382                 return -1;
383         /* not used at this moment */
384         case RTE_INTR_HANDLE_DEV_EVENT:
385                 return -1;
386         /* unknown handle type */
387         default:
388                 RTE_LOG(ERR, EAL,
389                         "Unknown handle type of fd %d\n",
390                                         intr_handle->fd);
391                 return -1;
392         }
393
394         return 0;
395 }
396
397 int
398 rte_intr_ack(const struct rte_intr_handle *intr_handle)
399 {
400         if (intr_handle && intr_handle->type == RTE_INTR_HANDLE_VDEV)
401                 return 0;
402
403         return -1;
404 }
405
406 static void
407 eal_intr_process_interrupts(struct kevent *events, int nfds)
408 {
409         struct rte_intr_callback active_cb;
410         union rte_intr_read_buffer buf;
411         struct rte_intr_callback *cb, *next;
412         struct rte_intr_source *src;
413         bool call = false;
414         int n, bytes_read;
415         struct kevent ke;
416
417         for (n = 0; n < nfds; n++) {
418                 int event_fd = events[n].ident;
419
420                 rte_spinlock_lock(&intr_lock);
421                 TAILQ_FOREACH(src, &intr_sources, next)
422                         if (src->intr_handle.fd == event_fd)
423                                 break;
424                 if (src == NULL) {
425                         rte_spinlock_unlock(&intr_lock);
426                         continue;
427                 }
428
429                 /* mark this interrupt source as active and release the lock. */
430                 src->active = 1;
431                 rte_spinlock_unlock(&intr_lock);
432
433                 /* set the length to be read dor different handle type */
434                 switch (src->intr_handle.type) {
435                 case RTE_INTR_HANDLE_ALARM:
436                         bytes_read = 0;
437                         call = true;
438                         break;
439                 case RTE_INTR_HANDLE_VDEV:
440                 case RTE_INTR_HANDLE_EXT:
441                         bytes_read = 0;
442                         call = true;
443                         break;
444                 case RTE_INTR_HANDLE_DEV_EVENT:
445                         bytes_read = 0;
446                         call = true;
447                         break;
448                 default:
449                         bytes_read = 1;
450                         break;
451                 }
452
453                 if (bytes_read > 0) {
454                         /**
455                          * read out to clear the ready-to-be-read flag
456                          * for epoll_wait.
457                          */
458                         bytes_read = read(event_fd, &buf, bytes_read);
459                         if (bytes_read < 0) {
460                                 if (errno == EINTR || errno == EWOULDBLOCK)
461                                         continue;
462
463                                 RTE_LOG(ERR, EAL, "Error reading from file "
464                                         "descriptor %d: %s\n",
465                                         event_fd,
466                                         strerror(errno));
467                         } else if (bytes_read == 0)
468                                 RTE_LOG(ERR, EAL, "Read nothing from file "
469                                         "descriptor %d\n", event_fd);
470                         else
471                                 call = true;
472                 }
473
474                 /* grab a lock, again to call callbacks and update status. */
475                 rte_spinlock_lock(&intr_lock);
476
477                 if (call) {
478                         /* Finally, call all callbacks. */
479                         TAILQ_FOREACH(cb, &src->callbacks, next) {
480
481                                 /* make a copy and unlock. */
482                                 active_cb = *cb;
483                                 rte_spinlock_unlock(&intr_lock);
484
485                                 /* call the actual callback */
486                                 active_cb.cb_fn(active_cb.cb_arg);
487
488                                 /*get the lock back. */
489                                 rte_spinlock_lock(&intr_lock);
490                         }
491                 }
492
493                 /* we done with that interrupt source, release it. */
494                 src->active = 0;
495
496                 /* check if any callback are supposed to be removed */
497                 for (cb = TAILQ_FIRST(&src->callbacks); cb != NULL; cb = next) {
498                         next = TAILQ_NEXT(cb, next);
499                         if (cb->pending_delete) {
500                                 /* remove it from the kqueue */
501                                 memset(&ke, 0, sizeof(ke));
502                                 /* mark for deletion from the queue */
503                                 ke.flags = EV_DELETE;
504
505                                 if (intr_source_to_kevent(&src->intr_handle, &ke) < 0) {
506                                         RTE_LOG(ERR, EAL, "Cannot convert to kevent\n");
507                                         rte_spinlock_unlock(&intr_lock);
508                                         return;
509                                 }
510
511                                 /**
512                                  * remove intr file descriptor from wait list.
513                                  */
514                                 if (kevent(kq, &ke, 1, NULL, 0, NULL) < 0) {
515                                         RTE_LOG(ERR, EAL, "Error removing fd %d kevent, "
516                                                 "%s\n", src->intr_handle.fd,
517                                                 strerror(errno));
518                                         /* removing non-existent even is an expected
519                                          * condition in some circumstances
520                                          * (e.g. oneshot events).
521                                          */
522                                 }
523
524                                 TAILQ_REMOVE(&src->callbacks, cb, next);
525                                 if (cb->ucb_fn)
526                                         cb->ucb_fn(&src->intr_handle, cb->cb_arg);
527                                 free(cb);
528                         }
529                 }
530
531                 /* all callbacks for that source are removed. */
532                 if (TAILQ_EMPTY(&src->callbacks)) {
533                         TAILQ_REMOVE(&intr_sources, src, next);
534                         free(src);
535                 }
536
537                 rte_spinlock_unlock(&intr_lock);
538         }
539 }
540
541 static void *
542 eal_intr_thread_main(void *arg __rte_unused)
543 {
544         struct kevent events[MAX_INTR_EVENTS];
545         int nfds;
546
547         /* host thread, never break out */
548         for (;;) {
549                 /* do not change anything, just wait */
550                 nfds = kevent(kq, NULL, 0, events, MAX_INTR_EVENTS, NULL);
551
552                 /* kevent fail */
553                 if (nfds < 0) {
554                         if (errno == EINTR)
555                                 continue;
556                         RTE_LOG(ERR, EAL,
557                                 "kevent returns with fail\n");
558                         break;
559                 }
560                 /* kevent timeout, will never happen here */
561                 else if (nfds == 0)
562                         continue;
563
564                 /* kevent has at least one fd ready to read */
565                 eal_intr_process_interrupts(events, nfds);
566         }
567         close(kq);
568         kq = -1;
569         return NULL;
570 }
571
572 int
573 rte_eal_intr_init(void)
574 {
575         int ret = 0;
576
577         /* init the global interrupt source head */
578         TAILQ_INIT(&intr_sources);
579
580         kq = kqueue();
581         if (kq < 0) {
582                 RTE_LOG(ERR, EAL, "Cannot create kqueue instance\n");
583                 return -1;
584         }
585
586         /* create the host thread to wait/handle the interrupt */
587         ret = rte_ctrl_thread_create(&intr_thread, "eal-intr-thread", NULL,
588                         eal_intr_thread_main, NULL);
589         if (ret != 0) {
590                 rte_errno = -ret;
591                 RTE_LOG(ERR, EAL,
592                         "Failed to create thread for interrupt handling\n");
593         }
594
595         return ret;
596 }
597
/* Rx interrupt control is not implemented on FreeBSD. */
int
rte_intr_rx_ctl(struct rte_intr_handle *intr_handle,
		int epfd, int op, unsigned int vec, void *data)
{
	(void)intr_handle;
	(void)epfd;
	(void)op;
	(void)vec;
	(void)data;

	return -ENOTSUP;
}
610
/* Event fds are not used on FreeBSD; report success with none created. */
int
rte_intr_efd_enable(struct rte_intr_handle *intr_handle, uint32_t nb_efd)
{
	(void)intr_handle;
	(void)nb_efd;

	return 0;
}
619
/* Nothing to tear down: event fds are never created on FreeBSD. */
void
rte_intr_efd_disable(struct rte_intr_handle *intr_handle)
{
	(void)intr_handle;
}
625
/* Datapath (Rx) interrupts are never enabled on FreeBSD. */
int
rte_intr_dp_is_en(struct rte_intr_handle *intr_handle)
{
	(void)intr_handle;

	return 0;
}
632
/* With no datapath interrupts in use, the misc interrupt is always free. */
int
rte_intr_allow_others(struct rte_intr_handle *intr_handle)
{
	(void)intr_handle;

	return 1;
}
639
/* Multiple interrupt vectors are not supported on FreeBSD. */
int
rte_intr_cap_multiple(struct rte_intr_handle *intr_handle)
{
	(void)intr_handle;

	return 0;
}
646
/* epoll emulation is not implemented on FreeBSD. */
int
rte_epoll_wait(int epfd, struct rte_epoll_event *events,
		int maxevents, int timeout)
{
	(void)epfd;
	(void)events;
	(void)maxevents;
	(void)timeout;

	return -ENOTSUP;
}
658
/* epoll emulation is not implemented on FreeBSD. */
int
rte_epoll_ctl(int epfd, int op, int fd, struct rte_epoll_event *event)
{
	(void)epfd;
	(void)op;
	(void)fd;
	(void)event;

	return -ENOTSUP;
}
669
/* No per-thread epoll fd exists on FreeBSD. */
int
rte_intr_tls_epfd(void)
{
	return -ENOTSUP;
}
675
/* Nothing is allocated per handle on FreeBSD; intentionally a no-op. */
void
rte_intr_free_epoll_fd(struct rte_intr_handle *intr_handle)
{
	(void)intr_handle;
}
681
682 int rte_thread_is_intr(void)
683 {
684         return pthread_equal(intr_thread, pthread_self());
685 }