eal/windows: add missing SPDX license tag
[dpdk.git] / lib / librte_eal / freebsd / eal_interrupts.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2018 Intel Corporation
3  */
4
5 #include <string.h>
6 #include <sys/types.h>
7 #include <sys/event.h>
8 #include <sys/queue.h>
9 #include <unistd.h>
10
11 #include <rte_errno.h>
12 #include <rte_lcore.h>
13 #include <rte_spinlock.h>
14 #include <rte_common.h>
15 #include <rte_interrupts.h>
16 #include <rte_eal_trace.h>
17
18 #include "eal_private.h"
19 #include "eal_alarm_private.h"
20
21 #define MAX_INTR_EVENTS 16
22
/**
 * Union buffer for draining readable interrupt file descriptors.
 * On FreeBSD only a small generic byte buffer is needed; reads are at
 * most sizeof(charbuf) bytes (see eal_intr_process_interrupts()).
 */
union rte_intr_read_buffer {
	char charbuf[16];                /* for others */
};
29
/* list-head types for the per-source callback list and the global source list */
TAILQ_HEAD(rte_intr_cb_list, rte_intr_callback);
TAILQ_HEAD(rte_intr_source_list, rte_intr_source);
32
/** One user callback registered on an interrupt source. */
struct rte_intr_callback {
	TAILQ_ENTRY(rte_intr_callback) next; /**< link in rte_intr_source::callbacks */
	rte_intr_callback_fn cb_fn;  /**< callback address */
	void *cb_arg;                /**< parameter for callback */
	uint8_t pending_delete;      /**< delete after callback is called */
	rte_intr_unregister_callback_fn ucb_fn; /**< fn to call before cb is deleted */
};
40
/** An interrupt source: one fd and the callbacks registered on it. */
struct rte_intr_source {
	TAILQ_ENTRY(rte_intr_source) next;  /**< link in the global intr_sources list */
	struct rte_intr_handle intr_handle; /**< interrupt handle */
	struct rte_intr_cb_list callbacks;  /**< user callbacks */
	uint32_t active; /**< nonzero while callbacks are being dispatched */
};
47
/* global spinlock for interrupt data operation */
static rte_spinlock_t intr_lock = RTE_SPINLOCK_INITIALIZER;

/* interrupt sources list */
static struct rte_intr_source_list intr_sources;

/* interrupt handling thread */
static pthread_t intr_thread;

/* kqueue fd serviced by the interrupt thread; -1 while not initialized */
static volatile int kq = -1;
58
/*
 * Fill in the kevent fields (filter/ident and, for alarms, the timer
 * parameters) for the given interrupt handle. The caller pre-sets
 * ke->flags; for alarms EV_ONESHOT is OR-ed in. Returns 0 on success,
 * -1 if the soonest alarm timeout cannot be obtained.
 */
static int
intr_source_to_kevent(const struct rte_intr_handle *ih, struct kevent *ke)
{
	if (ih->type != RTE_INTR_HANDLE_ALARM) {
		/* ordinary sources simply wait for the fd to be readable */
		ke->filter = EVFILT_READ;
	} else {
		/* alarms become a one-shot kqueue timer armed with the
		 * soonest pending alarm expiry
		 */
		uint64_t ns;

		if (eal_alarm_get_timeout_ns(&ns) < 0)
			return -1;

		ke->filter = EVFILT_TIMER;
		ke->flags |= EV_ONESHOT; /* timers fire exactly once */
		ke->fflags = NOTE_NSECONDS;
		ke->data = ns;
	}
	ke->ident = ih->fd;

	return 0;
}
82
/**
 * Register callback @cb (with argument @cb_arg) for the interrupt source
 * identified by @intr_handle->fd. Creates the source and adds its fd to
 * the kqueue on first registration. Alarm handles are special: only one
 * callback is kept on their list, but the kqueue timer is re-armed on
 * every call.
 *
 * @return 0 on success, negative errno-style value on failure.
 */
int
rte_intr_callback_register(const struct rte_intr_handle *intr_handle,
		rte_intr_callback_fn cb, void *cb_arg)
{
	struct rte_intr_callback *callback;
	struct rte_intr_source *src;
	int ret = 0, add_event = 0;

	/* first do parameter checking */
	if (intr_handle == NULL || intr_handle->fd < 0 || cb == NULL) {
		RTE_LOG(ERR, EAL,
			"Registering with invalid input parameter\n");
		return -EINVAL;
	}
	if (kq < 0) {
		RTE_LOG(ERR, EAL, "Kqueue is not active: %d\n", kq);
		return -ENODEV;
	}

	rte_spinlock_lock(&intr_lock);

	/* find the source for this intr_handle */
	TAILQ_FOREACH(src, &intr_sources, next) {
		if (src->intr_handle.fd == intr_handle->fd)
			break;
	}

	/* if this is an alarm interrupt and it already has a callback,
	 * then we don't want to create a new callback because the only
	 * thing on the list should be eal_alarm_callback() and we may
	 * be called just to reset the timer.
	 */
	if (src != NULL && src->intr_handle.type == RTE_INTR_HANDLE_ALARM &&
		 !TAILQ_EMPTY(&src->callbacks)) {
		callback = NULL;
	} else {
		/* allocate a new interrupt callback entity */
		callback = calloc(1, sizeof(*callback));
		if (callback == NULL) {
			RTE_LOG(ERR, EAL, "Can not allocate memory\n");
			ret = -ENOMEM;
			goto fail;
		}
		callback->cb_fn = cb;
		callback->cb_arg = cb_arg;
		callback->pending_delete = 0;
		callback->ucb_fn = NULL;

		if (src == NULL) {
			/* no source for this fd yet - create and link one */
			src = calloc(1, sizeof(*src));
			if (src == NULL) {
				RTE_LOG(ERR, EAL, "Can not allocate memory\n");
				ret = -ENOMEM;
				goto fail;
			} else {
				src->intr_handle = *intr_handle;
				TAILQ_INIT(&src->callbacks);
				TAILQ_INSERT_TAIL(&intr_sources, src, next);
			}
		}

		/* we had no interrupts for this */
		if (TAILQ_EMPTY(&src->callbacks))
			add_event = 1;

		TAILQ_INSERT_TAIL(&(src->callbacks), callback, next);
	}

	/* add events to the queue. timer events are special as we need to
	 * re-set the timer.
	 */
	if (add_event || src->intr_handle.type == RTE_INTR_HANDLE_ALARM) {
		struct kevent ke;

		memset(&ke, 0, sizeof(ke));
		ke.flags = EV_ADD; /* mark for addition to the queue */

		if (intr_source_to_kevent(intr_handle, &ke) < 0) {
			RTE_LOG(ERR, EAL, "Cannot convert interrupt handle to kevent\n");
			ret = -ENODEV;
			goto fail;
		}

		/**
		 * add the intr file descriptor into wait list.
		 */
		if (kevent(kq, &ke, 1, NULL, 0, NULL) < 0) {
			/* currently, nic_uio does not support interrupts, so
			 * this error will always be triggered and output to the
			 * user. so, don't output it unless debug log level set.
			 */
			if (errno == ENODEV)
				RTE_LOG(DEBUG, EAL, "Interrupt handle %d not supported\n",
					src->intr_handle.fd);
			else
				RTE_LOG(ERR, EAL, "Error adding fd %d "
						"kevent, %s\n",
						src->intr_handle.fd,
						strerror(errno));
			ret = -errno;
			goto fail;
		}
	}
	rte_eal_trace_intr_callback_register(intr_handle, cb, cb_arg, ret);
	rte_spinlock_unlock(&intr_lock);

	return 0;
fail:
	/* clean up: unlink and free anything added above so the lists are
	 * left exactly as they were on entry (callback is NULL if it was
	 * never allocated; src is freed only if it ended up empty).
	 */
	if (src != NULL) {
		if (callback != NULL)
			TAILQ_REMOVE(&(src->callbacks), callback, next);
		if (TAILQ_EMPTY(&(src->callbacks))) {
			TAILQ_REMOVE(&intr_sources, src, next);
			free(src);
		}
	}
	free(callback);
	rte_eal_trace_intr_callback_register(intr_handle, cb, cb_arg, ret);
	rte_spinlock_unlock(&intr_lock);
	return ret;
}
205
206 int
207 rte_intr_callback_unregister_pending(const struct rte_intr_handle *intr_handle,
208                                 rte_intr_callback_fn cb_fn, void *cb_arg,
209                                 rte_intr_unregister_callback_fn ucb_fn)
210 {
211         int ret;
212         struct rte_intr_source *src;
213         struct rte_intr_callback *cb, *next;
214
215         /* do parameter checking first */
216         if (intr_handle == NULL || intr_handle->fd < 0) {
217                 RTE_LOG(ERR, EAL,
218                 "Unregistering with invalid input parameter\n");
219                 return -EINVAL;
220         }
221
222         if (kq < 0) {
223                 RTE_LOG(ERR, EAL, "Kqueue is not active\n");
224                 return -ENODEV;
225         }
226
227         rte_spinlock_lock(&intr_lock);
228
229         /* check if the insterrupt source for the fd is existent */
230         TAILQ_FOREACH(src, &intr_sources, next)
231                 if (src->intr_handle.fd == intr_handle->fd)
232                         break;
233
234         /* No interrupt source registered for the fd */
235         if (src == NULL) {
236                 ret = -ENOENT;
237
238         /* only usable if the source is active */
239         } else if (src->active == 0) {
240                 ret = -EAGAIN;
241
242         } else {
243                 ret = 0;
244
245                 /* walk through the callbacks and mark all that match. */
246                 for (cb = TAILQ_FIRST(&src->callbacks); cb != NULL; cb = next) {
247                         next = TAILQ_NEXT(cb, next);
248                         if (cb->cb_fn == cb_fn && (cb_arg == (void *)-1 ||
249                                         cb->cb_arg == cb_arg)) {
250                                 cb->pending_delete = 1;
251                                 cb->ucb_fn = ucb_fn;
252                                 ret++;
253                         }
254                 }
255         }
256
257         rte_spinlock_unlock(&intr_lock);
258
259         return ret;
260 }
261
/**
 * Immediately remove callbacks matching @cb_fn/@cb_arg from the source
 * registered for @intr_handle->fd, deleting the fd from the kqueue and
 * freeing the source once its callback list is empty.
 * @cb_arg == (void *)-1 acts as a wildcard matching any argument.
 *
 * @return number of callbacks removed, -ENOENT if no source exists for
 * the fd, -EAGAIN if the source is currently dispatching (use
 * rte_intr_callback_unregister_pending() instead), or -EINVAL/-ENODEV
 * on bad input.
 */
int
rte_intr_callback_unregister(const struct rte_intr_handle *intr_handle,
		rte_intr_callback_fn cb_fn, void *cb_arg)
{
	int ret;
	struct rte_intr_source *src;
	struct rte_intr_callback *cb, *next;

	/* do parameter checking first */
	if (intr_handle == NULL || intr_handle->fd < 0) {
		RTE_LOG(ERR, EAL,
		"Unregistering with invalid input parameter\n");
		return -EINVAL;
	}
	if (kq < 0) {
		RTE_LOG(ERR, EAL, "Kqueue is not active\n");
		return -ENODEV;
	}

	rte_spinlock_lock(&intr_lock);

	/* check if an interrupt source for the fd exists */
	TAILQ_FOREACH(src, &intr_sources, next)
		if (src->intr_handle.fd == intr_handle->fd)
			break;

	/* No interrupt source registered for the fd */
	if (src == NULL) {
		ret = -ENOENT;

	/* interrupt source has some active callbacks right now. */
	} else if (src->active != 0) {
		ret = -EAGAIN;

	/* ok to remove. */
	} else {
		struct kevent ke;

		ret = 0;

		/* remove it from the kqueue */
		memset(&ke, 0, sizeof(ke));
		ke.flags = EV_DELETE; /* mark for deletion from the queue */

		if (intr_source_to_kevent(intr_handle, &ke) < 0) {
			RTE_LOG(ERR, EAL, "Cannot convert to kevent\n");
			ret = -ENODEV;
			goto out;
		}

		/**
		 * remove intr file descriptor from wait list.
		 */
		if (kevent(kq, &ke, 1, NULL, 0, NULL) < 0) {
			RTE_LOG(ERR, EAL, "Error removing fd %d kevent, %s\n",
				src->intr_handle.fd, strerror(errno));
			/* removing non-existent even is an expected condition
			 * in some circumstances (e.g. oneshot events).
			 */
		}

		/* walk through the callbacks and remove all that match. */
		for (cb = TAILQ_FIRST(&src->callbacks); cb != NULL; cb = next) {
			next = TAILQ_NEXT(cb, next);
			if (cb->cb_fn == cb_fn && (cb_arg == (void *)-1 ||
					cb->cb_arg == cb_arg)) {
				TAILQ_REMOVE(&src->callbacks, cb, next);
				free(cb);
				ret++;
			}
		}

		/* all callbacks for that source are removed. */
		if (TAILQ_EMPTY(&src->callbacks)) {
			TAILQ_REMOVE(&intr_sources, src, next);
			free(src);
		}
	}
out:
	rte_eal_trace_intr_callback_unregister(intr_handle, cb_fn, cb_arg,
		ret);
	rte_spinlock_unlock(&intr_lock);

	return ret;
}
347
348 int
349 rte_intr_enable(const struct rte_intr_handle *intr_handle)
350 {
351         int rc = 0;
352
353         if (intr_handle == NULL)
354                 return -1;
355
356         if (intr_handle->type == RTE_INTR_HANDLE_VDEV) {
357                 rc = 0;
358                 goto out;
359         }
360
361         if (intr_handle->fd < 0 || intr_handle->uio_cfg_fd < 0) {
362                 rc = -1;
363                 goto out;
364         }
365
366         switch (intr_handle->type) {
367         /* not used at this moment */
368         case RTE_INTR_HANDLE_ALARM:
369                 rc = -1;
370                 break;
371         /* not used at this moment */
372         case RTE_INTR_HANDLE_DEV_EVENT:
373                 rc = -1;
374                 break;
375         /* unknown handle type */
376         default:
377                 RTE_LOG(ERR, EAL,
378                         "Unknown handle type of fd %d\n",
379                                         intr_handle->fd);
380                 rc = -1;
381                 break;
382         }
383
384 out:
385         rte_eal_trace_intr_enable(intr_handle, rc);
386         return rc;
387 }
388
389 int
390 rte_intr_disable(const struct rte_intr_handle *intr_handle)
391 {
392         int rc = 0;
393
394         if (intr_handle == NULL)
395                 return -1;
396
397         if (intr_handle->type == RTE_INTR_HANDLE_VDEV) {
398                 rc = 0;
399                 goto out;
400         }
401
402         if (intr_handle->fd < 0 || intr_handle->uio_cfg_fd < 0) {
403                 rc = -1;
404                 goto out;
405         }
406
407         switch (intr_handle->type) {
408         /* not used at this moment */
409         case RTE_INTR_HANDLE_ALARM:
410                 rc = -1;
411                 break;
412         /* not used at this moment */
413         case RTE_INTR_HANDLE_DEV_EVENT:
414                 rc = -1;
415                 break;
416         /* unknown handle type */
417         default:
418                 RTE_LOG(ERR, EAL,
419                         "Unknown handle type of fd %d\n",
420                                         intr_handle->fd);
421                 rc = -1;
422                 break;
423         }
424 out:
425         rte_eal_trace_intr_disable(intr_handle, rc);
426         return rc;
427 }
428
429 int
430 rte_intr_ack(const struct rte_intr_handle *intr_handle)
431 {
432         if (intr_handle && intr_handle->type == RTE_INTR_HANDLE_VDEV)
433                 return 0;
434
435         return -1;
436 }
437
/**
 * Dispatch the kqueue events returned by kevent() to the registered user
 * callbacks, then perform any deferred (pending_delete) removals.
 *
 * Locking: intr_lock protects the source/callback lists but is released
 * around each user callback invocation, so callbacks may themselves
 * (un)register callbacks. A source is marked active while its callbacks
 * run so concurrent unregistration is refused/deferred.
 *
 * Fixes vs. previous version:
 *  - 'call' is now reset for every event; it previously persisted across
 *    loop iterations and could dispatch callbacks for an fd whose read
 *    had failed or returned nothing.
 *  - on EINTR/EWOULDBLOCK the source's active flag is cleared before
 *    skipping to the next event; it was previously left set forever,
 *    making rte_intr_callback_unregister() return -EAGAIN indefinitely.
 */
static void
eal_intr_process_interrupts(struct kevent *events, int nfds)
{
	struct rte_intr_callback active_cb;
	union rte_intr_read_buffer buf;
	struct rte_intr_callback *cb, *next;
	struct rte_intr_source *src;
	int n, bytes_read;
	struct kevent ke;

	for (n = 0; n < nfds; n++) {
		int event_fd = events[n].ident;
		bool call = false; /* per-event; do not carry over */

		rte_spinlock_lock(&intr_lock);
		TAILQ_FOREACH(src, &intr_sources, next)
			if (src->intr_handle.fd == event_fd)
				break;
		if (src == NULL) {
			rte_spinlock_unlock(&intr_lock);
			continue;
		}

		/* mark this interrupt source as active and release the lock. */
		src->active = 1;
		rte_spinlock_unlock(&intr_lock);

		/* set the length to be read for different handle types */
		switch (src->intr_handle.type) {
		case RTE_INTR_HANDLE_ALARM:
		case RTE_INTR_HANDLE_VDEV:
		case RTE_INTR_HANDLE_EXT:
		case RTE_INTR_HANDLE_DEV_EVENT:
			/* nothing to drain for these; dispatch unconditionally */
			bytes_read = 0;
			call = true;
			break;
		default:
			bytes_read = 1;
			break;
		}

		if (bytes_read > 0) {
			/**
			 * read out to clear the ready-to-be-read flag
			 * for epoll_wait.
			 */
			bytes_read = read(event_fd, &buf, bytes_read);
			if (bytes_read < 0) {
				if (errno == EINTR || errno == EWOULDBLOCK) {
					/* transient failure: release the
					 * source before moving to the next
					 * event so it can be unregistered
					 * later.
					 */
					rte_spinlock_lock(&intr_lock);
					src->active = 0;
					rte_spinlock_unlock(&intr_lock);
					continue;
				}

				RTE_LOG(ERR, EAL, "Error reading from file "
					"descriptor %d: %s\n",
					event_fd,
					strerror(errno));
			} else if (bytes_read == 0)
				RTE_LOG(ERR, EAL, "Read nothing from file "
					"descriptor %d\n", event_fd);
			else
				call = true;
		}

		/* grab a lock, again to call callbacks and update status. */
		rte_spinlock_lock(&intr_lock);

		if (call) {
			/* Finally, call all callbacks. */
			TAILQ_FOREACH(cb, &src->callbacks, next) {

				/* make a copy and unlock. */
				active_cb = *cb;
				rte_spinlock_unlock(&intr_lock);

				/* call the actual callback */
				active_cb.cb_fn(active_cb.cb_arg);

				/* get the lock back. */
				rte_spinlock_lock(&intr_lock);
			}
		}

		/* we are done with that interrupt source, release it. */
		src->active = 0;

		/* check if any callbacks are supposed to be removed */
		for (cb = TAILQ_FIRST(&src->callbacks); cb != NULL; cb = next) {
			next = TAILQ_NEXT(cb, next);
			if (!cb->pending_delete)
				continue;

			/* remove it from the kqueue */
			memset(&ke, 0, sizeof(ke));
			/* mark for deletion from the queue */
			ke.flags = EV_DELETE;

			if (intr_source_to_kevent(&src->intr_handle, &ke) < 0) {
				RTE_LOG(ERR, EAL, "Cannot convert to kevent\n");
				rte_spinlock_unlock(&intr_lock);
				return;
			}

			/**
			 * remove intr file descriptor from wait list.
			 */
			if (kevent(kq, &ke, 1, NULL, 0, NULL) < 0) {
				RTE_LOG(ERR, EAL, "Error removing fd %d kevent, "
					"%s\n", src->intr_handle.fd,
					strerror(errno));
				/* removing non-existent even is an expected
				 * condition in some circumstances
				 * (e.g. oneshot events).
				 */
			}

			TAILQ_REMOVE(&src->callbacks, cb, next);
			if (cb->ucb_fn)
				cb->ucb_fn(&src->intr_handle, cb->cb_arg);
			free(cb);
		}

		/* all callbacks for that source are removed. */
		if (TAILQ_EMPTY(&src->callbacks)) {
			TAILQ_REMOVE(&intr_sources, src, next);
			free(src);
		}

		rte_spinlock_unlock(&intr_lock);
	}
}
572
/*
 * Body of the dedicated interrupt-handling thread: block in kevent()
 * and dispatch whatever it returns, forever. Only a hard kevent()
 * failure (other than EINTR) terminates the loop, after which the
 * kqueue is closed and marked inactive.
 */
static void *
eal_intr_thread_main(void *arg __rte_unused)
{
	struct kevent events[MAX_INTR_EVENTS];

	for (;;) {
		/* block until at least one event arrives */
		int nfds = kevent(kq, NULL, 0, events, MAX_INTR_EVENTS, NULL);

		if (nfds < 0) {
			/* interrupted syscall: just retry */
			if (errno == EINTR)
				continue;
			RTE_LOG(ERR, EAL,
				"kevent returns with fail\n");
			break;
		}

		/* timeout cannot happen with a NULL timespec, but be safe */
		if (nfds == 0)
			continue;

		/* at least one fd is ready */
		eal_intr_process_interrupts(events, nfds);
	}

	close(kq);
	kq = -1;
	return NULL;
}
603
/**
 * Initialize the EAL interrupt subsystem: set up the source list,
 * create the kqueue and spawn the interrupt-handling control thread.
 *
 * Fix: if the control thread cannot be created, close the kqueue and
 * reset kq to -1 instead of leaking the fd and leaving kq pointing at
 * a queue nobody services.
 *
 * @return 0 on success, -1 if the kqueue cannot be created, or the
 * negative error from rte_ctrl_thread_create() (with rte_errno set).
 */
int
rte_eal_intr_init(void)
{
	int ret = 0;

	/* init the global interrupt source head */
	TAILQ_INIT(&intr_sources);

	kq = kqueue();
	if (kq < 0) {
		RTE_LOG(ERR, EAL, "Cannot create kqueue instance\n");
		return -1;
	}

	/* create the host thread to wait/handle the interrupt */
	ret = rte_ctrl_thread_create(&intr_thread, "eal-intr-thread", NULL,
			eal_intr_thread_main, NULL);
	if (ret != 0) {
		rte_errno = -ret;
		RTE_LOG(ERR, EAL,
			"Failed to create thread for interrupt handling\n");
		/* don't leak the kqueue: no thread will ever service it */
		close(kq);
		kq = -1;
	}

	return ret;
}
629
/* Rx-queue interrupt control via epoll is not supported on FreeBSD. */
int
rte_intr_rx_ctl(struct rte_intr_handle *intr_handle,
		int epfd, int op, unsigned int vec, void *data)
{
	(void)intr_handle;
	(void)epfd;
	(void)op;
	(void)vec;
	(void)data;

	return -ENOTSUP;
}
642
/* Event fds are not used on FreeBSD; succeed as a no-op. */
int
rte_intr_efd_enable(struct rte_intr_handle *intr_handle, uint32_t nb_efd)
{
	(void)intr_handle;
	(void)nb_efd;

	return 0;
}
651
/* No-op counterpart of rte_intr_efd_enable() on FreeBSD. */
void
rte_intr_efd_disable(struct rte_intr_handle *intr_handle)
{
	(void)intr_handle;
}
657
/* Datapath (Rx) interrupts are always reported disabled on FreeBSD. */
int
rte_intr_dp_is_en(struct rte_intr_handle *intr_handle)
{
	(void)intr_handle;

	return 0;
}
664
/* Non-datapath interrupts are always reported as allowed on FreeBSD. */
int
rte_intr_allow_others(struct rte_intr_handle *intr_handle)
{
	(void)intr_handle;

	return 1;
}
671
/* Multiple-vector interrupt capability is always reported absent. */
int
rte_intr_cap_multiple(struct rte_intr_handle *intr_handle)
{
	(void)intr_handle;

	return 0;
}
678
/* epoll emulation is not provided on FreeBSD. */
int
rte_epoll_wait(int epfd, struct rte_epoll_event *events,
		int maxevents, int timeout)
{
	(void)epfd;
	(void)events;
	(void)maxevents;
	(void)timeout;

	return -ENOTSUP;
}
690
/* Interruptible variant of rte_epoll_wait(); equally unsupported here. */
int
rte_epoll_wait_interruptible(int epfd, struct rte_epoll_event *events,
			     int maxevents, int timeout)
{
	(void)epfd;
	(void)events;
	(void)maxevents;
	(void)timeout;

	return -ENOTSUP;
}
702
/* epoll control operations are not supported on FreeBSD. */
int
rte_epoll_ctl(int epfd, int op, int fd, struct rte_epoll_event *event)
{
	(void)epfd;
	(void)op;
	(void)fd;
	(void)event;

	return -ENOTSUP;
}
713
/* There is no per-thread epoll fd on FreeBSD. */
int
rte_intr_tls_epfd(void)
{
	return -ENOTSUP;
}
719
/* No-op: epoll fds are never allocated by this backend. */
void
rte_intr_free_epoll_fd(struct rte_intr_handle *intr_handle)
{
	(void)intr_handle;
}
725
726 int rte_thread_is_intr(void)
727 {
728         return pthread_equal(intr_thread, pthread_self());
729 }