/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2018 Intel Corporation
 */

#include <string.h>
#include <sys/types.h>
#include <sys/event.h>
#include <sys/queue.h>
#include <unistd.h>

#include <rte_errno.h>
#include <rte_lcore.h>
#include <rte_spinlock.h>
#include <rte_common.h>
#include <rte_interrupts.h>
#include <rte_eal_trace.h>

#include "eal_private.h"
#include "eal_alarm_private.h"

#define MAX_INTR_EVENTS 16

/**
 * Union buffer for reading from different devices.
 */
union rte_intr_read_buffer {
        char charbuf[16];                /* for others */
};

TAILQ_HEAD(rte_intr_cb_list, rte_intr_callback);
TAILQ_HEAD(rte_intr_source_list, rte_intr_source);

struct rte_intr_callback {
        TAILQ_ENTRY(rte_intr_callback) next;
        rte_intr_callback_fn cb_fn;  /**< callback address */
        void *cb_arg;                /**< parameter for callback */
        uint8_t pending_delete;      /**< delete after callback is called */
        rte_intr_unregister_callback_fn ucb_fn; /**< fn to call before cb is deleted */
};

struct rte_intr_source {
        TAILQ_ENTRY(rte_intr_source) next;
        struct rte_intr_handle intr_handle; /**< interrupt handle */
        struct rte_intr_cb_list callbacks;  /**< user callbacks */
        uint32_t active;
};

/* global spinlock for interrupt data operations */
static rte_spinlock_t intr_lock = RTE_SPINLOCK_INITIALIZER;

/* interrupt sources list */
static struct rte_intr_source_list intr_sources;

/* interrupt handling thread */
static pthread_t intr_thread;

static volatile int kq = -1;
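
/*
 * Everything below is built on the kqueue(2)/kevent(2) pattern shown
 * in this minimal sketch (the fd here is hypothetical):
 *
 *        int kq = kqueue();
 *        struct kevent ke, ev[MAX_INTR_EVENTS];
 *
 *        EV_SET(&ke, fd, EVFILT_READ, EV_ADD, 0, 0, NULL);
 *        kevent(kq, &ke, 1, NULL, 0, NULL);          <- register fd
 *        int n = kevent(kq, NULL, 0, ev,
 *                       MAX_INTR_EVENTS, NULL);      <- wait for events
 */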

static int
intr_source_to_kevent(const struct rte_intr_handle *ih, struct kevent *ke)
{
        /* alarm callbacks are a special case */
        if (ih->type == RTE_INTR_HANDLE_ALARM) {
                uint64_t timeout_ns;

                /* get soonest alarm timeout */
                if (eal_alarm_get_timeout_ns(&timeout_ns) < 0)
                        return -1;

                ke->filter = EVFILT_TIMER;
                /* timers are one shot */
                ke->flags |= EV_ONESHOT;
                ke->fflags = NOTE_NSECONDS;
                ke->data = timeout_ns;
        } else {
                ke->filter = EVFILT_READ;
        }
        ke->ident = ih->fd;

        return 0;
}
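
/*
 * Illustrative shape of the kevents produced by the helper above,
 * assuming a hypothetical handle `ih`:
 *
 *        struct kevent ke;
 *
 *        memset(&ke, 0, sizeof(ke));
 *        ke.flags = EV_ADD;
 *        if (intr_source_to_kevent(ih, &ke) == 0)
 *                kevent(kq, &ke, 1, NULL, 0, NULL);
 *
 * For RTE_INTR_HANDLE_ALARM handles this arms a one-shot EVFILT_TIMER
 * with the soonest alarm timeout in nanoseconds; for all other types
 * it installs an EVFILT_READ filter on ih->fd.
 */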

int
rte_intr_callback_register(const struct rte_intr_handle *intr_handle,
                rte_intr_callback_fn cb, void *cb_arg)
{
        struct rte_intr_callback *callback;
        struct rte_intr_source *src;
        int ret = 0, add_event = 0;

        /* first do parameter checking */
        if (intr_handle == NULL || intr_handle->fd < 0 || cb == NULL) {
                RTE_LOG(ERR, EAL,
                        "Registering with invalid input parameter\n");
                return -EINVAL;
        }
        if (kq < 0) {
                RTE_LOG(ERR, EAL, "Kqueue is not active: %d\n", kq);
                return -ENODEV;
        }

        rte_spinlock_lock(&intr_lock);

        /* find the source for this intr_handle */
        TAILQ_FOREACH(src, &intr_sources, next) {
                if (src->intr_handle.fd == intr_handle->fd)
                        break;
        }

        /* if this is an alarm interrupt and it already has a callback,
         * then we don't want to create a new callback because the only
         * thing on the list should be eal_alarm_callback() and we may
         * be called just to reset the timer.
         */
        if (src != NULL && src->intr_handle.type == RTE_INTR_HANDLE_ALARM &&
                 !TAILQ_EMPTY(&src->callbacks)) {
                callback = NULL;
        } else {
                /* allocate a new interrupt callback entity */
                callback = calloc(1, sizeof(*callback));
                if (callback == NULL) {
                        RTE_LOG(ERR, EAL, "Cannot allocate memory\n");
                        ret = -ENOMEM;
                        goto fail;
                }
                callback->cb_fn = cb;
                callback->cb_arg = cb_arg;
                callback->pending_delete = 0;
                callback->ucb_fn = NULL;

                if (src == NULL) {
                        src = calloc(1, sizeof(*src));
                        if (src == NULL) {
                                RTE_LOG(ERR, EAL, "Cannot allocate memory\n");
                                ret = -ENOMEM;
                                goto fail;
                        } else {
                                src->intr_handle = *intr_handle;
                                TAILQ_INIT(&src->callbacks);
                                TAILQ_INSERT_TAIL(&intr_sources, src, next);
                        }
                }

                /* we had no interrupts for this */
                if (TAILQ_EMPTY(&src->callbacks))
                        add_event = 1;

                TAILQ_INSERT_TAIL(&(src->callbacks), callback, next);
        }

        /* add events to the queue. timer events are special as we need to
         * re-set the timer.
         */
        if (add_event || src->intr_handle.type == RTE_INTR_HANDLE_ALARM) {
                struct kevent ke;

                memset(&ke, 0, sizeof(ke));
                ke.flags = EV_ADD; /* mark for addition to the queue */

                if (intr_source_to_kevent(intr_handle, &ke) < 0) {
                        RTE_LOG(ERR, EAL, "Cannot convert interrupt handle to kevent\n");
                        ret = -ENODEV;
                        goto fail;
                }

                /**
                 * add the intr file descriptor to the wait list.
                 */
                if (kevent(kq, &ke, 1, NULL, 0, NULL) < 0) {
                        /* currently, nic_uio does not support interrupts, so
                         * this error will always be triggered and output to
                         * the user. so, don't output it unless the debug log
                         * level is set.
                         */
                        if (errno == ENODEV)
                                RTE_LOG(DEBUG, EAL, "Interrupt handle %d not supported\n",
                                        src->intr_handle.fd);
                        else
                                RTE_LOG(ERR, EAL, "Error adding fd %d "
                                                "kevent, %s\n",
                                                src->intr_handle.fd,
                                                strerror(errno));
                        ret = -errno;
                        goto fail;
                }
        }
        rte_eal_trace_intr_callback_register(intr_handle, cb, cb_arg, ret);
        rte_spinlock_unlock(&intr_lock);

        return 0;
fail:
        /* clean up */
        if (src != NULL) {
                if (callback != NULL)
                        TAILQ_REMOVE(&(src->callbacks), callback, next);
                if (TAILQ_EMPTY(&(src->callbacks))) {
                        TAILQ_REMOVE(&intr_sources, src, next);
                        free(src);
                }
        }
        free(callback);
        rte_eal_trace_intr_callback_register(intr_handle, cb, cb_arg, ret);
        rte_spinlock_unlock(&intr_lock);
        return ret;
}
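
/*
 * Usage sketch for the registration API above; `dev_fd`, `struct
 * my_dev` and `handle_device_event()` are hypothetical application
 * names, not part of DPDK:
 *
 *        static void
 *        dev_intr_cb(void *arg)
 *        {
 *                struct my_dev *dev = arg;
 *
 *                handle_device_event(dev);
 *        }
 *
 *        struct rte_intr_handle ih;
 *
 *        memset(&ih, 0, sizeof(ih));
 *        ih.fd = dev_fd;
 *        ih.type = RTE_INTR_HANDLE_EXT;
 *        if (rte_intr_callback_register(&ih, dev_intr_cb, dev) < 0)
 *                rte_panic("cannot register interrupt callback\n");
 */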

int
rte_intr_callback_unregister_pending(const struct rte_intr_handle *intr_handle,
                                rte_intr_callback_fn cb_fn, void *cb_arg,
                                rte_intr_unregister_callback_fn ucb_fn)
{
        int ret;
        struct rte_intr_source *src;
        struct rte_intr_callback *cb, *next;

        /* do parameter checking first */
        if (intr_handle == NULL || intr_handle->fd < 0) {
                RTE_LOG(ERR, EAL,
                "Unregistering with invalid input parameter\n");
                return -EINVAL;
        }

        if (kq < 0) {
                RTE_LOG(ERR, EAL, "Kqueue is not active\n");
                return -ENODEV;
        }

        rte_spinlock_lock(&intr_lock);

        /* check if an interrupt source exists for the fd */
        TAILQ_FOREACH(src, &intr_sources, next)
                if (src->intr_handle.fd == intr_handle->fd)
                        break;

        /* No interrupt source registered for the fd */
        if (src == NULL) {
                ret = -ENOENT;

        /* only usable if the source is active */
        } else if (src->active == 0) {
                ret = -EAGAIN;

        } else {
                ret = 0;

                /* walk through the callbacks and mark all that match. */
                for (cb = TAILQ_FIRST(&src->callbacks); cb != NULL; cb = next) {
                        next = TAILQ_NEXT(cb, next);
                        if (cb->cb_fn == cb_fn && (cb_arg == (void *)-1 ||
                                        cb->cb_arg == cb_arg)) {
                                cb->pending_delete = 1;
                                cb->ucb_fn = ucb_fn;
                                ret++;
                        }
                }
        }

        rte_spinlock_unlock(&intr_lock);

        return ret;
}
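
/*
 * Sketch of deferring removal from inside a callback, where the
 * blocking rte_intr_callback_unregister() would return -EAGAIN
 * because the source is active; `ih` and the callback names are
 * hypothetical:
 *
 *        static void
 *        release_cb(struct rte_intr_handle *handle, void *arg)
 *        {
 *                free(arg);
 *        }
 *
 *        static void
 *        oneshot_cb(void *arg)
 *        {
 *                rte_intr_callback_unregister_pending(&ih, oneshot_cb,
 *                                arg, release_cb);
 *        }
 *
 * The marked callback is actually removed, and release_cb() invoked,
 * by eal_intr_process_interrupts() once the callback walk finishes
 * and src->active has been cleared.
 */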

int
rte_intr_callback_unregister(const struct rte_intr_handle *intr_handle,
                rte_intr_callback_fn cb_fn, void *cb_arg)
{
        int ret;
        struct rte_intr_source *src;
        struct rte_intr_callback *cb, *next;

        /* do parameter checking first */
        if (intr_handle == NULL || intr_handle->fd < 0) {
                RTE_LOG(ERR, EAL,
                "Unregistering with invalid input parameter\n");
                return -EINVAL;
        }
        if (kq < 0) {
                RTE_LOG(ERR, EAL, "Kqueue is not active\n");
                return -ENODEV;
        }

        rte_spinlock_lock(&intr_lock);

        /* check if an interrupt source exists for the fd */
        TAILQ_FOREACH(src, &intr_sources, next)
                if (src->intr_handle.fd == intr_handle->fd)
                        break;

        /* No interrupt source registered for the fd */
        if (src == NULL) {
                ret = -ENOENT;

        /* interrupt source has some active callbacks right now. */
        } else if (src->active != 0) {
                ret = -EAGAIN;

        /* ok to remove. */
        } else {
                struct kevent ke;

                ret = 0;

                /* remove it from the kqueue */
                memset(&ke, 0, sizeof(ke));
                ke.flags = EV_DELETE; /* mark for deletion from the queue */

                if (intr_source_to_kevent(intr_handle, &ke) < 0) {
                        RTE_LOG(ERR, EAL, "Cannot convert to kevent\n");
                        ret = -ENODEV;
                        goto out;
                }

                /**
                 * remove intr file descriptor from the wait list.
                 */
                if (kevent(kq, &ke, 1, NULL, 0, NULL) < 0) {
                        RTE_LOG(ERR, EAL, "Error removing fd %d kevent, %s\n",
                                src->intr_handle.fd, strerror(errno));
                        /* removing a non-existent event is an expected
                         * condition in some circumstances
                         * (e.g. oneshot events).
                         */
                }

                /* walk through the callbacks and remove all that match. */
                for (cb = TAILQ_FIRST(&src->callbacks); cb != NULL; cb = next) {
                        next = TAILQ_NEXT(cb, next);
                        if (cb->cb_fn == cb_fn && (cb_arg == (void *)-1 ||
                                        cb->cb_arg == cb_arg)) {
                                TAILQ_REMOVE(&src->callbacks, cb, next);
                                free(cb);
                                ret++;
                        }
                }

                /* all callbacks for that source are removed. */
                if (TAILQ_EMPTY(&src->callbacks)) {
                        TAILQ_REMOVE(&intr_sources, src, next);
                        free(src);
                }
        }
out:
        rte_eal_trace_intr_callback_unregister(intr_handle, cb_fn, cb_arg,
                ret);
        rte_spinlock_unlock(&intr_lock);

        return ret;
}
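
/*
 * From a non-interrupt thread, callers typically spin on the -EAGAIN
 * returned while the source is being serviced; a minimal sketch,
 * reusing the hypothetical names from the earlier example:
 *
 *        int ret;
 *
 *        do {
 *                ret = rte_intr_callback_unregister(&ih, dev_intr_cb,
 *                                dev);
 *        } while (ret == -EAGAIN);
 */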

int
rte_intr_enable(const struct rte_intr_handle *intr_handle)
{
        int rc = 0;

        if (intr_handle && intr_handle->type == RTE_INTR_HANDLE_VDEV) {
                rc = 0;
                goto out;
        }

        if (!intr_handle || intr_handle->fd < 0 ||
                                intr_handle->uio_cfg_fd < 0) {
                rc = -1;
                goto out;
        }

        switch (intr_handle->type) {
        /* not used at this moment */
        case RTE_INTR_HANDLE_ALARM:
                rc = -1;
                break;
        /* not used at this moment */
        case RTE_INTR_HANDLE_DEV_EVENT:
                rc = -1;
                break;
        /* unknown handle type */
        default:
                RTE_LOG(ERR, EAL,
                        "Unknown handle type of fd %d\n",
                                        intr_handle->fd);
                rc = -1;
                break;
        }

out:
        rte_eal_trace_intr_enable(intr_handle, rc);
        return rc;
}

int
rte_intr_disable(const struct rte_intr_handle *intr_handle)
{
        int rc = 0;

        if (intr_handle && intr_handle->type == RTE_INTR_HANDLE_VDEV) {
                rc = 0;
                goto out;
        }

        if (!intr_handle || intr_handle->fd < 0 ||
                                intr_handle->uio_cfg_fd < 0) {
                rc = -1;
                goto out;
        }

        switch (intr_handle->type) {
        /* not used at this moment */
        case RTE_INTR_HANDLE_ALARM:
                rc = -1;
                break;
        /* not used at this moment */
        case RTE_INTR_HANDLE_DEV_EVENT:
                rc = -1;
                break;
        /* unknown handle type */
        default:
                RTE_LOG(ERR, EAL,
                        "Unknown handle type of fd %d\n",
                                        intr_handle->fd);
                rc = -1;
                break;
        }
out:
        rte_eal_trace_intr_disable(intr_handle, rc);
        return rc;
}

int
rte_intr_ack(const struct rte_intr_handle *intr_handle)
{
        if (intr_handle && intr_handle->type == RTE_INTR_HANDLE_VDEV)
                return 0;

        return -1;
}

static void
eal_intr_process_interrupts(struct kevent *events, int nfds)
{
        struct rte_intr_callback active_cb;
        union rte_intr_read_buffer buf;
        struct rte_intr_callback *cb, *next;
        struct rte_intr_source *src;
        bool call;
        int n, bytes_read;
        struct kevent ke;

        for (n = 0; n < nfds; n++) {
                int event_fd = events[n].ident;

                /* reset the call flag so a failed read for this event
                 * cannot inherit a stale value from a previous iteration.
                 */
                call = false;

                rte_spinlock_lock(&intr_lock);
                TAILQ_FOREACH(src, &intr_sources, next)
                        if (src->intr_handle.fd == event_fd)
                                break;
                if (src == NULL) {
                        rte_spinlock_unlock(&intr_lock);
                        continue;
                }

                /* mark this interrupt source as active and release the lock. */
                src->active = 1;
                rte_spinlock_unlock(&intr_lock);

                /* set the length to be read for different handle types */
                switch (src->intr_handle.type) {
                case RTE_INTR_HANDLE_ALARM:
                        bytes_read = 0;
                        call = true;
                        break;
                case RTE_INTR_HANDLE_VDEV:
                case RTE_INTR_HANDLE_EXT:
                        bytes_read = 0;
                        call = true;
                        break;
                case RTE_INTR_HANDLE_DEV_EVENT:
                        bytes_read = 0;
                        call = true;
                        break;
                default:
                        bytes_read = 1;
                        break;
                }

                if (bytes_read > 0) {
                        /**
                         * read out to clear the ready-to-be-read flag
                         * for the next kevent wait.
                         */
                        bytes_read = read(event_fd, &buf, bytes_read);
                        if (bytes_read < 0) {
                                if (errno == EINTR || errno == EWOULDBLOCK)
                                        continue;

                                RTE_LOG(ERR, EAL, "Error reading from file "
                                        "descriptor %d: %s\n",
                                        event_fd,
                                        strerror(errno));
                        } else if (bytes_read == 0)
                                RTE_LOG(ERR, EAL, "Read nothing from file "
                                        "descriptor %d\n", event_fd);
                        else
                                call = true;
                }

                /* grab the lock again to call callbacks and update status. */
                rte_spinlock_lock(&intr_lock);

                if (call) {
                        /* Finally, call all callbacks. */
                        TAILQ_FOREACH(cb, &src->callbacks, next) {

                                /* make a copy and unlock. */
                                active_cb = *cb;
                                rte_spinlock_unlock(&intr_lock);

                                /* call the actual callback */
                                active_cb.cb_fn(active_cb.cb_arg);

                                /* get the lock back. */
                                rte_spinlock_lock(&intr_lock);
                        }
                }

                /* we are done with that interrupt source, release it. */
                src->active = 0;

                /* check if any callbacks are supposed to be removed */
                for (cb = TAILQ_FIRST(&src->callbacks); cb != NULL; cb = next) {
                        next = TAILQ_NEXT(cb, next);
                        if (cb->pending_delete) {
                                /* remove it from the kqueue */
                                memset(&ke, 0, sizeof(ke));
                                /* mark for deletion from the queue */
                                ke.flags = EV_DELETE;

                                if (intr_source_to_kevent(&src->intr_handle, &ke) < 0) {
                                        RTE_LOG(ERR, EAL, "Cannot convert to kevent\n");
                                        rte_spinlock_unlock(&intr_lock);
                                        return;
                                }

                                /**
                                 * remove intr file descriptor from the wait list.
                                 */
                                if (kevent(kq, &ke, 1, NULL, 0, NULL) < 0) {
                                        RTE_LOG(ERR, EAL, "Error removing fd %d kevent, "
                                                "%s\n", src->intr_handle.fd,
                                                strerror(errno));
                                        /* removing a non-existent event is an
                                         * expected condition in some circumstances
                                         * (e.g. oneshot events).
                                         */
                                }

                                TAILQ_REMOVE(&src->callbacks, cb, next);
                                if (cb->ucb_fn)
                                        cb->ucb_fn(&src->intr_handle, cb->cb_arg);
                                free(cb);
                        }
                }

                /* all callbacks for that source are removed. */
                if (TAILQ_EMPTY(&src->callbacks)) {
                        TAILQ_REMOVE(&intr_sources, src, next);
                        free(src);
                }

                rte_spinlock_unlock(&intr_lock);
        }
}

static void *
eal_intr_thread_main(void *arg __rte_unused)
{
        struct kevent events[MAX_INTR_EVENTS];
        int nfds;

        /* host thread, never break out */
        for (;;) {
                /* do not change anything, just wait */
                nfds = kevent(kq, NULL, 0, events, MAX_INTR_EVENTS, NULL);

                /* kevent failed */
                if (nfds < 0) {
                        if (errno == EINTR)
                                continue;
                        RTE_LOG(ERR, EAL,
                                "kevent returned with error\n");
                        break;
                }
                /* kevent timeout, will never happen here */
                else if (nfds == 0)
                        continue;

                /* kevent has at least one fd ready to read */
                eal_intr_process_interrupts(events, nfds);
        }
        close(kq);
        kq = -1;
        return NULL;
}

int
rte_eal_intr_init(void)
{
        int ret = 0;

        /* init the global interrupt source head */
        TAILQ_INIT(&intr_sources);

        kq = kqueue();
        if (kq < 0) {
                RTE_LOG(ERR, EAL, "Cannot create kqueue instance\n");
                return -1;
        }

        /* create the host thread to wait/handle the interrupt */
        ret = rte_ctrl_thread_create(&intr_thread, "eal-intr-thread", NULL,
                        eal_intr_thread_main, NULL);
        if (ret != 0) {
                rte_errno = -ret;
                RTE_LOG(ERR, EAL,
                        "Failed to create thread for interrupt handling\n");
        }

        return ret;
}

int
rte_intr_rx_ctl(struct rte_intr_handle *intr_handle,
                int epfd, int op, unsigned int vec, void *data)
{
        RTE_SET_USED(intr_handle);
        RTE_SET_USED(epfd);
        RTE_SET_USED(op);
        RTE_SET_USED(vec);
        RTE_SET_USED(data);

        return -ENOTSUP;
}

int
rte_intr_efd_enable(struct rte_intr_handle *intr_handle, uint32_t nb_efd)
{
        RTE_SET_USED(intr_handle);
        RTE_SET_USED(nb_efd);

        return 0;
}

void
rte_intr_efd_disable(struct rte_intr_handle *intr_handle)
{
        RTE_SET_USED(intr_handle);
}

int
rte_intr_dp_is_en(struct rte_intr_handle *intr_handle)
{
        RTE_SET_USED(intr_handle);
        return 0;
}

int
rte_intr_allow_others(struct rte_intr_handle *intr_handle)
{
        RTE_SET_USED(intr_handle);
        return 1;
}

int
rte_intr_cap_multiple(struct rte_intr_handle *intr_handle)
{
        RTE_SET_USED(intr_handle);
        return 0;
}

int
rte_epoll_wait(int epfd, struct rte_epoll_event *events,
                int maxevents, int timeout)
{
        RTE_SET_USED(epfd);
        RTE_SET_USED(events);
        RTE_SET_USED(maxevents);
        RTE_SET_USED(timeout);

        return -ENOTSUP;
}

int
rte_epoll_ctl(int epfd, int op, int fd, struct rte_epoll_event *event)
{
        RTE_SET_USED(epfd);
        RTE_SET_USED(op);
        RTE_SET_USED(fd);
        RTE_SET_USED(event);

        return -ENOTSUP;
}

int
rte_intr_tls_epfd(void)
{
        return -ENOTSUP;
}

void
rte_intr_free_epoll_fd(struct rte_intr_handle *intr_handle)
{
        RTE_SET_USED(intr_handle);
}

int rte_thread_is_intr(void)
{
        return pthread_equal(intr_thread, pthread_self());
}
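
/*
 * Sketch of picking between the blocking and deferred unregister paths
 * based on the calling thread, reusing the hypothetical names from the
 * earlier examples:
 *
 *        if (rte_thread_is_intr())
 *                rte_intr_callback_unregister_pending(&ih, dev_intr_cb,
 *                                dev, release_cb);
 *        else
 *                rte_intr_callback_unregister(&ih, dev_intr_cb, dev);
 */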