/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2018 Intel Corporation
 */

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <sys/event.h>
#include <sys/queue.h>
#include <unistd.h>

#include <rte_errno.h>
#include <rte_lcore.h>
#include <rte_spinlock.h>
#include <rte_common.h>
#include <rte_interrupts.h>

#include "eal_private.h"
#include "eal_alarm_private.h"

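/* upper bound on the number of kevents drained per kevent() wakeup */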
#define MAX_INTR_EVENTS 16

/**
 * union buffer for reading on different devices
 */
union rte_intr_read_buffer {
        char charbuf[16];                /* for non-alarm handle types */
};

TAILQ_HEAD(rte_intr_cb_list, rte_intr_callback);
TAILQ_HEAD(rte_intr_source_list, rte_intr_source);

struct rte_intr_callback {
        TAILQ_ENTRY(rte_intr_callback) next;
        rte_intr_callback_fn cb_fn;  /**< callback address */
        void *cb_arg;                /**< parameter for callback */
};

struct rte_intr_source {
        TAILQ_ENTRY(rte_intr_source) next;
        struct rte_intr_handle intr_handle; /**< interrupt handle */
        struct rte_intr_cb_list callbacks;  /**< user callbacks */
        uint32_t active;             /**< non-zero while callbacks run */
};

/* global spinlock for interrupt data operations */
static rte_spinlock_t intr_lock = RTE_SPINLOCK_INITIALIZER;

/* interrupt sources list */
static struct rte_intr_source_list intr_sources;

/* interrupt handling thread */
static pthread_t intr_thread;

/* kqueue fd used by the interrupt thread; -1 until rte_eal_intr_init() */
static volatile int kq = -1;

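/*
 * Fill in a struct kevent for the given interrupt handle. Alarm handles
 * are mapped to a one-shot EVFILT_TIMER armed with the soonest pending
 * alarm timeout; every other handle type is mapped to EVFILT_READ on the
 * handle's file descriptor.
 */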
static int
intr_source_to_kevent(const struct rte_intr_handle *ih, struct kevent *ke)
{
        /* alarm callbacks are a special case */
        if (ih->type == RTE_INTR_HANDLE_ALARM) {
                uint64_t timeout_ns;

                /* get soonest alarm timeout */
                if (eal_alarm_get_timeout_ns(&timeout_ns) < 0)
                        return -1;

                ke->filter = EVFILT_TIMER;
                /* timers are one shot */
                ke->flags |= EV_ONESHOT;
                ke->fflags = NOTE_NSECONDS;
                ke->data = timeout_ns;
        } else {
                ke->filter = EVFILT_READ;
        }
        ke->ident = ih->fd;

        return 0;
}

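/*
 * Register a callback for the given interrupt handle. Illustrative usage
 * (a sketch only: "my_handler", "my_ctx" and "dev" are hypothetical
 * application symbols, not part of this file):
 *
 *      static void my_handler(void *my_ctx);
 *      ...
 *      ret = rte_intr_callback_register(&dev->intr_handle,
 *                      my_handler, my_ctx);
 *
 * Several callbacks may share one fd; the fd is added to the kqueue only
 * when its callback list was previously empty, except for alarm handles,
 * which are re-added each time to re-arm the one-shot timer.
 */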
int
rte_intr_callback_register(const struct rte_intr_handle *intr_handle,
                rte_intr_callback_fn cb, void *cb_arg)
{
        struct rte_intr_callback *callback = NULL;
        struct rte_intr_source *src = NULL;
        int ret, add_event = 0;

        /* first do parameter checking */
        if (intr_handle == NULL || intr_handle->fd < 0 || cb == NULL) {
                RTE_LOG(ERR, EAL,
                        "Registering with invalid input parameter\n");
                return -EINVAL;
        }
        if (kq < 0) {
                RTE_LOG(ERR, EAL, "Kqueue is not active: %d\n", kq);
                return -ENODEV;
        }

        /* allocate a new interrupt callback entity */
        callback = calloc(1, sizeof(*callback));
        if (callback == NULL) {
                RTE_LOG(ERR, EAL, "Cannot allocate memory\n");
                return -ENOMEM;
        }
        callback->cb_fn = cb;
        callback->cb_arg = cb_arg;

        rte_spinlock_lock(&intr_lock);

        /* check if a source is already registered for the fd */
        TAILQ_FOREACH(src, &intr_sources, next) {
                if (src->intr_handle.fd == intr_handle->fd) {
                        /* no callbacks yet, so the fd is not in the kqueue */
                        if (TAILQ_EMPTY(&src->callbacks))
                                add_event = 1;

                        TAILQ_INSERT_TAIL(&(src->callbacks), callback, next);
                        ret = 0;
                        break;
                }
        }

        /* no existing source for this fd - add a new one */
        if (src == NULL) {
                src = calloc(1, sizeof(*src));
                if (src == NULL) {
                        RTE_LOG(ERR, EAL, "Cannot allocate memory\n");
                        ret = -ENOMEM;
                        goto fail;
                } else {
                        src->intr_handle = *intr_handle;
                        TAILQ_INIT(&src->callbacks);
                        TAILQ_INSERT_TAIL(&(src->callbacks), callback, next);
                        TAILQ_INSERT_TAIL(&intr_sources, src, next);
                        add_event = 1;
                        ret = 0;
                }
        }

        /* add events to the queue. timer events are special as we need to
         * re-set the timer.
         */
        if (add_event || src->intr_handle.type == RTE_INTR_HANDLE_ALARM) {
                struct kevent ke;

                memset(&ke, 0, sizeof(ke));
                ke.flags = EV_ADD; /* mark for addition to the queue */

                if (intr_source_to_kevent(intr_handle, &ke) < 0) {
                        RTE_LOG(ERR, EAL, "Cannot convert interrupt handle to kevent\n");
                        ret = -ENODEV;
                        goto fail;
                }

                /**
                 * add the intr file descriptor to the wait list.
                 */
                if (kevent(kq, &ke, 1, NULL, 0, NULL) < 0) {
                        /* currently, nic_uio does not support interrupts, so
                         * this error will always be triggered and output to
                         * the user. so don't output it unless the debug log
                         * level is set.
                         */
                        if (errno == ENODEV)
                                RTE_LOG(DEBUG, EAL, "Interrupt handle %d not supported\n",
                                        src->intr_handle.fd);
                        else
                                RTE_LOG(ERR, EAL, "Error adding fd %d "
                                                "kevent, %s\n",
                                                src->intr_handle.fd,
                                                strerror(errno));
                        ret = -errno;
                        goto fail;
                }
        }
        rte_spinlock_unlock(&intr_lock);

        return ret;
fail:
        /* clean up */
        if (src != NULL) {
                TAILQ_REMOVE(&(src->callbacks), callback, next);
                if (TAILQ_EMPTY(&(src->callbacks))) {
                        TAILQ_REMOVE(&intr_sources, src, next);
                        free(src);
                }
        }
        free(callback);
        rte_spinlock_unlock(&intr_lock);
        return ret;
}

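/*
 * Unregister all callbacks matching cb_fn and cb_arg; cb_arg == (void *)-1
 * acts as a wildcard matching any argument. Returns the number of callbacks
 * removed, -EAGAIN if the source is currently servicing callbacks, -ENOENT
 * if no source exists for the fd, or -EINVAL/-ENODEV on parameter or
 * kqueue errors.
 */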
int
rte_intr_callback_unregister(const struct rte_intr_handle *intr_handle,
                rte_intr_callback_fn cb_fn, void *cb_arg)
{
        int ret;
        struct rte_intr_source *src;
        struct rte_intr_callback *cb, *next;

        /* do parameter checking first */
        if (intr_handle == NULL || intr_handle->fd < 0) {
                RTE_LOG(ERR, EAL,
                        "Unregistering with invalid input parameter\n");
                return -EINVAL;
        }
        if (kq < 0) {
                RTE_LOG(ERR, EAL, "Kqueue is not active\n");
                return -ENODEV;
        }

        rte_spinlock_lock(&intr_lock);

        /* check if an interrupt source exists for the fd */
        TAILQ_FOREACH(src, &intr_sources, next)
                if (src->intr_handle.fd == intr_handle->fd)
                        break;

        /* no interrupt source registered for the fd */
        if (src == NULL) {
                ret = -ENOENT;

        /* interrupt source has some active callbacks right now. */
        } else if (src->active != 0) {
                ret = -EAGAIN;

        /* ok to remove. */
        } else {
                struct kevent ke;

                ret = 0;

                /* remove it from the kqueue */
                memset(&ke, 0, sizeof(ke));
                ke.flags = EV_DELETE; /* mark for deletion from the queue */

                if (intr_source_to_kevent(intr_handle, &ke) < 0) {
                        RTE_LOG(ERR, EAL, "Cannot convert to kevent\n");
                        ret = -ENODEV;
                        goto out;
                }

                /**
                 * remove the intr file descriptor from the wait list.
                 */
                if (kevent(kq, &ke, 1, NULL, 0, NULL) < 0) {
                        RTE_LOG(ERR, EAL, "Error removing fd %d kevent, %s\n",
                                src->intr_handle.fd, strerror(errno));
                        /* removing a non-existent event is an expected
                         * condition in some circumstances
                         * (e.g. oneshot events).
                         */
                }

                /* walk through the callbacks and remove all that match. */
                for (cb = TAILQ_FIRST(&src->callbacks); cb != NULL; cb = next) {
                        next = TAILQ_NEXT(cb, next);
                        if (cb->cb_fn == cb_fn && (cb_arg == (void *)-1 ||
                                        cb->cb_arg == cb_arg)) {
                                TAILQ_REMOVE(&src->callbacks, cb, next);
                                free(cb);
                                ret++;
                        }
                }

                /* all callbacks for that source are removed. */
                if (TAILQ_EMPTY(&src->callbacks)) {
                        TAILQ_REMOVE(&intr_sources, src, next);
                        free(src);
                }
        }
out:
        rte_spinlock_unlock(&intr_lock);

        return ret;
}

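/*
 * On FreeBSD, nic_uio provides no interrupt control (see the note in
 * rte_intr_callback_register() above), so enable/disable succeed only for
 * handle types that need no action (VDEV) and fail for everything else.
 */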
int
rte_intr_enable(const struct rte_intr_handle *intr_handle)
{
        if (intr_handle && intr_handle->type == RTE_INTR_HANDLE_VDEV)
                return 0;

        if (!intr_handle || intr_handle->fd < 0 || intr_handle->uio_cfg_fd < 0)
                return -1;

        switch (intr_handle->type) {
        /* not used at this moment */
        case RTE_INTR_HANDLE_ALARM:
                return -1;
        /* not used at this moment */
        case RTE_INTR_HANDLE_DEV_EVENT:
                return -1;
        /* unknown handle type */
        default:
                RTE_LOG(ERR, EAL, "Unknown handle type of fd %d\n",
                        intr_handle->fd);
                return -1;
        }

        return 0;
}

int
rte_intr_disable(const struct rte_intr_handle *intr_handle)
{
        if (intr_handle && intr_handle->type == RTE_INTR_HANDLE_VDEV)
                return 0;

        if (!intr_handle || intr_handle->fd < 0 || intr_handle->uio_cfg_fd < 0)
                return -1;

        switch (intr_handle->type) {
        /* not used at this moment */
        case RTE_INTR_HANDLE_ALARM:
                return -1;
        /* not used at this moment */
        case RTE_INTR_HANDLE_DEV_EVENT:
                return -1;
        /* unknown handle type */
        default:
                RTE_LOG(ERR, EAL, "Unknown handle type of fd %d\n",
                        intr_handle->fd);
                return -1;
        }

        return 0;
}

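/*
 * Dispatch the callbacks attached to each triggered event. The source is
 * marked active and the lock is dropped while a copied callback runs, so
 * callbacks may themselves (un)register interrupts; unregistering the
 * source being serviced is refused with -EAGAIN while it is active.
 */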
static void
eal_intr_process_interrupts(struct kevent *events, int nfds)
{
        struct rte_intr_callback active_cb;
        union rte_intr_read_buffer buf;
        struct rte_intr_callback *cb;
        struct rte_intr_source *src;
        int n, bytes_read;

        for (n = 0; n < nfds; n++) {
                int event_fd = events[n].ident;
                bool call = false;

                rte_spinlock_lock(&intr_lock);
                TAILQ_FOREACH(src, &intr_sources, next)
                        if (src->intr_handle.fd == event_fd)
                                break;
                if (src == NULL) {
                        rte_spinlock_unlock(&intr_lock);
                        continue;
                }

                /* mark this interrupt source as active and release the lock. */
                src->active = 1;
                rte_spinlock_unlock(&intr_lock);

                /* set the length to be read for the different handle types */
                switch (src->intr_handle.type) {
                case RTE_INTR_HANDLE_ALARM:
                case RTE_INTR_HANDLE_VDEV:
                case RTE_INTR_HANDLE_EXT:
                case RTE_INTR_HANDLE_DEV_EVENT:
                        bytes_read = 0;
                        call = true;
                        break;
                default:
                        bytes_read = 1;
                        break;
                }

                if (bytes_read > 0) {
                        /**
                         * read out to clear the ready-to-be-read flag
                         * for the next kevent wait.
                         */
                        bytes_read = read(event_fd, &buf, bytes_read);
                        if (bytes_read < 0) {
                                if (errno == EINTR || errno == EWOULDBLOCK) {
                                        /* do not leave the source marked
                                         * active, or it could never be
                                         * unregistered.
                                         */
                                        rte_spinlock_lock(&intr_lock);
                                        src->active = 0;
                                        rte_spinlock_unlock(&intr_lock);
                                        continue;
                                }

                                RTE_LOG(ERR, EAL, "Error reading from file "
                                        "descriptor %d: %s\n",
                                        event_fd,
                                        strerror(errno));
                        } else if (bytes_read == 0)
                                RTE_LOG(ERR, EAL, "Read nothing from file "
                                        "descriptor %d\n", event_fd);
                        else
                                call = true;
                }

                /* grab the lock again to call callbacks and update status. */
                rte_spinlock_lock(&intr_lock);

                if (call) {
                        /* Finally, call all callbacks. */
                        TAILQ_FOREACH(cb, &src->callbacks, next) {

                                /* make a copy and unlock. */
                                active_cb = *cb;
                                rte_spinlock_unlock(&intr_lock);

                                /* call the actual callback */
                                active_cb.cb_fn(active_cb.cb_arg);

                                /* get the lock back. */
                                rte_spinlock_lock(&intr_lock);
                        }
                }

                /* we are done with this interrupt source, release it. */
                src->active = 0;
                rte_spinlock_unlock(&intr_lock);
        }
}

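/*
 * Body of the interrupt handling thread: block in kevent() indefinitely
 * and dispatch whatever events arrive. If kevent() fails for any reason
 * other than EINTR, the thread closes the kqueue and exits.
 */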
static void *
eal_intr_thread_main(void *arg __rte_unused)
{
        struct kevent events[MAX_INTR_EVENTS];
        int nfds;

        /* host thread, never break out */
        for (;;) {
                /* do not change anything, just wait */
                nfds = kevent(kq, NULL, 0, events, MAX_INTR_EVENTS, NULL);

                /* kevent failed */
                if (nfds < 0) {
                        if (errno == EINTR)
                                continue;
                        RTE_LOG(ERR, EAL,
                                "kevent wait returned with error: %s\n",
                                strerror(errno));
                        break;
                }
                /* kevent timeout, cannot happen here (no timeout is set) */
                else if (nfds == 0)
                        continue;

                /* kevent has at least one fd ready to read */
                eal_intr_process_interrupts(events, nfds);
        }
        close(kq);
        kq = -1;
        return NULL;
}

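/*
 * Create the kqueue and spawn the control thread ("eal-intr-thread")
 * that services it. Called once from the EAL initialization path.
 */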
int
rte_eal_intr_init(void)
{
        int ret = 0;

        /* init the global interrupt source head */
        TAILQ_INIT(&intr_sources);

        kq = kqueue();
        if (kq < 0) {
                RTE_LOG(ERR, EAL, "Cannot create kqueue instance\n");
                return -1;
        }

        /* create the host thread to wait for and handle interrupts */
        ret = rte_ctrl_thread_create(&intr_thread, "eal-intr-thread", NULL,
                        eal_intr_thread_main, NULL);
        if (ret != 0) {
                rte_errno = -ret;
                RTE_LOG(ERR, EAL,
                        "Failed to create thread for interrupt handling\n");
        }

        return ret;
}

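/*
 * The epoll-style RX interrupt APIs below are Linux-only; on FreeBSD they
 * are stubs that report no capability (-ENOTSUP) or a benign default.
 */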
int
rte_intr_rx_ctl(struct rte_intr_handle *intr_handle,
                int epfd, int op, unsigned int vec, void *data)
{
        RTE_SET_USED(intr_handle);
        RTE_SET_USED(epfd);
        RTE_SET_USED(op);
        RTE_SET_USED(vec);
        RTE_SET_USED(data);

        return -ENOTSUP;
}

int
rte_intr_efd_enable(struct rte_intr_handle *intr_handle, uint32_t nb_efd)
{
        RTE_SET_USED(intr_handle);
        RTE_SET_USED(nb_efd);

        return 0;
}

void
rte_intr_efd_disable(struct rte_intr_handle *intr_handle)
{
        RTE_SET_USED(intr_handle);
}

int
rte_intr_dp_is_en(struct rte_intr_handle *intr_handle)
{
        RTE_SET_USED(intr_handle);
        return 0;
}

int
rte_intr_allow_others(struct rte_intr_handle *intr_handle)
{
        RTE_SET_USED(intr_handle);
        return 1;
}

int
rte_intr_cap_multiple(struct rte_intr_handle *intr_handle)
{
        RTE_SET_USED(intr_handle);
        return 0;
}

int
rte_epoll_wait(int epfd, struct rte_epoll_event *events,
                int maxevents, int timeout)
{
        RTE_SET_USED(epfd);
        RTE_SET_USED(events);
        RTE_SET_USED(maxevents);
        RTE_SET_USED(timeout);

        return -ENOTSUP;
}

int
rte_epoll_ctl(int epfd, int op, int fd, struct rte_epoll_event *event)
{
        RTE_SET_USED(epfd);
        RTE_SET_USED(op);
        RTE_SET_USED(fd);
        RTE_SET_USED(event);

        return -ENOTSUP;
}

int
rte_intr_tls_epfd(void)
{
        return -ENOTSUP;
}

void
rte_intr_free_epoll_fd(struct rte_intr_handle *intr_handle)
{
        RTE_SET_USED(intr_handle);
}