 * SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2015 Intel Corporation.
 * Copyright 2012 Hasan Alayli <halayli@gmail.com>
 *
 * @b EXPERIMENTAL: this API may change without prior notice
 *
 * This file contains the public API for the L-thread subsystem
 *
 * The L-thread subsystem provides a simple cooperative scheduler to
 * enable arbitrary functions to run as cooperative threads within a
 * single P-thread.
 *
 * The subsystem provides a P-thread like API that is intended to assist in
 * reuse of legacy code written for POSIX pthreads.
 *
 * The L-thread subsystem relies on cooperative multitasking; as such,
 * an L-thread must possess frequent rescheduling points. Often these
 * rescheduling points are provided transparently when the application
 * invokes an L-thread API.
 *
 * In some applications the program may enter a loop whose exit condition
 * depends on the action of another thread or a response from hardware. In
 * such a case it is necessary to yield the thread periodically in the loop
 * body, to allow other threads an opportunity to run. This can be done by
 * inserting a call to lthread_yield() or lthread_sleep(n) in the body of
 * the loop.
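 *
 * A minimal sketch of such a loop (rx_ready() and port are hypothetical
 * placeholders for whatever condition the application polls):
 *
 *     while (!rx_ready(port))
 *         lthread_yield();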
 *
 * If the application makes expensive / blocking system calls or does other
 * work that would take an inordinate amount of time to complete, this will
 * stall the cooperative scheduler, resulting in very poor performance.
 *
 * In such cases an L-thread can be migrated temporarily to another scheduler
 * running in a different P-thread on another core. When the expensive or
 * blocking operation is completed it can be migrated back to the original
 * scheduler. In this way other threads can continue to run on the original
 * scheduler and will be completely unaffected by the blocking behaviour.
 * To migrate an L-thread to another scheduler the API lthread_set_affinity()
 * is available.
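 *
 * A minimal sketch of this pattern, assuming an lcore (here io_lcore) has
 * been set aside for blocking work and blocking_io() is a hypothetical
 * placeholder for the expensive call:
 *
 *     unsigned orig_lcore = rte_lcore_id();
 *
 *     lthread_set_affinity(io_lcore);    // resume on the other scheduler
 *     blocking_io();                     // expensive / blocking operation
 *     lthread_set_affinity(orig_lcore);  // migrate back when done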
 *
 * If L-threads that share data are running on the same core it is possible
 * to design programs where mutual exclusion mechanisms to protect shared data
 * can be avoided. This is due to the fact that the cooperative threads cannot
 * preempt each other.
 *
 * There are two cases where mutual exclusion mechanisms are necessary.
 *
 *  a) Where the L-threads sharing data are running on different cores.
 *  b) Where code must yield while updating data shared with another thread.
 *
 * The L-thread subsystem provides a set of mutex APIs to help with such
 * scenarios; however, excessive reliance on these will impact performance
 * and is best avoided if possible.
 *
 * L-threads can synchronise using a fast condition variable implementation
 * that supports signal and broadcast. An L-thread running on any core can
 * wait on a condition.
 *
 * L-threads can have L-thread local storage with an API modelled on either
 * the P-thread get/set specific API or on PER_LTHREAD macros modelled on the
 * RTE_PER_LCORE macros. Alternatively a simple user data pointer may be set
 * and retrieved from a thread.
#include <sys/socket.h>
#include <netinet/in.h>

#include <rte_cycles.h>

struct lthread_condattr;
struct lthread_mutexattr;

typedef void *(*lthread_func_t) (void *);
 * Define the size of stack for an lthread
 * This is the size that will be allocated on lthread creation
 * This is a fixed size and will not grow.
#define LTHREAD_MAX_STACK_SIZE (1024*64)
 * Define the maximum number of TLS keys that can be created
#define LTHREAD_MAX_KEYS 1024

 * Define the maximum number of attempts to destroy an lthread's
 * TLS data on thread exit
#define LTHREAD_DESTRUCTOR_ITERATIONS 4

 * Define the maximum number of lcores that will support lthreads
#define LTHREAD_MAX_LCORES RTE_MAX_LCORE
 * How many lthread objects to pre-allocate as the system grows
 * applies to lthreads + stacks, TLS, mutexes, cond vars.
 * @see _lthread_alloc()
 * @see _mutex_alloc()
#define LTHREAD_PREALLOC 100
 * Set the number of schedulers in the system.
 *
 * This function may optionally be called before starting schedulers.
 *
 * If the number of schedulers is not set, or set to 0, then each scheduler
 * will begin scheduling lthreads as soon as it is started.
 *
 * If the number of schedulers is set to greater than 0, then each scheduler
 * will wait until all schedulers have started before beginning to schedule
 * lthreads.
 *
 * If an application wishes to have threads migrate between cores using
 * lthread_set_affinity(), or join threads running on other cores using
 * lthread_join(), then it is prudent to set the number of schedulers to ensure
 * that all schedulers are initialised beforehand.
 *
 * @param num
 *   the number of schedulers in the system
 * @return
 *   the number of schedulers in the system
int lthread_num_schedulers_set(int num);
 * Return the number of schedulers currently running
 *
 * @return
 *   the number of schedulers in the system
int lthread_active_schedulers(void);
 * Shutdown the specified scheduler
 *
 * This function tells the specified scheduler to
 * exit if/when there is no more work to do.
 *
 * Note that although the scheduler will stop,
 * its resources are not freed.
 *
 * @param lcore
 *   The lcore of the scheduler to shutdown
void lthread_scheduler_shutdown(unsigned lcore);
 * Shutdown all schedulers
 *
 * This function tells all schedulers including the current scheduler to
 * exit if/when there is no more work to do.
 *
 * Note that although the schedulers will stop,
 * their resources are not freed.
void lthread_scheduler_shutdown_all(void);
 * Run the lthread scheduler
 *
 * Runs the lthread scheduler.
 * This function returns only if/when all lthreads have exited.
 * This function must be the main loop of an EAL thread.
void lthread_run(void);
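/*
 * Illustrative startup sketch (not part of this API). It assumes one extra
 * worker lcore, identified here by the placeholder worker_lcore, and an
 * application-defined lthread_func_t called initial_lthread():
 *
 *    static int sched_main(void *arg __rte_unused)
 *    {
 *            struct lthread *lt;
 *
 *            // creating an lthread also creates this lcore's scheduler
 *            lthread_create(&lt, -1, initial_lthread, NULL);
 *            lthread_run();          // returns when all lthreads have exited
 *            return 0;
 *    }
 *
 *    // in main(), after rte_eal_init():
 *    lthread_num_schedulers_set(2);             // wait for both schedulers
 *    rte_eal_remote_launch(sched_main, NULL, worker_lcore);
 *    sched_main(NULL);                          // scheduler on this lcore
 */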
 * Creates an lthread and places it in the ready queue on a particular
 * lcore.
 *
 * If no scheduler exists yet on the current lcore then one is created.
 *
 * @param new_lt
 *   Pointer to an lthread pointer that will be initialized
 * @param lcore
 *   the lcore the thread should be started on or the current lcore
 *    -1 the current lcore
 *    0 - LTHREAD_MAX_LCORES any other lcore
 * @param func
 *   Pointer to the function for the thread to run
 * @param arg
 *   Pointer to args that will be passed to the thread
 * @return
 *   EAGAIN no resources available
 *   EINVAL NULL thread or function pointer, or lcore_id out of range
int
lthread_create(struct lthread **new_lt,
	       int lcore, lthread_func_t func, void *arg);
 * Cancels an lthread and causes it to be terminated.
 * If the lthread is detached it will be freed immediately;
 * otherwise its resources will not be released until it is joined.
 *
 * @param lt
 *   Pointer to an lthread that will be cancelled
 * @return
 *   EINVAL thread was NULL
int lthread_cancel(struct lthread *lt);
 * Joins the current thread with the specified lthread, and waits for that
 * thread to exit.
 * Passes an optional pointer to collect returned data.
 *
 * @param lt
 *   Pointer to the lthread to be joined
 * @param ptr
 *   Pointer to pointer to collect returned data
 * @return
 *   EINVAL lthread could not be joined.
int lthread_join(struct lthread *lt, void **ptr);
 * Detaches the current thread.
 * On exit a detached lthread will be freed immediately and will not wait
 * to be joined. The default state for a thread is not detached.
void lthread_detach(void);
 * Terminate the current thread, optionally return data.
 * The data may be collected by lthread_join().
 *
 * After calling this function the lthread will be suspended until it is
 * joined. After it is joined then its resources will be freed.
 *
 * @param val
 *   Pointer to data to be returned
void lthread_exit(void *val);
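/*
 * Illustrative sketch of create / exit / join (worker and spawn_and_wait are
 * arbitrary application names; spawn_and_wait itself runs as an lthread):
 *
 *    static void *worker(void *arg)
 *    {
 *            // ... do some work ...
 *            lthread_exit(arg);       // value collected by lthread_join()
 *            return NULL;             // not reached
 *    }
 *
 *    static void spawn_and_wait(void)
 *    {
 *            struct lthread *lt;
 *            void *result;
 *
 *            lthread_create(&lt, -1, worker, (void *)"done");
 *            lthread_join(lt, &result);      // suspends until worker exits
 *    }
 */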
 * Cause the current lthread to sleep for n nanoseconds
 *
 * The current thread will be suspended until the specified time has elapsed
 * or has been exceeded.
 *
 * Execution will switch to the next lthread that is ready to run
 *
 * @param nsecs
 *   Number of nanoseconds to sleep
void lthread_sleep(uint64_t nsecs);
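/*
 * Illustrative usage: the argument is in nanoseconds, so a 10 ms pause in a
 * periodic loop could be written as
 *
 *    lthread_sleep(10 * 1000 * 1000);   // 10 ms expressed in nanoseconds
 */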
 * Cause the current lthread to sleep for n cpu clock ticks
 *
 * The current thread will be suspended until the specified time has elapsed
 * or has been exceeded.
 *
 * Execution will switch to the next lthread that is ready to run
 *
 * @param clks
 *   Number of clock ticks to sleep
void lthread_sleep_clks(uint64_t clks);
 * Yield the current lthread
 *
 * The current thread will yield and execution will switch to the
 * next lthread that is ready to run
void lthread_yield(void);
 * Migrate the current thread to another scheduler
 *
 * This function migrates the current thread to another scheduler.
 * Execution will switch to the next lthread that is ready to run on the
 * current scheduler. The current thread will be resumed on the new scheduler.
 *
 * @param lcore
 *   The lcore to migrate to
 * @return
 *   0 success we are now running on the specified core
 *   EINVAL the destination lcore was not valid
int lthread_set_affinity(unsigned lcore);
 * Return the current lthread
 *
 * Returns the current lthread
 *
 * @return
 *   pointer to the current lthread
struct lthread *lthread_current(void);
 * Associate user data with an lthread
 *
 * This function sets a user data pointer in the current lthread.
 * The pointer can be retrieved with lthread_get_data().
 * It is the user's responsibility to allocate and free any data referenced
 * by the user pointer.
 *
 * @param data
 *   pointer to user data
void lthread_set_data(void *data);
 * Get user data for the current lthread
 *
 * This function returns a user data pointer for the current lthread.
 * The pointer must first be set with lthread_set_data().
 * It is the user's responsibility to allocate and free any data referenced
 * by the user pointer.
 *
 * @return
 *   pointer to user data
void *lthread_get_data(void);
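/*
 * Illustrative sketch (struct app_ctx is a hypothetical application type):
 *
 *    struct app_ctx *ctx = malloc(sizeof(*ctx));
 *
 *    lthread_set_data(ctx);                        // attach to this lthread
 *    // ... later, from the same lthread ...
 *    ctx = (struct app_ctx *)lthread_get_data();   // retrieve it again
 */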
typedef void (*tls_destructor_func) (void *);

 * Create a key for lthread TLS
 *
 * This function is modelled on pthread_key_create().
 * It creates a thread-specific data key visible to all lthreads in the
 * system.
 *
 * Key values may be used to locate thread-specific data.
 * The same key value may be used by different threads, the values bound
 * to the key by lthread_setspecific() are maintained on a per-thread
 * basis and persist for the life of the calling thread.
 *
 * An optional destructor function may be associated with each key value.
 * At thread exit, if a key value has a non-NULL destructor pointer, and the
 * thread has a non-NULL value associated with the key, the function pointed
 * to is called with the current associated value as its sole argument.
 *
 * @param key
 *   Pointer to the key to be created
 * @param destructor
 *   Pointer to destructor function
 * @return
 *   EINVAL the key ptr was NULL
 *   EAGAIN no resources available
int lthread_key_create(unsigned int *key, tls_destructor_func destructor);
 * Delete key for lthread TLS
 *
 * This function is modelled on pthread_key_delete().
 * It deletes a thread-specific data key previously returned by
 * lthread_key_create().
 * The thread-specific data values associated with the key need not be NULL
 * at the time that lthread_key_delete is called.
 * It is the responsibility of the application to free any application
 * storage or perform any cleanup actions for data structures related to the
 * deleted key. This cleanup can be done either before or after
 * lthread_key_delete is called.
 *
 * @param key
 *   The key to be deleted
 * @return
 *   EINVAL the key was invalid
int lthread_key_delete(unsigned int key);
 * This function is modelled on pthread_getspecific().
 * It returns the value currently bound to the specified key on behalf of the
 * calling thread. Calling lthread_getspecific() with a key value not
 * obtained from lthread_key_create() or after key has been deleted with
 * lthread_key_delete() will result in undefined behaviour.
 * lthread_getspecific() may be called from a thread-specific data destructor
 * function.
 *
 * @param key
 *   The key for which data is requested
 * @return
 *   Pointer to the thread specific data associated with that key
 *   or NULL if no data has been set.
void *lthread_getspecific(unsigned int key);
 * This function is modelled on pthread_setspecific().
 * It associates a thread-specific value with a key obtained via a previous
 * call to lthread_key_create().
 * Different threads may bind different values to the same key. These values
 * are typically pointers to dynamically allocated memory that have been
 * reserved by the calling thread. Calling lthread_setspecific with a key
 * value not obtained from lthread_key_create or after the key has been
 * deleted with lthread_key_delete will result in undefined behaviour.
 *
 * @param key
 *   The key for which data is to be set
 * @param value
 *   Pointer to the user data
 * @return
 *   EINVAL the key was invalid
int lthread_setspecific(unsigned int key, const void *value);
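/*
 * Illustrative TLS sketch (my_key is an arbitrary name; the key is created
 * once, and free() serves as the destructor for each lthread's value):
 *
 *    static unsigned int my_key;
 *
 *    lthread_key_create(&my_key, free);
 *
 *    // inside each lthread:
 *    lthread_setspecific(my_key, malloc(64));
 *    void *p = lthread_getspecific(my_key);   // this lthread's private value
 */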
 * The macros below provide an alternative mechanism to access lthread local
 * storage.
 *
 * The macros can be used to declare, define and access per lthread local
 * storage in a similar way to the RTE_PER_LCORE macros, which control
 * per-lcore storage.
 *
 * Memory for per lthread variables declared in this way is allocated when the
 * lthread is created and a pointer to this memory is stored in the lthread.
 * The per lthread variables are accessed via the pointer + the offset of the
 * particular variable.
 *
 * The total size of per lthread storage, and the variable offsets, are found
 * by defining the variables in a unique global memory section, the start and
 * end of which are known. This global memory section is used only in the
 * computation of the addresses of the lthread variables, and is never
 * actually used to store any data.
 *
 * Due to the fact that variables declared this way may be scattered across
 * many files, the start and end of the section and variable offsets are only
 * known after linking, thus the computation of section size and variable
 * addresses is performed at run time.
 *
 * These macros are primarily provided to aid porting of code that makes use
 * of the existing RTE_PER_LCORE macros. In principle it would be more
 * efficient to gather all lthread local variables into a single structure and
 * set/retrieve a pointer to that struct using the alternative
 * lthread_set_data()/lthread_get_data() APIs.
 *
 * These macros are mutually exclusive with the lthread_set_data()/
 * lthread_get_data() APIs. If you define storage using these macros then
 * those APIs will not perform as expected: lthread_set_data() does nothing,
 * and lthread_get_data() returns the start of the global section.
/* start and end of per lthread section */
extern char __start_per_lt;
extern char __stop_per_lt;
 * Macro to define a per lthread variable "var" of type "type"
#define RTE_DEFINE_PER_LTHREAD(type, name) \
__typeof__(type)__attribute((section("per_lt"))) per_lt_##name
 * Macro to declare an extern per lthread variable "var" of type "type"
#define RTE_DECLARE_PER_LTHREAD(type, name) \
extern __typeof__(type)__attribute((section("per_lt"))) per_lt_##name
 * Read/write the per-lthread variable value
#define RTE_PER_LTHREAD(name) ((typeof(per_lt_##name) *)\
((char *)lthread_get_data() +\
((char *) &per_lt_##name - &__start_per_lt)))
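/*
 * Illustrative sketch of the per-lthread macros (pkt_count is an arbitrary
 * name). RTE_PER_LTHREAD() yields a pointer into the calling lthread's
 * private storage, so it is dereferenced on use:
 *
 *    RTE_DEFINE_PER_LTHREAD(uint64_t, pkt_count);
 *
 *    // inside an lthread:
 *    *RTE_PER_LTHREAD(pkt_count) = 0;
 *    (*RTE_PER_LTHREAD(pkt_count))++;
 */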
 * This function provides a mutual exclusion device, the need for which
 * can normally be avoided in a cooperative multitasking environment.
 * It is provided to aid porting of legacy code originally written for
 * preemptive multitasking environments such as pthreads.
 *
 * A mutex may be unlocked (not owned by any thread), or locked (owned by
 * one thread).
 *
 * A mutex can never be owned by more than one thread simultaneously.
 * A thread attempting to lock a mutex that is already locked by another
 * thread is suspended until the owning thread unlocks the mutex.
 *
 * lthread_mutex_init() initializes the mutex object pointed to by mutex.
 * Optional mutex attributes specified in mutexattr are reserved for future
 * use and are currently ignored.
 *
 * If a thread calls lthread_mutex_lock() on the mutex, then if the mutex
 * is currently unlocked, it becomes locked and owned by the calling
 * thread, and lthread_mutex_lock returns immediately. If the mutex is
 * already locked by another thread, lthread_mutex_lock suspends the calling
 * thread until the mutex is unlocked.
 *
 * lthread_mutex_trylock() behaves identically to lthread_mutex_lock(), except
 * that it does not block the calling thread if the mutex is already locked
 * by another thread.
 *
 * lthread_mutex_unlock() unlocks the specified mutex. The mutex is assumed
 * to be locked and owned by the calling thread.
 *
 * lthread_mutex_destroy() destroys a mutex object, freeing its resources.
 * The mutex must be unlocked with nothing blocked on it before calling
 * lthread_mutex_destroy.
 *
 * @param name
 *   Optional pointer to string describing the mutex
 * @param mutex
 *   Pointer to pointer to the mutex to be initialized
 * @param attr
 *   Pointer to attribute - unused, reserved
 * @return
 *   EINVAL mutex was not a valid pointer
 *   EAGAIN insufficient resources
int
lthread_mutex_init(char *name, struct lthread_mutex **mutex,
		   const struct lthread_mutexattr *attr);
 * This function destroys the specified mutex freeing its resources.
 * The mutex must be unlocked before calling lthread_mutex_destroy.
 *
 * @see lthread_mutex_init()
 *
 * @param mutex
 *   Pointer to the mutex to be destroyed
 * @return
 *   EINVAL mutex was not an initialized mutex
 *   EBUSY mutex was still in use
int lthread_mutex_destroy(struct lthread_mutex *mutex);
 * This function attempts to lock a mutex.
 * If a thread calls lthread_mutex_lock() on the mutex, then if the mutex
 * is currently unlocked, it becomes locked and owned by the calling
 * thread, and lthread_mutex_lock returns immediately. If the mutex is
 * already locked by another thread, lthread_mutex_lock suspends the calling
 * thread until the mutex is unlocked.
 *
 * @see lthread_mutex_init()
 *
 * @param mutex
 *   Pointer to the mutex to be locked
 * @return
 *   EINVAL mutex was not an initialized mutex
 *   EDEADLOCK the mutex was already owned by the calling thread
int lthread_mutex_lock(struct lthread_mutex *mutex);
 * Try to lock a mutex
 *
 * This function attempts to lock a mutex.
 * lthread_mutex_trylock() behaves identically to lthread_mutex_lock(), except
 * that it does not block the calling thread if the mutex is already locked
 * by another thread.
 *
 * @see lthread_mutex_init()
 *
 * @param mutex
 *   Pointer to the mutex to be locked
 * @return
 *   EINVAL mutex was not an initialized mutex
 *   EBUSY the mutex was already locked by another thread
int lthread_mutex_trylock(struct lthread_mutex *mutex);
 * This function attempts to unlock the specified mutex. The mutex is assumed
 * to be locked and owned by the calling thread.
 *
 * The oldest of any threads blocked on the mutex is made ready and may
 * compete with any other running thread to gain the mutex; if it fails it
 * will be suspended to wait for the mutex again.
 *
 * @param mutex
 *   Pointer to the mutex to be unlocked
 * @return
 *   0 mutex was unlocked
 *   EINVAL mutex was not an initialized mutex
 *   EPERM the mutex was not owned by the calling thread
int lthread_mutex_unlock(struct lthread_mutex *mutex);
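/*
 * Illustrative mutex sketch (stats_lock and shared_count are arbitrary
 * application names):
 *
 *    static struct lthread_mutex *stats_lock;
 *    static uint64_t shared_count;
 *
 *    lthread_mutex_init("stats", &stats_lock, NULL);   // once, at startup
 *
 *    // inside any lthread:
 *    lthread_mutex_lock(stats_lock);
 *    shared_count++;                     // protected update
 *    lthread_mutex_unlock(stats_lock);
 */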
 * Initialize a condition variable
 *
 * This function initializes a condition variable.
 *
 * Condition variables can be used to communicate changes in the state of data
 * shared between threads.
 *
 * @see lthread_cond_wait()
 *
 * @param name
 *   Pointer to optional string describing the condition variable
 * @param c
 *   Pointer to pointer to the condition variable to be initialized
 * @param attr
 *   Pointer to optional attribute reserved for future use, currently ignored
 * @return
 *   EINVAL cond was not a valid pointer
 *   EAGAIN insufficient resources
int
lthread_cond_init(char *name, struct lthread_cond **c,
		  const struct lthread_condattr *attr);
 * Destroy a condition variable
 *
 * This function destroys a condition variable that was created with
 * lthread_cond_init() and releases its resources.
 *
 * @param cond
 *   Pointer to the condition variable to be destroyed
 * @return
 *   EBUSY condition variable was still in use
 *   EINVAL was not an initialised condition variable
int lthread_cond_destroy(struct lthread_cond *cond);
 * Wait on a condition variable
 *
 * The function blocks the current thread waiting on the condition variable
 * specified by cond. The waiting thread unblocks only after another thread
 * calls lthread_cond_signal, or lthread_cond_broadcast, specifying the
 * same condition variable.
 *
 * @param c
 *   Pointer to the condition variable to be waited on
 * @param reserved
 *   reserved for future use
 * @return
 *   0 The condition was signalled (Success)
 *   EINVAL was not an initialised condition variable
int lthread_cond_wait(struct lthread_cond *c, uint64_t reserved);
 * Signal a condition variable
 *
 * The function unblocks one thread waiting for the condition variable cond.
 * If no threads are waiting on cond, the lthread_cond_signal() function
 * has no effect.
 *
 * @param c
 *   Pointer to the condition variable to be signalled
 * @return
 *   0 The condition was signalled (Success)
 *   EINVAL was not an initialised condition variable
int lthread_cond_signal(struct lthread_cond *c);
 * Broadcast a condition variable
 *
 * The function unblocks all threads waiting for the condition variable cond.
 * If no threads are waiting on cond, the lthread_cond_broadcast()
 * function has no effect.
 *
 * @param c
 *   Pointer to the condition variable to be broadcast
 * @return
 *   0 The condition was signalled (Success)
 *   EINVAL was not an initialised condition variable
int lthread_cond_broadcast(struct lthread_cond *c);
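/*
 * Illustrative condition variable sketch (data_cond and ready are arbitrary
 * names; the waiter re-checks the predicate after waking):
 *
 *    static struct lthread_cond *data_cond;
 *    static volatile int ready;
 *
 *    lthread_cond_init("data", &data_cond, NULL);   // once, at startup
 *
 *    // consumer lthread:
 *    while (!ready)
 *            lthread_cond_wait(data_cond, 0);
 *
 *    // producer lthread:
 *    ready = 1;
 *    lthread_cond_signal(data_cond);    // wake one waiter
 */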
#endif /* LTHREAD_H */