4 * Copyright(c) 2015 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35 * Some portions of this software may have been derived from the
36 * https://github.com/halayli/lthread which carries the following license.
38 * Copyright (C) 2012, Hasan Alayli <halayli@gmail.com>
40 * Redistribution and use in source and binary forms, with or without
41 * modification, are permitted provided that the following conditions
43 * 1. Redistributions of source code must retain the above copyright
44 * notice, this list of conditions and the following disclaimer.
45 * 2. Redistributions in binary form must reproduce the above copyright
46 * notice, this list of conditions and the following disclaimer in the
47 * documentation and/or other materials provided with the distribution.
49 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
50 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
51 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
52 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
53 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
54 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
55 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
56 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
57 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
58 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
65 * This file contains the public API for the L-thread subsystem
67 * The L_thread subsystem provides a simple cooperative scheduler to
68 * enable arbitrary functions to run as cooperative threads within a
71 * The subsystem provides a P-thread like API that is intended to assist in
72 * reuse of legacy code written for POSIX pthreads.
74 * The L-thread subsystem relies on cooperative multitasking, as such
75 * an L-thread must possess frequent rescheduling points. Often these
76 * rescheduling points are provided transparently when the application
77 * invokes an L-thread API.
79 * In some applications it is possible that the program may enter a loop the
80 * exit condition for which depends on the action of another thread or a
81 * response from hardware. In such a case it is necessary to yield the thread
82 * periodically in the loop body, to allow other threads an opportunity to
83 * run. This can be done by inserting a call to lthread_yield() or
84 * lthread_sleep(n) in the body of the loop.
86 * If the application makes expensive / blocking system calls or does other
87 * work that would take an inordinate amount of time to complete, this will
88 * stall the cooperative scheduler resulting in very poor performance.
90 * In such cases an L-thread can be migrated temporarily to another scheduler
91 * running in a different P-thread on another core. When the expensive or
92 * blocking operation is completed it can be migrated back to the original
93 * scheduler. In this way other threads can continue to run on the original
94 * scheduler and will be completely unaffected by the blocking behaviour.
95 * To migrate an L-thread to another scheduler the API lthread_set_affinity()
98 * If L-threads that share data are running on the same core it is possible
99 * to design programs where mutual exclusion mechanisms to protect shared data
100 * can be avoided. This is due to the fact that the cooperative threads cannot
101 * preempt each other.
103 * There are two cases where mutual exclusion mechanisms are necessary.
105 * a) Where the L-threads sharing data are running on different cores.
106 * b) Where code must yield while updating data shared with another thread.
108 * The L-thread subsystem provides a set of mutex APIs to help with such
109 * scenarios, however excessive reliance on these will impact performance
110 * and is best avoided if possible.
112 * L-threads can synchronise using a fast condition variable implementation
113 * that supports signal and broadcast. An L-thread running on any core can
114 * wait on a condition.
116 * L-threads can have L-thread local storage with an API modelled on either the
117 * P-thread get/set specific API or using PER_LTHREAD macros modelled on the
118 * RTE_PER_LCORE macros. Alternatively a simple user data pointer may be set
119 * and retrieved from a thread.
125 #include <sys/socket.h>
127 #include <netinet/in.h>
129 #include <rte_cycles.h>
134 struct lthread_mutex;
136 struct lthread_condattr;
137 struct lthread_mutexattr;
139 typedef void (*lthread_func_t) (void *);
142 * Define the size of stack for an lthread
143 * Then this is the size that will be allocated on lthread creation
144 * This is a fixed size and will not grow.
146 #define LTHREAD_MAX_STACK_SIZE (1024*64)
149 * Define the maximum number of TLS keys that can be created
152 #define LTHREAD_MAX_KEYS 1024
155 * Define the maximum number of attempts to destroy an lthread's
156 * TLS data on thread exit
158 #define LTHREAD_DESTRUCTOR_ITERATIONS 4
162 * Define the maximum number of lcores that will support lthreads
164 #define LTHREAD_MAX_LCORES RTE_MAX_LCORE
167 * How many lthread objects to pre-allocate as the system grows
168 * applies to lthreads + stacks, TLS, mutexes, cond vars.
170 * @see _lthread_alloc()
172 * @see _mutex_alloc()
175 #define LTHREAD_PREALLOC 100
178 * Set the number of schedulers in the system.
180 * This function may optionally be called before starting schedulers.
182 * If the number of schedulers is not set, or set to 0 then each scheduler
183 * will begin scheduling lthreads immediately after it is started.
185 * If the number of schedulers is set to greater than 0, then each scheduler
186 * will wait until all schedulers have started before beginning to schedule
189 * If an application wishes to have threads migrate between cores using
190 * lthread_set_affinity(), or join threads running on other cores using
191 * lthread_join(), then it is prudent to set the number of schedulers to ensure
192 * that all schedulers are initialised beforehand.
195 * the number of schedulers in the system
197 * the number of schedulers in the system
199 int lthread_num_schedulers_set(int num);
202 * Return the number of schedulers currently running
204 * the number of schedulers in the system
206 int lthread_active_schedulers(void);
209 * Shutdown the specified scheduler
211 * This function tells the specified scheduler to
212 * exit if/when there is no more work to do.
214 * Note that although the scheduler will stop
215 * resources are not freed.
218 * The lcore of the scheduler to shutdown
223 void lthread_scheduler_shutdown(unsigned lcore);
226 * Shutdown all schedulers
228 * This function tells all schedulers including the current scheduler to
229 * exit if/when there is no more work to do.
231 * Note that although the schedulers will stop
232 * resources are not freed.
237 void lthread_scheduler_shutdown_all(void);
240 * Run the lthread scheduler
242 * Runs the lthread scheduler.
243 * This function returns only if/when all lthreads have exited.
244 * This function must be the main loop of an EAL thread.
250 void lthread_run(void);
255 * Creates an lthread and places it in the ready queue on a particular
258 * If no scheduler exists yet on the current lcore then one is created.
261 * Pointer to an lthread pointer that will be initialized
263 * the lcore the thread should be started on or the current lcore
264 * -1 the current lcore
265 * 0 - LTHREAD_MAX_LCORES any other lcore
266 * @param lthread_func
267 * Pointer to the function for the thread to run
269 * Pointer to args that will be passed to the thread
273 * EAGAIN no resources available
274 * EINVAL NULL thread or function pointer, or lcore_id out of range
277 lthread_create(struct lthread **new_lt,
278 int lcore, lthread_func_t func, void *arg);
283 * Cancels an lthread and causes it to be terminated
284 * If the lthread is detached it will be freed immediately
285 * otherwise its resources will not be released until it is joined.
288 * Pointer to an lthread that will be cancelled
292 * EINVAL thread was NULL
294 int lthread_cancel(struct lthread *lt);
299 * Joins the current thread with the specified lthread, and waits for that
301 * Passes an optional pointer to collect returned data.
304 * Pointer to the lthread to be joined
306 * Pointer to pointer to collect returned data
310 * EINVAL lthread could not be joined.
312 int lthread_join(struct lthread *lt, void **ptr);
317 * Detaches the current thread
318 * On exit a detached lthread will be freed immediately and will not wait
319 * to be joined. The default state for a thread is not detached.
324 void lthread_detach(void);
329 * Terminate the current thread, optionally return data.
330 * The data may be collected by lthread_join()
332 * After calling this function the lthread will be suspended until it is
333 * joined. After it is joined then its resources will be freed.
336 * Pointer to pointer to data to be returned
341 void lthread_exit(void *val);
344 * Cause the current lthread to sleep for n nanoseconds
346 * The current thread will be suspended until the specified time has elapsed
347 * or has been exceeded.
349 * Execution will switch to the next lthread that is ready to run
352 * Number of nanoseconds to sleep
357 void lthread_sleep(uint64_t nsecs);
360 * Cause the current lthread to sleep for n cpu clock ticks
362 * The current thread will be suspended until the specified time has elapsed
363 * or has been exceeded.
365 * Execution will switch to the next lthread that is ready to run
368 * Number of clock ticks to sleep
373 void lthread_sleep_clks(uint64_t clks);
376 * Yield the current lthread
378 * The current thread will yield and execution will switch to the
379 * next lthread that is ready to run
384 void lthread_yield(void);
387 * Migrate the current thread to another scheduler
389 * This function migrates the current thread to another scheduler.
390 * Execution will switch to the next lthread that is ready to run on the
391 * current scheduler. The current thread will be resumed on the new scheduler.
394 * The lcore to migrate to
397 * 0 success we are now running on the specified core
398 * EINVAL the destination lcore was not valid
400 int lthread_set_affinity(unsigned lcore);
403 * Return the current lthread
405 * Returns the current lthread
408 * pointer to the current lthread
411 *lthread_current(void);
414 * Associate user data with an lthread
416 * This function sets a user data pointer in the current lthread
417 * The pointer can be retrieved with lthread_get_data()
418 * It is the user's responsibility to allocate and free any data referenced
419 * by the user pointer.
422 * pointer to user data
427 void lthread_set_data(void *data);
430 * Get user data for the current lthread
432 * This function returns a user data pointer for the current lthread
433 * The pointer must first be set with lthread_set_data()
434 * It is the user's responsibility to allocate and free any data referenced
435 * by the user pointer.
438 * pointer to user data
441 *lthread_get_data(void);
444 typedef void (*tls_destructor_func) (void *);
447 * Create a key for lthread TLS
449 * This function is modelled on pthread_key_create
450 * It creates a thread-specific data key visible to all lthreads on the
453 * Key values may be used to locate thread-specific data.
454 * The same key value may be used by different threads, the values bound
455 * to the key by lthread_setspecific() are maintained on a per-thread
456 * basis and persist for the life of the calling thread.
458 * An optional destructor function may be associated with each key value.
459 * At thread exit, if a key value has a non-NULL destructor pointer, and the
460 * thread has a non-NULL value associated with the key, the function pointed
461 * to is called with the current associated value as its sole argument.
464 * Pointer to the key to be created
466 * Pointer to destructor function
470 * EINVAL the key ptr was NULL
471 * EAGAIN no resources available
473 int lthread_key_create(unsigned int *key, tls_destructor_func destructor);
476 * Delete key for lthread TLS
478 * This function is modelled on pthread_key_delete().
479 * It deletes a thread-specific data key previously returned by
480 * lthread_key_create().
481 * The thread-specific data values associated with the key need not be NULL
482 * at the time that lthread_key_delete is called.
483 * It is the responsibility of the application to free any application
484 * storage or perform any cleanup actions for data structures related to the
485 * deleted key. This cleanup can be done either before or after
486 * lthread_key_delete is called.
489 * The key to be deleted
493 * EINVAL the key was invalid
495 int lthread_key_delete(unsigned int key);
500 * This function is modelled on pthread_getspecific().
501 * It returns the value currently bound to the specified key on behalf of the
502 * calling thread. Calling lthread_getspecific() with a key value not
503 * obtained from lthread_key_create() or after key has been deleted with
504 * lthread_key_delete() will result in undefined behaviour.
505 * lthread_getspecific() may be called from a thread-specific data destructor
509 * The key for which data is requested
512 * Pointer to the thread specific data associated with that key
513 * or NULL if no data has been set.
516 *lthread_getspecific(unsigned int key);
521 * This function is modelled on pthread_setspecific()
522 * It associates a thread-specific value with a key obtained via a previous
523 * call to lthread_key_create().
524 * Different threads may bind different values to the same key. These values
525 * are typically pointers to dynamically allocated memory that have been
526 * reserved by the calling thread. Calling lthread_setspecific with a key
527 * value not obtained from lthread_key_create or after the key has been
528 * deleted with lthread_key_delete will result in undefined behaviour.
531 * The key for which data is to be set
533 * Pointer to the user data
537 * EINVAL the key was invalid
540 int lthread_setspecific(unsigned int key, const void *value);
543 * The macros below provide an alternative mechanism to access lthread local
546 * The macros can be used to declare define and access per lthread local
547 * storage in a similar way to the RTE_PER_LCORE macros which control storage
550 * Memory for per lthread variables declared in this way is allocated when the
551 * lthread is created and a pointer to this memory is stored in the lthread.
552 * The per lthread variables are accessed via the pointer + the offset of the
553 * particular variable.
555 * The total size of per lthread storage, and the variable offsets are found by
556 * defining the variables in a unique global memory section, the start and end
557 * of which is known. This global memory section is used only in the
558 * computation of the addresses of the lthread variables, and is never actually
559 * used to store any data.
561 * Due to the fact that variables declared this way may be scattered across
562 * many files, the start and end of the section and variable offsets are only
563 * known after linking, thus the computation of section size and variable
564 * addresses is performed at run time.
566 * These macros are primarily provided to aid porting of code that makes use
567 * of the existing RTE_PER_LCORE macros. In principle it would be more efficient
568 * to gather all lthread local variables into a single structure and
569 * set/retrieve a pointer to that struct using the alternative
570 * lthread_data_set/get APIs.
572 * These macros are mutually exclusive with the lthread_data_set/get APIs.
573 * If you define storage using these macros then the lthread_data_set/get APIs
574 * will not perform as expected, the lthread_data_set API does nothing, and the
575 * lthread_data_get API returns the start of global section.
578 /* start and end of per lthread section */
579 extern char __start_per_lt;
580 extern char __stop_per_lt;
/* Define a per-lthread variable "name" of type "type"; placed in the dedicated
 * "per_lt" linker section so its offset can be computed at run time.
 */
#define RTE_DEFINE_PER_LTHREAD(type, name) \
	__typeof__(type) __attribute__((section("per_lt"))) per_lt_##name
587 * Macro to declare an extern per lthread variable "var" of type "type"
/* Declare (extern) a per-lthread variable "name" of type "type" defined in
 * another translation unit with RTE_DEFINE_PER_LTHREAD.
 */
#define RTE_DECLARE_PER_LTHREAD(type, name) \
	extern __typeof__(type) __attribute__((section("per_lt"))) per_lt_##name
593 * Read/write the per-lthread variable value
/* Access a per-lthread variable "name": yields a typed pointer into the current
 * lthread's storage, found by adding the variable's offset within the "per_lt"
 * section to the base pointer returned by lthread_get_data().
 */
#define RTE_PER_LTHREAD(name) ((typeof(per_lt_##name) *)\
	((char *)lthread_get_data() +\
	((char *) &per_lt_##name - &__start_per_lt)))
602 * This function provides a mutual exclusion device, the need for which
603 * can normally be avoided in a cooperative multitasking environment.
604 * It is provided to aid porting of legacy code originally written for
605 * preemptive multitasking environments such as pthreads.
607 * A mutex may be unlocked (not owned by any thread), or locked (owned by
610 * A mutex can never be owned by more than one thread simultaneously.
611 * A thread attempting to lock a mutex that is already locked by another
612 * thread is suspended until the owning thread unlocks the mutex.
614 * lthread_mutex_init() initializes the mutex object pointed to by mutex
615 * Optional mutex attributes specified in mutexattr, are reserved for future
616 * use and are currently ignored.
618 * If a thread calls lthread_mutex_lock() on the mutex, then if the mutex
619 * is currently unlocked, it becomes locked and owned by the calling
620 * thread, and lthread_mutex_lock returns immediately. If the mutex is
621 * already locked by another thread, lthread_mutex_lock suspends the calling
622 * thread until the mutex is unlocked.
624 * lthread_mutex_trylock behaves identically to lthread_mutex_lock, except
625 * that it does not block the calling thread if the mutex is already locked
628 * lthread_mutex_unlock() unlocks the specified mutex. The mutex is assumed
629 * to be locked and owned by the calling thread.
631 * lthread_mutex_destroy() destroys a mutex object, freeing its resources.
632 * The mutex must be unlocked with nothing blocked on it before calling
633 * lthread_mutex_destroy.
636 * Optional pointer to string describing the mutex
638 * Pointer to pointer to the mutex to be initialized
640 * Pointer to attribute - unused reserved
644 * EINVAL mutex was not a valid pointer
645 * EAGAIN insufficient resources
649 lthread_mutex_init(char *name, struct lthread_mutex **mutex,
650 const struct lthread_mutexattr *attr);
655 * This function destroys the specified mutex freeing its resources.
656 * The mutex must be unlocked before calling lthread_mutex_destroy.
658 * @see lthread_mutex_init()
661 * Pointer to the mutex to be destroyed
665 * EINVAL mutex was not an initialized mutex
666 * EBUSY mutex was still in use
668 int lthread_mutex_destroy(struct lthread_mutex *mutex);
673 * This function attempts to lock a mutex.
674 * If a thread calls lthread_mutex_lock() on the mutex, then if the mutex
675 * is currently unlocked, it becomes locked and owned by the calling
676 * thread, and lthread_mutex_lock returns immediately. If the mutex is
677 * already locked by another thread, lthread_mutex_lock suspends the calling
678 * thread until the mutex is unlocked.
680 * @see lthread_mutex_init()
683 * Pointer to the mutex to be locked
687 * EINVAL mutex was not an initialized mutex
688 * EDEADLOCK the mutex was already owned by the calling thread
691 int lthread_mutex_lock(struct lthread_mutex *mutex);
694 * Try to lock a mutex
696 * This function attempts to lock a mutex.
697 * lthread_mutex_trylock behaves identically to lthread_mutex_lock, except
698 * that it does not block the calling thread if the mutex is already locked
702 * @see lthread_mutex_init()
705 * Pointer to the mutex to be locked
709 * EINVAL mutex was not an initialized mutex
710 * EBUSY the mutex was already locked by another thread
712 int lthread_mutex_trylock(struct lthread_mutex *mutex);
717 * This function attempts to unlock the specified mutex. The mutex is assumed
718 * to be locked and owned by the calling thread.
720 * The oldest of any threads blocked on the mutex is made ready and may
721 * compete with any other running thread to gain the mutex; if it fails it will
725 * Pointer to the mutex to be unlocked
728 * 0 mutex was unlocked
729 * EINVAL mutex was not an initialized mutex
730 * EPERM the mutex was not owned by the calling thread
733 int lthread_mutex_unlock(struct lthread_mutex *mutex);
736 * Initialize a condition variable
738 * This function initializes a condition variable.
740 * Condition variables can be used to communicate changes in the state of data
741 * shared between threads.
743 * @see lthread_cond_wait()
746 * Pointer to optional string describing the condition variable
748 * Pointer to pointer to the condition variable to be initialized
750 * Pointer to optional attribute reserved for future use, currently ignored
754 * EINVAL cond was not a valid pointer
755 * EAGAIN insufficient resources
758 lthread_cond_init(char *name, struct lthread_cond **c,
759 const struct lthread_condattr *attr);
762 * Destroy a condition variable
764 * This function destroys a condition variable that was created with
765 * lthread_cond_init() and releases its resources.
768 * Pointer to the condition variable to be destroyed
772 * EBUSY condition variable was still in use
773 * EINVAL was not an initialised condition variable
775 int lthread_cond_destroy(struct lthread_cond *cond);
778 * Wait on a condition variable
780 * The function blocks the current thread waiting on the condition variable
781 * specified by cond. The waiting thread unblocks only after another thread
782 * calls lthread_cond_signal, or lthread_cond_broadcast, specifying the
783 * same condition variable.
786 * Pointer to the condition variable to be waited on
789 * reserved for future use
792 * 0 The condition was signalled ( Success )
793 * EINVAL was not an initialised condition variable
795 int lthread_cond_wait(struct lthread_cond *c, uint64_t reserved);
798 * Signal a condition variable
800 * The function unblocks one thread waiting for the condition variable cond.
801 * If no threads are waiting on cond, the lthread_cond_signal() function
805 * Pointer to the condition variable to be signalled
808 * 0 The condition was signalled ( Success )
809 * EINVAL was not an initialised condition variable
811 int lthread_cond_signal(struct lthread_cond *c);
814 * Broadcast a condition variable
816 * The function unblocks all threads waiting for the condition variable cond.
817 * If no threads are waiting on cond, the lthread_cond_broadcast()
818 * function has no effect.
821 * Pointer to the condition variable to be broadcast
824 * 0 The condition was signalled ( Success )
825 * EINVAL was not an initialised condition variable
827 int lthread_cond_broadcast(struct lthread_cond *c);
829 #endif /* LTHREAD_H */