4 * Copyright(c) 2015 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
48 #include <rte_config.h>
49 #include <rte_malloc.h>
52 #include <rte_atomic_64.h>
54 #include "lthread_tls.h"
55 #include "lthread_queue.h"
56 #include "lthread_objcache.h"
57 #include "lthread_sched.h"
/* Ring of free key slots shared by all schedulers; filled once by
 * _lthread_key_pool_init() and NULL until that initialization completes. */
59 static struct rte_ring *key_pool;
/* 0/1 latch flipped with rte_atomic64_cmpset() so exactly one lcore
 * builds the pool. */
60 static uint64_t key_pool_init;
62 /* needed to cause section start and end to be defined */
63 RTE_DEFINE_PER_LTHREAD(void *, dummy);
/* Backing storage for every key; a public key handle is an index into
 * this table (see lthread_key_create()). */
65 static struct lthread_key key_table[LTHREAD_MAX_KEYS];
/* Run lthread_tls_ctor() automatically before main() (GCC attribute). */
67 void lthread_tls_ctor(void) __attribute__((constructor));
/* Constructor entry point — body not visible in this chunk; presumably
 * resets key_pool / key_pool_init to their pre-init state — TODO confirm
 * against the full source. */
69 void lthread_tls_ctor(void)
76 * Initialize a pool of keys
77 * These are unique tokens that can be obtained by threads
78 * calling lthread_key_create()
/* NOTE(review): several original lines are missing from this chunk
 * (function braces, the head of the snprintf() that fills 'name', the
 * declaration of the loop counter 'i', and the tail of the spin-wait
 * loop). Annotated as-is; confirm against the full source. */
80 void _lthread_key_pool_init(void)
/* 'static' so the ring pointer is not stack-scoped while other lcores
 * may observe it via key_pool below. */
82 static struct rte_ring *pool;
83 struct lthread_key *new_key;
84 char name[MAX_LTHREAD_NAME_SIZE];
/* start from a clean key table on every initialization */
86 bzero(key_table, sizeof(key_table));
88 /* only one lcore should do this */
/* winner of the 0 -> 1 compare-and-set performs the one-time setup */
89 if (rte_atomic64_cmpset(&key_pool_init, 0, 1)) {
/* trailing arguments of the (not visible) snprintf() that builds a
 * process-unique ring name */
92 MAX_LTHREAD_NAME_SIZE,
93 "lthread_key_pool_%d",
/* flags 0, 0: default multi-producer / multi-consumer ring */
96 pool = rte_ring_create(name,
97 LTHREAD_MAX_KEYS, 0, 0);
/* index 0 is deliberately never pooled — it stays reserved */
102 for (i = 1; i < LTHREAD_MAX_KEYS; i++) {
103 new_key = &key_table[i];
104 rte_ring_mp_enqueue((struct rte_ring *)pool,
109 /* other lcores wait here till done */
/* losing lcores spin until the winner publishes the ring via key_pool */
110 while (key_pool == NULL) {
111 rte_compiler_barrier();
118 * this means getting a key from the pool
/* NOTE(review): lines missing from this chunk include the NULL check on
 * 'key' that guards the EINVAL return, and the '== 0' tail of the
 * dequeue condition — confirm against the full source. */
120 int lthread_key_create(unsigned int *key, tls_destructor_func destructor)
/* the out-parameter must be writable */
123 return POSIX_ERRNO(EINVAL);
125 struct lthread_key *new_key;
/* multi-consumer dequeue: safe when several lcores create keys */
127 if (rte_ring_mc_dequeue((struct rte_ring *)key_pool, (void **)&new_key)
129 new_key->destructor = destructor;
/* the public key handle is the slot's index within key_table */
130 *key = (new_key - key_table);
/* pool exhausted: every available key is currently in use */
134 return POSIX_ERRNO(EAGAIN);
141 int lthread_key_delete(unsigned int k)
143 struct lthread_key *key;
145 key = (struct lthread_key *) &key_table[k];
147 if (k > LTHREAD_MAX_KEYS)
148 return POSIX_ERRNO(EINVAL);
150 key->destructor = NULL;
151 rte_ring_mp_enqueue((struct rte_ring *)key_pool,
159 * Break association for all keys in use by this thread
160 * invoke the destructor if available.
161 * Since a destructor can create keys we could enter an infinite loop
162 * therefore we give up after LTHREAD_DESTRUCTOR_ITERATIONS
163 * the behavior is modelled on pthread
/* NOTE(review): lines missing from this chunk include the declarations
 * of 'i', 'k', 'nb_keys' and 'data', the early return when nb_keys == 0,
 * the 'continue' for unused slots, and the closing braces — confirm
 * against the full source. */
165 void _lthread_tls_destroy(struct lthread *lt)
/* outer loop: bounded retries, since destructors may create new keys */
171 for (i = 0; i < LTHREAD_DESTRUCTOR_ITERATIONS; i++) {
/* slot 0 is reserved and never scanned */
173 for (k = 1; k < LTHREAD_MAX_KEYS; k++) {
175 /* no keys in use ? */
/* re-read each iteration: destructors may change the count */
176 nb_keys = lt->tls->nb_keys_inuse;
180 /* this key not in use ? */
181 if (lt->tls->data[k] == NULL)
184 /* remove this key */
/* clear the slot BEFORE invoking the destructor, as pthread does,
 * so a destructor that re-sets the key is detected next pass */
185 data = lt->tls->data[k];
186 lt->tls->data[k] = NULL;
187 lt->tls->nb_keys_inuse = nb_keys-1;
189 /* invoke destructor */
190 if (key_table[k].destructor != NULL)
191 key_table[k].destructor(data);
197 * Return the pointer associated with a key
198 * If the key is no longer valid return NULL
201 *lthread_getspecific(unsigned int k)
204 if (k > LTHREAD_MAX_KEYS)
207 return THIS_LTHREAD->tls->data[k];
211 * Set a value against a key
212 * If the key is no longer valid return an error
215 int lthread_setspecific(unsigned int k, const void *data)
217 if (k > LTHREAD_MAX_KEYS)
218 return POSIX_ERRNO(EINVAL);
220 int n = THIS_LTHREAD->tls->nb_keys_inuse;
222 /* discard const qualifier */
223 char *p = (char *) (uintptr_t) data;
227 if (THIS_LTHREAD->tls->data[k] == NULL)
228 THIS_LTHREAD->tls->nb_keys_inuse = n+1;
231 THIS_LTHREAD->tls->data[k] = (void *) p;
236 * Allocate data for TLS cache
/* NOTE(review): this function is truncated at the end of the chunk —
 * the closing braces (and presumably the 'lt->tls = tls;' assignment)
 * are not visible. Confirm against the full source. */
238 void _lthread_tls_alloc(struct lthread *lt)
240 struct lthread_tls *tls;
/* TLS blocks come from the current scheduler's object cache, not from
 * a raw allocator */
242 tls = _lthread_objcache_alloc((THIS_SCHED)->tls_cache);
/* allocation failure is fatal by design */
244 LTHREAD_ASSERT(tls != NULL);
/* remember the owning scheduler so the block can be freed back to the
 * same cache later */
246 tls->root_sched = (THIS_SCHED);
249 /* allocate data for TLS variables using RTE_PER_LTHREAD macros */
/* only allocate the per-lthread section when it is larger than a
 * single pointer */
250 if (sizeof(void *) < (uint64_t)RTE_PER_LTHREAD_SECTION_SIZE) {
251 lt->per_lthread_data =
252 _lthread_objcache_alloc((THIS_SCHED)->per_lthread_cache);