/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2015 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdio.h>
#include <string.h>	/* strncpy() */
#include <stdint.h>	/* uint64_t */
#include <errno.h>	/* EINVAL, EAGAIN, EBUSY, EDEADLK, EPERM */

#include <rte_config.h>
#include <rte_per_lcore.h>
#include <rte_atomic.h>	/* rte_atomic64_* */
#include <rte_spinlock.h>
#include <rte_common.h>

#include "lthread_api.h"
#include "lthread_int.h"
#include "lthread_mutex.h"
#include "lthread_sched.h"
#include "lthread_queue.h"
#include "lthread_objcache.h"
#include "lthread_diag.h"
/*
 * Create a mutex
 */
int
lthread_mutex_init(char *name, struct lthread_mutex **mutex,
		__rte_unused const struct lthread_mutexattr *attr)
{
	struct lthread_mutex *m;

	if (mutex == NULL)
		return POSIX_ERRNO(EINVAL);

	m = _lthread_objcache_alloc((THIS_SCHED)->mutex_cache);
	if (m == NULL)
		return POSIX_ERRNO(EAGAIN);

	m->blocked = _lthread_queue_create("blocked queue");
	if (m->blocked == NULL) {
		/* undo the allocation if the queue can't be created */
		_lthread_objcache_free((THIS_SCHED)->mutex_cache, m);
		return POSIX_ERRNO(EAGAIN);
	}

	/* record a diagnostic name, always NUL terminated */
	if (name == NULL)
		strncpy(m->name, "no name", sizeof(m->name));
	else
		strncpy(m->name, name, sizeof(m->name));
	m->name[sizeof(m->name)-1] = 0;

	m->root_sched = THIS_SCHED;
	m->owner = NULL;
	rte_atomic64_init(&m->count);

	DIAG_CREATE_EVENT(m, LT_DIAG_MUTEX_CREATE);
	/* success */
	(*mutex) = m;
	return 0;
}
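
/*
 * Illustrative usage sketch, not part of the original file: the normal
 * lifecycle of a mutex as seen from inside an lthread. The guard macro
 * LTHREAD_MUTEX_EXAMPLE and the example_* names are hypothetical, and
 * the block is compiled out by default.
 */
#ifdef LTHREAD_MUTEX_EXAMPLE
static void example_lifecycle(__rte_unused void *arg)
{
	struct lthread_mutex *m;

	/* "example" is only a diagnostic label; attr is unused */
	if (lthread_mutex_init("example", &m, NULL) != 0)
		return;

	lthread_mutex_lock(m);	/* may suspend until an owner unlocks */
	/* ... critical section ... */
	lthread_mutex_unlock(m);

	/* destroy only succeeds while the mutex is unowned */
	lthread_mutex_destroy(m);
}
#endif /* LTHREAD_MUTEX_EXAMPLE */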
/*
 * Destroy a mutex
 */
int lthread_mutex_destroy(struct lthread_mutex *m)
{
	if ((m == NULL) || (m->blocked == NULL)) {
		DIAG_EVENT(m, LT_DIAG_MUTEX_DESTROY, m, POSIX_ERRNO(EINVAL));
		return POSIX_ERRNO(EINVAL);
	}

	if (m->owner == NULL) {
		/* try to delete the blocked queue */
		if (_lthread_queue_destroy(m->blocked) < 0) {
			DIAG_EVENT(m, LT_DIAG_MUTEX_DESTROY,
					m, POSIX_ERRNO(EBUSY));
			return POSIX_ERRNO(EBUSY);
		}

		/* free the mutex back to the cache */
		_lthread_objcache_free(m->root_sched->mutex_cache, m);
		DIAG_EVENT(m, LT_DIAG_MUTEX_DESTROY, m, 0);
		return 0;
	}
	/* can't destroy it, it is still in use */
	DIAG_EVENT(m, LT_DIAG_MUTEX_DESTROY, m, POSIX_ERRNO(EBUSY));
	return POSIX_ERRNO(EBUSY);
}
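
/*
 * Illustrative sketch, not part of the original file: because destroy
 * refuses a mutex that is still owned, callers can poll for teardown.
 * example_try_destroy() is a hypothetical helper.
 */
#ifdef LTHREAD_MUTEX_EXAMPLE
static int example_try_destroy(struct lthread_mutex *m)
{
	/* EBUSY means an owner (or blocked waiters) still exist */
	if (lthread_mutex_destroy(m) == POSIX_ERRNO(EBUSY))
		return -1;	/* caller should retry after unlock */
	return 0;
}
#endif /* LTHREAD_MUTEX_EXAMPLE */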
/*
 * Try to obtain a mutex
 */
int lthread_mutex_lock(struct lthread_mutex *m)
{
	struct lthread *lt = THIS_LTHREAD;

	if ((m == NULL) || (m->blocked == NULL)) {
		DIAG_EVENT(m, LT_DIAG_MUTEX_LOCK, m, POSIX_ERRNO(EINVAL));
		return POSIX_ERRNO(EINVAL);
	}

	/* allow no recursion */
	if (m->owner == lt) {
		DIAG_EVENT(m, LT_DIAG_MUTEX_LOCK, m, POSIX_ERRNO(EDEADLK));
		return POSIX_ERRNO(EDEADLK);
	}

	for (;;) {
		/* count tracks the owner plus any waiters */
		rte_atomic64_inc(&m->count);
		do {
			if (rte_atomic64_cmpset
			    ((uint64_t *) &m->owner, 0, (uint64_t) lt)) {
				/* happy days, we got the lock */
				DIAG_EVENT(m, LT_DIAG_MUTEX_LOCK, m, 0);
				return 0;
			}
			/* spin due to race with unlock when
			 * nothing was blocked
			 */
		} while ((rte_atomic64_read(&m->count) == 1) &&
				(m->owner == NULL));

		/* queue the current thread in the blocked queue.
		 * We defer this until after we return to the scheduler
		 * to ensure that the current thread context is saved
		 * before unlock could result in it being dequeued and
		 * resumed.
		 */
		DIAG_EVENT(m, LT_DIAG_MUTEX_BLOCKED, m, lt);
		lt->pending_wr_queue = m->blocked;
		/* now relinquish cpu */
		_suspend();
		/* resumed, must loop and compete for the lock again */
	}
	LTHREAD_ASSERT(0);
	return 0;
}
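
/*
 * Illustrative sketch, not part of the original file: two lthreads
 * contending for one mutex. The loser of the cmpset race above parks
 * on m->blocked and re-competes after the winner unlocks. The
 * example_* names are hypothetical.
 */
#ifdef LTHREAD_MUTEX_EXAMPLE
static struct lthread_mutex *example_shared_mutex;
static uint64_t example_shared_counter;

static void example_contender(__rte_unused void *arg)
{
	lthread_mutex_lock(example_shared_mutex);
	/* serialized: only the current owner executes this */
	example_shared_counter++;
	lthread_mutex_unlock(example_shared_mutex);
}
#endif /* LTHREAD_MUTEX_EXAMPLE */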
/* try to lock a mutex but don't block */
int lthread_mutex_trylock(struct lthread_mutex *m)
{
	struct lthread *lt = THIS_LTHREAD;

	if ((m == NULL) || (m->blocked == NULL)) {
		DIAG_EVENT(m, LT_DIAG_MUTEX_TRYLOCK, m, POSIX_ERRNO(EINVAL));
		return POSIX_ERRNO(EINVAL);
	}

	if (m->owner == lt) {
		/* no recursion */
		DIAG_EVENT(m, LT_DIAG_MUTEX_TRYLOCK, m, POSIX_ERRNO(EDEADLK));
		return POSIX_ERRNO(EDEADLK);
	}

	rte_atomic64_inc(&m->count);
	if (rte_atomic64_cmpset
	    ((uint64_t *) &m->owner, (uint64_t) NULL, (uint64_t) lt)) {
		/* got the lock */
		DIAG_EVENT(m, LT_DIAG_MUTEX_TRYLOCK, m, 0);
		return 0;
	}

	/* failed so return busy */
	rte_atomic64_dec(&m->count);
	DIAG_EVENT(m, LT_DIAG_MUTEX_TRYLOCK, m, POSIX_ERRNO(EBUSY));
	return POSIX_ERRNO(EBUSY);
}
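
/*
 * Illustrative sketch, not part of the original file: a non-blocking
 * acquire loop built on trylock, yielding to the scheduler instead of
 * suspending on the blocked queue. example_poll_lock() is hypothetical.
 */
#ifdef LTHREAD_MUTEX_EXAMPLE
static void example_poll_lock(struct lthread_mutex *m)
{
	/* spin politely: let other lthreads run between attempts */
	while (lthread_mutex_trylock(m) == POSIX_ERRNO(EBUSY))
		lthread_yield();
}
#endif /* LTHREAD_MUTEX_EXAMPLE */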
/*
 * Unlock a mutex
 */
int lthread_mutex_unlock(struct lthread_mutex *m)
{
	struct lthread *lt = THIS_LTHREAD;
	struct lthread *unblocked;

	if ((m == NULL) || (m->blocked == NULL)) {
		DIAG_EVENT(m, LT_DIAG_MUTEX_UNLOCKED, m, POSIX_ERRNO(EINVAL));
		return POSIX_ERRNO(EINVAL);
	}

	/* fail if the caller does not own it */
	if (m->owner != lt || m->owner == NULL) {
		DIAG_EVENT(m, LT_DIAG_MUTEX_UNLOCKED, m, POSIX_ERRNO(EPERM));
		return POSIX_ERRNO(EPERM);
	}

	rte_atomic64_dec(&m->count);
	/* if there are blocked threads then make one ready */
	while (rte_atomic64_read(&m->count) > 0) {
		unblocked = _lthread_queue_remove(m->blocked);

		if (unblocked != NULL) {
			rte_atomic64_dec(&m->count);
			DIAG_EVENT(m, LT_DIAG_MUTEX_UNLOCKED, m, unblocked);
			LTHREAD_ASSERT(unblocked->sched != NULL);
			_ready_queue_insert((struct lthread_sched *)
					unblocked->sched, unblocked);
			break;
		}
	}
	/* release the lock */
	m->owner = NULL;
	return 0;
}
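
/*
 * Illustrative sketch, not part of the original file: unlock is owner
 * checked, so a non-owner cannot release the mutex; the call fails
 * with EPERM instead of corrupting m->owner. example_bad_unlock() is
 * hypothetical.
 */
#ifdef LTHREAD_MUTEX_EXAMPLE
static void example_bad_unlock(struct lthread_mutex *m)
{
	/* this lthread never locked m, so the call must fail */
	if (lthread_mutex_unlock(m) == POSIX_ERRNO(EPERM))
		printf("unlock refused: not the owner\n");
}
#endif /* LTHREAD_MUTEX_EXAMPLE */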
/*
 * return the diagnostic ref val stored in a mutex
 */
uint64_t
lthread_mutex_diag_ref(struct lthread_mutex *m)
{
	if (m == NULL)
		return 0;
	return m->diag_ref;
}