/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2015 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <stddef.h>
#include <sys/types.h>
#include <errno.h>

#include <rte_per_lcore.h>
#include <rte_log.h>
#include <rte_spinlock.h>
#include <rte_common.h>

#include "lthread_api.h"
#include "lthread_int.h"
#include "lthread_mutex.h"
#include "lthread_sched.h"
#include "lthread_queue.h"
#include "lthread_objcache.h"
#include "lthread_diag.h"
/*
 * Create a mutex
 */
int
lthread_mutex_init(char *name, struct lthread_mutex **mutex,
		   __rte_unused const struct lthread_mutexattr *attr)
{
	struct lthread_mutex *m;

	if (mutex == NULL)
		return POSIX_ERRNO(EINVAL);

	m = _lthread_objcache_alloc((THIS_SCHED)->mutex_cache);
	if (m == NULL)
		return POSIX_ERRNO(EAGAIN);

	m->blocked = _lthread_queue_create("blocked queue");
	if (m->blocked == NULL) {
		_lthread_objcache_free((THIS_SCHED)->mutex_cache, m);
		return POSIX_ERRNO(EAGAIN);
	}

	if (name == NULL)
		strncpy(m->name, "no name", sizeof(m->name));
	else
		strncpy(m->name, name, sizeof(m->name));
	m->name[sizeof(m->name)-1] = 0;

	m->root_sched = THIS_SCHED;
	m->owner = NULL;
	rte_atomic64_init(&m->count);

	DIAG_CREATE_EVENT(m, LT_DIAG_MUTEX_CREATE);
	/* success */
	(*mutex) = m;
	return 0;
}
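/*
 * Usage sketch (illustrative, not part of this file): initialising a
 * mutex from inside an lthread and checking the POSIX-style return
 * value. The entry-point shape and the "tx_lock" name are assumptions
 * made for the example.
 *
 *	static void example_init(__rte_unused void *arg)
 *	{
 *		struct lthread_mutex *m = NULL;
 *
 *		if (lthread_mutex_init("tx_lock", &m, NULL) != 0)
 *			lthread_exit(NULL);
 *		...
 *		lthread_mutex_destroy(m);
 *	}
 */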
/*
 * Destroy a mutex
 */
int lthread_mutex_destroy(struct lthread_mutex *m)
{
	if ((m == NULL) || (m->blocked == NULL)) {
		DIAG_EVENT(m, LT_DIAG_MUTEX_DESTROY, m, POSIX_ERRNO(EINVAL));
		return POSIX_ERRNO(EINVAL);
	}

	if (m->owner == NULL) {
		/* try to delete the blocked queue */
		if (_lthread_queue_destroy(m->blocked) < 0) {
			DIAG_EVENT(m, LT_DIAG_MUTEX_DESTROY,
					m, POSIX_ERRNO(EBUSY));
			return POSIX_ERRNO(EBUSY);
		}

		/* free the mutex to cache */
		_lthread_objcache_free(m->root_sched->mutex_cache, m);
		DIAG_EVENT(m, LT_DIAG_MUTEX_DESTROY, m, 0);
		return 0;
	}

	/* can't destroy it, it's still in use */
	DIAG_EVENT(m, LT_DIAG_MUTEX_DESTROY, m, POSIX_ERRNO(EBUSY));
	return POSIX_ERRNO(EBUSY);
}
/*
 * Try to obtain a mutex
 */
int lthread_mutex_lock(struct lthread_mutex *m)
{
	struct lthread *lt = THIS_LTHREAD;

	if ((m == NULL) || (m->blocked == NULL)) {
		DIAG_EVENT(m, LT_DIAG_MUTEX_LOCK, m, POSIX_ERRNO(EINVAL));
		return POSIX_ERRNO(EINVAL);
	}

	/* allow no recursion */
	if (m->owner == lt) {
		DIAG_EVENT(m, LT_DIAG_MUTEX_LOCK, m, POSIX_ERRNO(EDEADLK));
		return POSIX_ERRNO(EDEADLK);
	}

	for (;;) {
		rte_atomic64_inc(&m->count);
		do {
			if (rte_atomic64_cmpset
			    ((uint64_t *) &m->owner, 0, (uint64_t) lt)) {
				/* happy days, we got the lock */
				DIAG_EVENT(m, LT_DIAG_MUTEX_LOCK, m, 0);
				return 0;
			}
			/* spin due to race with unlock when
			 * nothing was blocked
			 */
		} while ((rte_atomic64_read(&m->count) == 1) &&
				(m->owner == NULL));

		/* queue the current thread in the blocked queue
		 * we defer this to after we return to the scheduler
		 * to ensure that the current thread context is saved
		 * before unlock could result in it being dequeued and
		 * resumed
		 */
		DIAG_EVENT(m, LT_DIAG_MUTEX_BLOCKED, m, lt);
		lt->pending_wr_queue = m->blocked;
		/* now relinquish cpu */
		_suspend();
		/* resumed, must loop and compete for the lock again */
	}
	return 0;
}
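/*
 * Usage sketch (illustrative): a critical section guarded by the mutex.
 * lthread_mutex_lock() yields to the scheduler rather than spinning when
 * the lock is contended, so it must only be called from within an
 * lthread. The shared counter is an assumption made for the example.
 *
 *	static uint64_t shared_counter;
 *
 *	static void example_worker(void *arg)
 *	{
 *		struct lthread_mutex *m = arg;
 *
 *		lthread_mutex_lock(m);
 *		shared_counter++;
 *		lthread_mutex_unlock(m);
 *	}
 */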
/* try to lock a mutex but don't block */
int lthread_mutex_trylock(struct lthread_mutex *m)
{
	struct lthread *lt = THIS_LTHREAD;

	if ((m == NULL) || (m->blocked == NULL)) {
		DIAG_EVENT(m, LT_DIAG_MUTEX_TRYLOCK, m, POSIX_ERRNO(EINVAL));
		return POSIX_ERRNO(EINVAL);
	}

	/* allow no recursion */
	if (m->owner == lt) {
		DIAG_EVENT(m, LT_DIAG_MUTEX_TRYLOCK, m, POSIX_ERRNO(EDEADLK));
		return POSIX_ERRNO(EDEADLK);
	}

	rte_atomic64_inc(&m->count);
	if (rte_atomic64_cmpset
	    ((uint64_t *) &m->owner, (uint64_t) NULL, (uint64_t) lt)) {
		/* got the lock */
		DIAG_EVENT(m, LT_DIAG_MUTEX_TRYLOCK, m, 0);
		return 0;
	}

	/* failed so return busy */
	rte_atomic64_dec(&m->count);
	DIAG_EVENT(m, LT_DIAG_MUTEX_TRYLOCK, m, POSIX_ERRNO(EBUSY));
	return POSIX_ERRNO(EBUSY);
}
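/*
 * Usage sketch (illustrative): polling with trylock instead of blocking,
 * yielding between attempts so other lthreads on this scheduler can run.
 * EBUSY is the "lock held elsewhere" return value documented above.
 *
 *	static void example_poll(void *arg)
 *	{
 *		struct lthread_mutex *m = arg;
 *
 *		while (lthread_mutex_trylock(m) == POSIX_ERRNO(EBUSY))
 *			lthread_yield();
 *		...
 *		lthread_mutex_unlock(m);
 *	}
 */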
/*
 * Unlock a mutex
 */
int lthread_mutex_unlock(struct lthread_mutex *m)
{
	struct lthread *lt = THIS_LTHREAD;
	struct lthread *unblocked;

	if ((m == NULL) || (m->blocked == NULL)) {
		DIAG_EVENT(m, LT_DIAG_MUTEX_UNLOCKED, m, POSIX_ERRNO(EINVAL));
		return POSIX_ERRNO(EINVAL);
	}

	/* fail if the caller doesn't own it */
	if (m->owner != lt || m->owner == NULL) {
		DIAG_EVENT(m, LT_DIAG_MUTEX_UNLOCKED, m, POSIX_ERRNO(EPERM));
		return POSIX_ERRNO(EPERM);
	}

	rte_atomic64_dec(&m->count);
	/* if there are blocked threads then make one ready */
	while (rte_atomic64_read(&m->count) > 0) {
		unblocked = _lthread_queue_remove(m->blocked);

		if (unblocked != NULL) {
			rte_atomic64_dec(&m->count);
			DIAG_EVENT(m, LT_DIAG_MUTEX_UNLOCKED, m, unblocked);
			RTE_ASSERT(unblocked->sched != NULL);
			_ready_queue_insert((struct lthread_sched *)
					    unblocked->sched, unblocked);
			break;
		}
	}

	/* release the lock */
	m->owner = NULL;
	return 0;
}
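/*
 * Note (illustrative): unlock is owner-checked, so unlocking from an
 * lthread that does not hold the mutex fails cleanly instead of
 * corrupting the owner field:
 *
 *	lthread_mutex_lock(m);      in lthread A: A now owns m
 *	lthread_mutex_unlock(m);    in lthread B: returns EPERM,
 *	                            m remains locked by A
 */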
/*
 * return the diagnostic ref val stored in a mutex
 */
uint64_t
lthread_mutex_diag_ref(struct lthread_mutex *m)
{
	if (m == NULL)
		return 0;
	return m->diag_ref;
}