/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015 Intel Corporation
 */

#include <stdint.h>
#include <stddef.h>
#include <errno.h>

#include <rte_per_lcore.h>
#include <rte_log.h>
#include <rte_spinlock.h>
#include <rte_common.h>
#include <rte_string_fns.h>

#include "lthread_api.h"
#include "lthread_int.h"
#include "lthread_mutex.h"
#include "lthread_sched.h"
#include "lthread_queue.h"
#include "lthread_objcache.h"
#include "lthread_diag.h"
/*
 * Create a mutex
 */
int
lthread_mutex_init(char *name, struct lthread_mutex **mutex,
		   __rte_unused const struct lthread_mutexattr *attr)
{
	struct lthread_mutex *m;

	if (mutex == NULL)
		return POSIX_ERRNO(EINVAL);
	m = _lthread_objcache_alloc((THIS_SCHED)->mutex_cache);
	if (m == NULL)
		return POSIX_ERRNO(EAGAIN);
	m->blocked = _lthread_queue_create("blocked queue");
	if (m->blocked == NULL) {
		_lthread_objcache_free((THIS_SCHED)->mutex_cache, m);
		return POSIX_ERRNO(EAGAIN);
	}
	if (name == NULL)
		strlcpy(m->name, "no name", sizeof(m->name));
	else
		strlcpy(m->name, name, sizeof(m->name));
	m->root_sched = THIS_SCHED;
	m->owner = NULL;

	__atomic_store_n(&m->count, 0, __ATOMIC_RELAXED);

	DIAG_CREATE_EVENT(m, LT_DIAG_MUTEX_CREATE);
	/* success */
	(*mutex) = m;
	return 0;
}
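/*
 * Usage sketch (illustrative, not part of the original file): creating a
 * mutex from inside an lthread. The name "my_mutex" and the variable m are
 * hypothetical; only lthread_mutex_init() and its POSIX-style return codes
 * come from the function above.
 *
 *	struct lthread_mutex *m;
 *
 *	if (lthread_mutex_init("my_mutex", &m, NULL) != 0)
 *		return;		// EINVAL or EAGAIN, no mutex was created
 */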
/*
 * Destroy a mutex
 */
int lthread_mutex_destroy(struct lthread_mutex *m)
{
	if ((m == NULL) || (m->blocked == NULL)) {
		DIAG_EVENT(m, LT_DIAG_MUTEX_DESTROY, m, POSIX_ERRNO(EINVAL));
		return POSIX_ERRNO(EINVAL);
	}
	if (m->owner == NULL) {
		/* try to delete the blocked queue */
		if (_lthread_queue_destroy(m->blocked) < 0) {
			DIAG_EVENT(m, LT_DIAG_MUTEX_DESTROY,
					m, POSIX_ERRNO(EBUSY));
			return POSIX_ERRNO(EBUSY);
		}
		/* free the mutex to cache */
		_lthread_objcache_free(m->root_sched->mutex_cache, m);
		DIAG_EVENT(m, LT_DIAG_MUTEX_DESTROY, m, 0);
		return 0;
	}
	/* can't destroy it, it's still in use */
	DIAG_EVENT(m, LT_DIAG_MUTEX_DESTROY, m, POSIX_ERRNO(EBUSY));
	return POSIX_ERRNO(EBUSY);
}
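/*
 * Usage sketch (illustrative): destroy legitimately fails with EBUSY while
 * the mutex is owned or lthreads are still queued on it, so a caller can
 * release the lock and retry. The variable m is hypothetical.
 *
 *	if (lthread_mutex_destroy(m) == POSIX_ERRNO(EBUSY)) {
 *		// still owned or waiters queued; unlock first, then retry
 *	}
 */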
/*
 * Try to obtain a mutex
 */
int lthread_mutex_lock(struct lthread_mutex *m)
{
	struct lthread *lt = THIS_LTHREAD;

	if ((m == NULL) || (m->blocked == NULL)) {
		DIAG_EVENT(m, LT_DIAG_MUTEX_LOCK, m, POSIX_ERRNO(EINVAL));
		return POSIX_ERRNO(EINVAL);
	}
	/* allow no recursion */
	if (m->owner == lt) {
		DIAG_EVENT(m, LT_DIAG_MUTEX_LOCK, m, POSIX_ERRNO(EDEADLK));
		return POSIX_ERRNO(EDEADLK);
	}
	for (;;) {
		/* count tracks the owner plus any contenders */
		__atomic_fetch_add(&m->count, 1, __ATOMIC_RELAXED);
		do {
			uint64_t lt_init = 0;

			if (__atomic_compare_exchange_n((uint64_t *) &m->owner,
				&lt_init, (uint64_t) lt, 0,
				__ATOMIC_RELAXED, __ATOMIC_RELAXED)) {
				/* happy days, we got the lock */
				DIAG_EVENT(m, LT_DIAG_MUTEX_LOCK, m, 0);
				return 0;
			}
			/* spin due to race with unlock when
			 * nothing was blocked
			 */
		} while ((__atomic_load_n(&m->count, __ATOMIC_RELAXED) == 1) &&
				(m->owner == NULL));
		/* queue the current thread in the blocked queue.
		 * We defer this until after we return to the scheduler,
		 * to ensure that the current thread context is saved
		 * before an unlock could result in it being dequeued and
		 * resumed.
		 */
		DIAG_EVENT(m, LT_DIAG_MUTEX_BLOCKED, m, lt);
		lt->pending_wr_queue = m->blocked;
		/* now relinquish cpu */
		_suspend();
		/* resumed, must loop and compete for the lock again */
	}
	return 0;
}
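/*
 * Usage sketch (illustrative): a classic critical section from inside an
 * lthread. lthread_mutex_lock() may suspend the caller, so it must only be
 * used from lthread context; shared_counter is a hypothetical shared object.
 *
 *	lthread_mutex_lock(m);
 *	shared_counter++;	// protected by m
 *	lthread_mutex_unlock(m);
 */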
/* try to lock a mutex but don't block */
int lthread_mutex_trylock(struct lthread_mutex *m)
{
	struct lthread *lt = THIS_LTHREAD;

	if ((m == NULL) || (m->blocked == NULL)) {
		DIAG_EVENT(m, LT_DIAG_MUTEX_TRYLOCK, m, POSIX_ERRNO(EINVAL));
		return POSIX_ERRNO(EINVAL);
	}
	if (m->owner == lt) {
		/* no recursion */
		DIAG_EVENT(m, LT_DIAG_MUTEX_TRYLOCK, m, POSIX_ERRNO(EDEADLK));
		return POSIX_ERRNO(EDEADLK);
	}
	__atomic_fetch_add(&m->count, 1, __ATOMIC_RELAXED);
	uint64_t lt_init = 0;

	if (__atomic_compare_exchange_n((uint64_t *) &m->owner, &lt_init,
		(uint64_t) lt, 0, __ATOMIC_RELAXED, __ATOMIC_RELAXED)) {
		/* got the lock */
		DIAG_EVENT(m, LT_DIAG_MUTEX_TRYLOCK, m, 0);
		return 0;
	}
	/* failed so return busy */
	__atomic_fetch_sub(&m->count, 1, __ATOMIC_RELAXED);
	DIAG_EVENT(m, LT_DIAG_MUTEX_TRYLOCK, m, POSIX_ERRNO(EBUSY));
	return POSIX_ERRNO(EBUSY);
}
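/*
 * Usage sketch (illustrative): polling with trylock instead of blocking.
 * The loop shape is an assumption; only the EBUSY return comes from the
 * function above. Yielding between attempts lets the scheduler run other
 * lthreads; lthread_yield() is part of the lthread API.
 *
 *	while (lthread_mutex_trylock(m) == POSIX_ERRNO(EBUSY))
 *		lthread_yield();	// let other lthreads make progress
 */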
/*
 * Unlock a mutex
 */
int lthread_mutex_unlock(struct lthread_mutex *m)
{
	struct lthread *lt = THIS_LTHREAD;
	struct lthread *unblocked;

	if ((m == NULL) || (m->blocked == NULL)) {
		DIAG_EVENT(m, LT_DIAG_MUTEX_UNLOCKED, m, POSIX_ERRNO(EINVAL));
		return POSIX_ERRNO(EINVAL);
	}
	/* fail if the caller does not own it */
	if (m->owner != lt || m->owner == NULL) {
		DIAG_EVENT(m, LT_DIAG_MUTEX_UNLOCKED, m, POSIX_ERRNO(EPERM));
		return POSIX_ERRNO(EPERM);
	}
	__atomic_fetch_sub(&m->count, 1, __ATOMIC_RELAXED);
	/* if there are blocked threads then make one ready */
	while (__atomic_load_n(&m->count, __ATOMIC_RELAXED) > 0) {
		unblocked = _lthread_queue_remove(m->blocked);

		if (unblocked != NULL) {
			__atomic_fetch_sub(&m->count, 1, __ATOMIC_RELAXED);
			DIAG_EVENT(m, LT_DIAG_MUTEX_UNLOCKED, m, unblocked);
			RTE_ASSERT(unblocked->sched != NULL);
			_ready_queue_insert((struct lthread_sched *)
					    unblocked->sched, unblocked);
			break;
		}
	}
	/* release the lock */
	m->owner = NULL;
	return 0;
}
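/*
 * Note on ownership (illustrative): unlock is only valid from the owning
 * lthread; any other caller gets EPERM and the lock state is unchanged.
 *
 *	if (lthread_mutex_unlock(m) == POSIX_ERRNO(EPERM)) {
 *		// we did not hold m; nothing was released
 *	}
 */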
/*
 * return the diagnostic ref val stored in a mutex
 */
uint64_t
lthread_mutex_diag_ref(struct lthread_mutex *m)
{
	if (m == NULL)
		return 0;
	return m->diag_ref;
}