dpdk.git: examples/performance-thread/common/lthread_mutex.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015 Intel Corporation
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <stddef.h>
#include <limits.h>
#include <inttypes.h>
#include <unistd.h>
#include <pthread.h>
#include <fcntl.h>
#include <sys/time.h>
#include <sys/mman.h>

#include <rte_per_lcore.h>
#include <rte_log.h>
#include <rte_spinlock.h>
#include <rte_common.h>
#include <rte_string_fns.h>

#include "lthread_api.h"
#include "lthread_int.h"
#include "lthread_mutex.h"
#include "lthread_sched.h"
#include "lthread_queue.h"
#include "lthread_objcache.h"
#include "lthread_diag.h"

/*
 * Create a mutex
 */
int
lthread_mutex_init(char *name, struct lthread_mutex **mutex,
                   __rte_unused const struct lthread_mutexattr *attr)
{
        struct lthread_mutex *m;

        if (mutex == NULL)
                return POSIX_ERRNO(EINVAL);

        m = _lthread_objcache_alloc((THIS_SCHED)->mutex_cache);
        if (m == NULL)
                return POSIX_ERRNO(EAGAIN);

        m->blocked = _lthread_queue_create("blocked queue");
        if (m->blocked == NULL) {
                _lthread_objcache_free((THIS_SCHED)->mutex_cache, m);
                return POSIX_ERRNO(EAGAIN);
        }

        if (name == NULL)
                strlcpy(m->name, "no name", sizeof(m->name));
        else
                strlcpy(m->name, name, sizeof(m->name));

        m->root_sched = THIS_SCHED;
        m->owner = NULL;

        __atomic_store_n(&m->count, 0, __ATOMIC_RELAXED);

        DIAG_CREATE_EVENT(m, LT_DIAG_MUTEX_CREATE);
        /* success */
        (*mutex) = m;
        return 0;
}

/*
 * Destroy a mutex
 */
int lthread_mutex_destroy(struct lthread_mutex *m)
{
        if ((m == NULL) || (m->blocked == NULL)) {
                DIAG_EVENT(m, LT_DIAG_MUTEX_DESTROY, m, POSIX_ERRNO(EINVAL));
                return POSIX_ERRNO(EINVAL);
        }

        if (m->owner == NULL) {
                /* try to delete the blocked queue */
                if (_lthread_queue_destroy(m->blocked) < 0) {
                        DIAG_EVENT(m, LT_DIAG_MUTEX_DESTROY,
                                        m, POSIX_ERRNO(EBUSY));
                        return POSIX_ERRNO(EBUSY);
                }

                /* free the mutex to cache */
                _lthread_objcache_free(m->root_sched->mutex_cache, m);
                DIAG_EVENT(m, LT_DIAG_MUTEX_DESTROY, m, 0);
                return 0;
        }
        /* can't destroy it, it is still in use */
        DIAG_EVENT(m, LT_DIAG_MUTEX_DESTROY, m, POSIX_ERRNO(EBUSY));
        return POSIX_ERRNO(EBUSY);
}
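
/*
 * Illustrative sketch (hypothetical, not part of the API above): a typical
 * create/destroy lifecycle as seen from code running inside an lthread.
 * The mutex name and the example_* identifiers are made up for illustration.
 */
static struct lthread_mutex *example_mutex;

static int
example_mutex_setup(void)
{
        /* returns 0 on success, a POSIX errno value otherwise */
        return lthread_mutex_init("example", &example_mutex, NULL);
}

static int
example_mutex_teardown(void)
{
        /* fails with EBUSY while the mutex is still owned or has waiters */
        return lthread_mutex_destroy(example_mutex);
}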

/*
 * Try to obtain a mutex
 */
int lthread_mutex_lock(struct lthread_mutex *m)
{
        struct lthread *lt = THIS_LTHREAD;

        if ((m == NULL) || (m->blocked == NULL)) {
                DIAG_EVENT(m, LT_DIAG_MUTEX_LOCK, m, POSIX_ERRNO(EINVAL));
                return POSIX_ERRNO(EINVAL);
        }

        /* allow no recursion */
        if (m->owner == lt) {
                DIAG_EVENT(m, LT_DIAG_MUTEX_LOCK, m, POSIX_ERRNO(EDEADLK));
                return POSIX_ERRNO(EDEADLK);
        }

        for (;;) {
                __atomic_fetch_add(&m->count, 1, __ATOMIC_RELAXED);
                do {
                        uint64_t lt_init = 0;
                        if (__atomic_compare_exchange_n((uint64_t *) &m->owner, &lt_init,
                                (uint64_t) lt, 0, __ATOMIC_RELAXED, __ATOMIC_RELAXED)) {
                                /* happy days, we got the lock */
                                DIAG_EVENT(m, LT_DIAG_MUTEX_LOCK, m, 0);
                                return 0;
                        }
                        /* spin due to race with unlock when
                         * nothing was blocked
                         */
                } while ((__atomic_load_n(&m->count, __ATOMIC_RELAXED) == 1) &&
                                (m->owner == NULL));

                /* queue the current thread in the blocked queue
                 * we defer this to after we return to the scheduler
                 * to ensure that the current thread context is saved
                 * before unlock could result in it being dequeued and
                 * resumed
                 */
                DIAG_EVENT(m, LT_DIAG_MUTEX_BLOCKED, m, lt);
                lt->pending_wr_queue = m->blocked;
                /* now relinquish cpu */
                _suspend();
                /* resumed, must loop and compete for the lock again */
        }
        return 0;
}

/* try to lock a mutex but don't block */
int lthread_mutex_trylock(struct lthread_mutex *m)
{
        struct lthread *lt = THIS_LTHREAD;

        if ((m == NULL) || (m->blocked == NULL)) {
                DIAG_EVENT(m, LT_DIAG_MUTEX_TRYLOCK, m, POSIX_ERRNO(EINVAL));
                return POSIX_ERRNO(EINVAL);
        }

        if (m->owner == lt) {
                /* no recursion */
                DIAG_EVENT(m, LT_DIAG_MUTEX_TRYLOCK, m, POSIX_ERRNO(EDEADLK));
                return POSIX_ERRNO(EDEADLK);
        }

        __atomic_fetch_add(&m->count, 1, __ATOMIC_RELAXED);
        uint64_t lt_init = 0;
        if (__atomic_compare_exchange_n((uint64_t *) &m->owner, &lt_init,
                (uint64_t) lt, 0, __ATOMIC_RELAXED, __ATOMIC_RELAXED)) {
                /* got the lock */
                DIAG_EVENT(m, LT_DIAG_MUTEX_TRYLOCK, m, 0);
                return 0;
        }

        /* failed so return busy */
        __atomic_fetch_sub(&m->count, 1, __ATOMIC_RELAXED);
        DIAG_EVENT(m, LT_DIAG_MUTEX_TRYLOCK, m, POSIX_ERRNO(EBUSY));
        return POSIX_ERRNO(EBUSY);
}

/*
 * Unlock a mutex
 */
int lthread_mutex_unlock(struct lthread_mutex *m)
{
        struct lthread *lt = THIS_LTHREAD;
        struct lthread *unblocked;

        if ((m == NULL) || (m->blocked == NULL)) {
                DIAG_EVENT(m, LT_DIAG_MUTEX_UNLOCKED, m, POSIX_ERRNO(EINVAL));
                return POSIX_ERRNO(EINVAL);
        }

        /* fail if the caller does not own it */
        if (m->owner != lt || m->owner == NULL) {
                DIAG_EVENT(m, LT_DIAG_MUTEX_UNLOCKED, m, POSIX_ERRNO(EPERM));
                return POSIX_ERRNO(EPERM);
        }

        __atomic_fetch_sub(&m->count, 1, __ATOMIC_RELAXED);
        /* if there are blocked threads then make one ready */
        while (__atomic_load_n(&m->count, __ATOMIC_RELAXED) > 0) {
                unblocked = _lthread_queue_remove(m->blocked);

                if (unblocked != NULL) {
                        __atomic_fetch_sub(&m->count, 1, __ATOMIC_RELAXED);
                        DIAG_EVENT(m, LT_DIAG_MUTEX_UNLOCKED, m, unblocked);
                        RTE_ASSERT(unblocked->sched != NULL);
                        _ready_queue_insert((struct lthread_sched *)
                                            unblocked->sched, unblocked);
                        break;
                }
        }
        /* release the lock */
        m->owner = NULL;
        return 0;
}

/*
 * return the diagnostic ref val stored in a mutex
 */
uint64_t
lthread_mutex_diag_ref(struct lthread_mutex *m)
{
        if (m == NULL)
                return 0;
        return m->diag_ref;
}
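
/*
 * Illustrative sketch (hypothetical, not part of this file's API): guarding a
 * critical section from an lthread. example_counter is a made-up variable,
 * and trying trylock before falling back to the blocking lock is only one
 * possible way to use the functions above.
 */
static uint64_t example_counter;

static void
example_increment(struct lthread_mutex *m)
{
        /* fast path: a non-zero return (e.g. EBUSY) means we did not get it */
        if (lthread_mutex_trylock(m) != 0)
                lthread_mutex_lock(m);  /* block until unlock resumes us */

        example_counter++;              /* critical section, serialised by m */

        lthread_mutex_unlock(m);        /* wakes one blocked lthread, if any */
}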