enforce experimental tag at beginning of declarations
[dpdk.git] / lib / librte_eal / common / include / generic / rte_ticketlock.h
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2019 Arm Limited
3  */
4
5 #ifndef _RTE_TICKETLOCK_H_
6 #define _RTE_TICKETLOCK_H_
7
8 /**
9  * @file
10  *
11  * RTE ticket locks
12  *
13  * This file defines an API for ticket locks, which give each waiting
14  * thread a ticket and take the lock one by one, first come, first
15  * serviced.
16  *
17  * All locks must be initialised before use, and only initialised once.
18  *
19  */
20
21 #ifdef __cplusplus
22 extern "C" {
23 #endif
24
25 #include <rte_common.h>
26 #include <rte_lcore.h>
27 #include <rte_pause.h>
28
/**
 * The rte_ticketlock_t type.
 *
 * A ticket lock is a FIFO spinlock: each locker takes a "next" ticket
 * and spins until "current" reaches it.  Both counters share one
 * 32-bit word so they can be snapshotted and compare-exchanged
 * atomically (see rte_ticketlock_trylock()).
 */
typedef union {
	uint32_t tickets;         /**< both counters, accessed as one word */
	struct {
		uint16_t current; /**< ticket currently being served */
		uint16_t next;    /**< next free ticket to hand out */
	} s;
} rte_ticketlock_t;

/**
 * A static ticketlock initializer.
 *
 * Equivalent to rte_ticketlock_init(): current == next == 0, unlocked.
 */
#define RTE_TICKETLOCK_INITIALIZER { 0 }
44
/**
 * Initialize the ticketlock to an unlocked state.
 *
 * Zeroes both counters (current and next) with a single relaxed atomic
 * store; any publication of the lock to other threads is the caller's
 * responsibility.
 *
 * @param tl
 *   A pointer to the ticketlock.
 */
__rte_experimental
static inline void
rte_ticketlock_init(rte_ticketlock_t *tl)
{
	__atomic_store_n(&tl->tickets, 0, __ATOMIC_RELAXED);
}
57
/**
 * Take the ticketlock.
 *
 * Spins until this caller's ticket is served; contenders are granted
 * the lock in FIFO order of their fetch-add below.
 *
 * @param tl
 *   A pointer to the ticketlock.
 */
__rte_experimental
static inline void
rte_ticketlock_lock(rte_ticketlock_t *tl)
{
	/* Grab the next free ticket; relaxed is enough, the required
	 * ordering comes from the acquire load below.
	 */
	uint16_t me = __atomic_fetch_add(&tl->s.next, 1, __ATOMIC_RELAXED);
	/* Wait until our ticket is being served; the acquire load pairs
	 * with the release store in rte_ticketlock_unlock().
	 */
	while (__atomic_load_n(&tl->s.current, __ATOMIC_ACQUIRE) != me)
		rte_pause();
}
72
/**
 * Release the ticketlock.
 *
 * Serves the next waiting ticket.  Must only be called by the current
 * holder, which is why the relaxed load of "current" cannot race: no
 * one else writes it while we hold the lock.
 *
 * @param tl
 *   A pointer to the ticketlock.
 */
__rte_experimental
static inline void
rte_ticketlock_unlock(rte_ticketlock_t *tl)
{
	uint16_t i = __atomic_load_n(&tl->s.current, __ATOMIC_RELAXED);
	/* Release pairs with the acquire load in rte_ticketlock_lock(),
	 * publishing the critical section to the next holder.
	 */
	__atomic_store_n(&tl->s.current, i + 1, __ATOMIC_RELEASE);
}
86
87 /**
88  * Try to take the lock.
89  *
90  * @param tl
91  *   A pointer to the ticketlock.
92  * @return
93  *   1 if the lock is successfully taken; 0 otherwise.
94  */
95 __rte_experimental
96 static inline int
97 rte_ticketlock_trylock(rte_ticketlock_t *tl)
98 {
99         rte_ticketlock_t old, new;
100         old.tickets = __atomic_load_n(&tl->tickets, __ATOMIC_RELAXED);
101         new.tickets = old.tickets;
102         new.s.next++;
103         if (old.s.next == old.s.current) {
104                 if (__atomic_compare_exchange_n(&tl->tickets, &old.tickets,
105                     new.tickets, 0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
106                         return 1;
107         }
108
109         return 0;
110 }
111
112 /**
113  * Test if the lock is taken.
114  *
115  * @param tl
116  *   A pointer to the ticketlock.
117  * @return
118  *   1 if the lock is currently taken; 0 otherwise.
119  */
120 __rte_experimental
121 static inline int
122 rte_ticketlock_is_locked(rte_ticketlock_t *tl)
123 {
124         rte_ticketlock_t tic;
125         tic.tickets = __atomic_load_n(&tl->tickets, __ATOMIC_ACQUIRE);
126         return (tic.s.current != tic.s.next);
127 }
128
/**
 * The rte_ticketlock_recursive_t type.
 */

/** Owner id value meaning "no thread currently holds the lock". */
#define TICKET_LOCK_INVALID_ID -1

typedef struct {
	rte_ticketlock_t tl; /**< the actual ticketlock */
	int user; /**< core id using lock, TICKET_LOCK_INVALID_ID for unused */
	unsigned int count; /**< count of time this lock has been called */
} rte_ticketlock_recursive_t;

/**
 * A static recursive ticketlock initializer.
 *
 * Unlocked base lock, no owner, recursion depth zero.
 */
#define RTE_TICKETLOCK_RECURSIVE_INITIALIZER {RTE_TICKETLOCK_INITIALIZER, \
					      TICKET_LOCK_INVALID_ID, 0}
145
/**
 * Initialize the recursive ticketlock to an unlocked state.
 *
 * Resets the base lock, clears the owner id and zeroes the recursion
 * depth.  Must not be called on a lock that is in use.
 *
 * @param tlr
 *   A pointer to the recursive ticketlock.
 */
__rte_experimental
static inline void
rte_ticketlock_recursive_init(rte_ticketlock_recursive_t *tlr)
{
	rte_ticketlock_init(&tlr->tl);
	__atomic_store_n(&tlr->user, TICKET_LOCK_INVALID_ID, __ATOMIC_RELAXED);
	tlr->count = 0;
}
160
/**
 * Take the recursive ticketlock.
 *
 * Blocks only when another thread owns the lock; if the caller already
 * holds it, just increments the recursion depth.
 *
 * @param tlr
 *   A pointer to the recursive ticketlock.
 */
__rte_experimental
static inline void
rte_ticketlock_recursive_lock(rte_ticketlock_recursive_t *tlr)
{
	int id = rte_gettid();

	if (__atomic_load_n(&tlr->user, __ATOMIC_RELAXED) != id) {
		rte_ticketlock_lock(&tlr->tl);
		/* Relaxed suffices: only the holder writes "user", and
		 * readers only care about equality with their own id.
		 */
		__atomic_store_n(&tlr->user, id, __ATOMIC_RELAXED);
	}
	/* "count" is only ever touched by the owner; no atomics needed. */
	tlr->count++;
}
179
/**
 * Release the recursive ticketlock.
 *
 * Decrements the recursion depth; the underlying lock is released only
 * when the depth returns to zero.  Must be called by the lock holder,
 * once per successful lock/trylock.
 *
 * @param tlr
 *   A pointer to the recursive ticketlock.
 */
__rte_experimental
static inline void
rte_ticketlock_recursive_unlock(rte_ticketlock_recursive_t *tlr)
{
	if (--(tlr->count) == 0) {
		/* Clear ownership before releasing the base lock so the
		 * next holder never observes a stale owner id.
		 */
		__atomic_store_n(&tlr->user, TICKET_LOCK_INVALID_ID,
				 __ATOMIC_RELAXED);
		rte_ticketlock_unlock(&tlr->tl);
	}
}
196
/**
 * Try to take the recursive lock.
 *
 * Never blocks: if another thread owns the lock the attempt fails; if
 * the caller already owns it, the recursion depth is incremented.
 *
 * @param tlr
 *   A pointer to the recursive ticketlock.
 * @return
 *   1 if the lock is successfully taken; 0 otherwise.
 */
__rte_experimental
static inline int
rte_ticketlock_recursive_trylock(rte_ticketlock_recursive_t *tlr)
{
	int id = rte_gettid();

	if (__atomic_load_n(&tlr->user, __ATOMIC_RELAXED) != id) {
		if (rte_ticketlock_trylock(&tlr->tl) == 0)
			return 0;
		/* We now own the base lock; record ownership (relaxed:
		 * only the holder writes "user").
		 */
		__atomic_store_n(&tlr->user, id, __ATOMIC_RELAXED);
	}
	/* Owner-only field; no atomics needed. */
	tlr->count++;
	return 1;
}
219
220 #ifdef __cplusplus
221 }
222 #endif
223
224 #endif /* _RTE_TICKETLOCK_H_ */