ticketlock: introduce fair ticket based locking
[dpdk.git] / lib / librte_eal / common / include / generic / rte_ticketlock.h
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2019 Arm Limited
3  */
4
5 #ifndef _RTE_TICKETLOCK_H_
6 #define _RTE_TICKETLOCK_H_
7
8 /**
9  * @file
10  *
11  * RTE ticket locks
12  *
 * This file defines an API for ticket locks, which hand each waiting
 * thread a ticket and grant the lock in ticket order: first come,
 * first served.
16  *
17  * All locks must be initialised before use, and only initialised once.
18  *
19  */
20
21 #ifdef __cplusplus
22 extern "C" {
23 #endif
24
25 #include <rte_common.h>
26 #include <rte_lcore.h>
27 #include <rte_pause.h>
28
/**
 * The rte_ticketlock_t type.
 *
 * The whole lock fits in one 32-bit word so it can be read/CASed
 * atomically as `tickets`; the two 16-bit halves are the classic
 * ticket-lock pair. The lock is free exactly when current == next.
 */
typedef union {
	uint32_t tickets; /**< both halves as one word, for whole-lock atomics */
	struct {
		uint16_t current; /**< ticket number currently being served */
		uint16_t next;    /**< next ticket to hand out to a waiter */
	} s;
} rte_ticketlock_t;

/**
 * A static ticketlock initializer.
 */
#define RTE_TICKETLOCK_INITIALIZER { 0 }
44
45 /**
46  * Initialize the ticketlock to an unlocked state.
47  *
48  * @param tl
49  *   A pointer to the ticketlock.
50  */
51 static inline __rte_experimental void
52 rte_ticketlock_init(rte_ticketlock_t *tl)
53 {
54         __atomic_store_n(&tl->tickets, 0, __ATOMIC_RELAXED);
55 }
56
57 /**
58  * Take the ticketlock.
59  *
60  * @param tl
61  *   A pointer to the ticketlock.
62  */
63 static inline __rte_experimental void
64 rte_ticketlock_lock(rte_ticketlock_t *tl)
65 {
66         uint16_t me = __atomic_fetch_add(&tl->s.next, 1, __ATOMIC_RELAXED);
67         while (__atomic_load_n(&tl->s.current, __ATOMIC_ACQUIRE) != me)
68                 rte_pause();
69 }
70
71 /**
72  * Release the ticketlock.
73  *
74  * @param tl
75  *   A pointer to the ticketlock.
76  */
77 static inline __rte_experimental void
78 rte_ticketlock_unlock(rte_ticketlock_t *tl)
79 {
80         uint16_t i = __atomic_load_n(&tl->s.current, __ATOMIC_RELAXED);
81         __atomic_store_n(&tl->s.current, i + 1, __ATOMIC_RELEASE);
82 }
83
84 /**
85  * Try to take the lock.
86  *
87  * @param tl
88  *   A pointer to the ticketlock.
89  * @return
90  *   1 if the lock is successfully taken; 0 otherwise.
91  */
92 static inline __rte_experimental int
93 rte_ticketlock_trylock(rte_ticketlock_t *tl)
94 {
95         rte_ticketlock_t old, new;
96         old.tickets = __atomic_load_n(&tl->tickets, __ATOMIC_RELAXED);
97         new.tickets = old.tickets;
98         new.s.next++;
99         if (old.s.next == old.s.current) {
100                 if (__atomic_compare_exchange_n(&tl->tickets, &old.tickets,
101                     new.tickets, 0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
102                         return 1;
103         }
104
105         return 0;
106 }
107
108 /**
109  * Test if the lock is taken.
110  *
111  * @param tl
112  *   A pointer to the ticketlock.
113  * @return
114  *   1 if the lock is currently taken; 0 otherwise.
115  */
116 static inline __rte_experimental int
117 rte_ticketlock_is_locked(rte_ticketlock_t *tl)
118 {
119         rte_ticketlock_t tic;
120         tic.tickets = __atomic_load_n(&tl->tickets, __ATOMIC_ACQUIRE);
121         return (tic.s.current != tic.s.next);
122 }
123
/**
 * The rte_ticketlock_recursive_t type.
 *
 * Wraps a plain ticketlock with an owner id and a nesting counter so
 * the same thread may re-acquire the lock it already holds.
 */
/* Sentinel owner id meaning "nobody holds the lock". */
#define TICKET_LOCK_INVALID_ID -1

typedef struct {
	rte_ticketlock_t tl; /**< the actual ticketlock */
	int user; /**< core id using lock, TICKET_LOCK_INVALID_ID for unused */
	unsigned int count; /**< count of time this lock has been called */
} rte_ticketlock_recursive_t;

/**
 * A static recursive ticketlock initializer.
 */
#define RTE_TICKETLOCK_RECURSIVE_INITIALIZER {RTE_TICKETLOCK_INITIALIZER, \
					      TICKET_LOCK_INVALID_ID, 0}
140
141 /**
142  * Initialize the recursive ticketlock to an unlocked state.
143  *
144  * @param tlr
145  *   A pointer to the recursive ticketlock.
146  */
147 static inline __rte_experimental void
148 rte_ticketlock_recursive_init(rte_ticketlock_recursive_t *tlr)
149 {
150         rte_ticketlock_init(&tlr->tl);
151         __atomic_store_n(&tlr->user, TICKET_LOCK_INVALID_ID, __ATOMIC_RELAXED);
152         tlr->count = 0;
153 }
154
155 /**
156  * Take the recursive ticketlock.
157  *
158  * @param tlr
159  *   A pointer to the recursive ticketlock.
160  */
161 static inline __rte_experimental void
162 rte_ticketlock_recursive_lock(rte_ticketlock_recursive_t *tlr)
163 {
164         int id = rte_gettid();
165
166         if (__atomic_load_n(&tlr->user, __ATOMIC_RELAXED) != id) {
167                 rte_ticketlock_lock(&tlr->tl);
168                 __atomic_store_n(&tlr->user, id, __ATOMIC_RELAXED);
169         }
170         tlr->count++;
171 }
172
173 /**
174  * Release the recursive ticketlock.
175  *
176  * @param tlr
177  *   A pointer to the recursive ticketlock.
178  */
179 static inline __rte_experimental void
180 rte_ticketlock_recursive_unlock(rte_ticketlock_recursive_t *tlr)
181 {
182         if (--(tlr->count) == 0) {
183                 __atomic_store_n(&tlr->user, TICKET_LOCK_INVALID_ID,
184                                  __ATOMIC_RELAXED);
185                 rte_ticketlock_unlock(&tlr->tl);
186         }
187 }
188
189 /**
190  * Try to take the recursive lock.
191  *
192  * @param tlr
193  *   A pointer to the recursive ticketlock.
194  * @return
195  *   1 if the lock is successfully taken; 0 otherwise.
196  */
197 static inline __rte_experimental int
198 rte_ticketlock_recursive_trylock(rte_ticketlock_recursive_t *tlr)
199 {
200         int id = rte_gettid();
201
202         if (__atomic_load_n(&tlr->user, __ATOMIC_RELAXED) != id) {
203                 if (rte_ticketlock_trylock(&tlr->tl) == 0)
204                         return 0;
205                 __atomic_store_n(&tlr->user, id, __ATOMIC_RELAXED);
206         }
207         tlr->count++;
208         return 1;
209 }
210
211 #ifdef __cplusplus
212 }
213 #endif
214
215 #endif /* _RTE_TICKETLOCK_H_ */