remove useless include of EAL memory config header
[dpdk.git] / lib / librte_rcu / rte_rcu_qsbr.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  *
3  * Copyright (c) 2018 Arm Limited
4  */
5
6 #include <stdio.h>
7 #include <string.h>
8 #include <stdint.h>
9 #include <inttypes.h>
10 #include <errno.h>
11
12 #include <rte_common.h>
13 #include <rte_log.h>
14 #include <rte_memory.h>
15 #include <rte_malloc.h>
16 #include <rte_eal.h>
17 #include <rte_atomic.h>
18 #include <rte_per_lcore.h>
19 #include <rte_lcore.h>
20 #include <rte_errno.h>
21
22 #include "rte_rcu_qsbr.h"
23
24 /* Get the memory size of QSBR variable */
25 size_t
26 rte_rcu_qsbr_get_memsize(uint32_t max_threads)
27 {
28         size_t sz;
29
30         if (max_threads == 0) {
31                 rte_log(RTE_LOG_ERR, rte_rcu_log_type,
32                         "%s(): Invalid max_threads %u\n",
33                         __func__, max_threads);
34                 rte_errno = EINVAL;
35
36                 return 1;
37         }
38
39         sz = sizeof(struct rte_rcu_qsbr);
40
41         /* Add the size of quiescent state counter array */
42         sz += sizeof(struct rte_rcu_qsbr_cnt) * max_threads;
43
44         /* Add the size of the registered thread ID bitmap array */
45         sz += __RTE_QSBR_THRID_ARRAY_SIZE(max_threads);
46
47         return sz;
48 }
49
50 /* Initialize a quiescent state variable */
51 int
52 rte_rcu_qsbr_init(struct rte_rcu_qsbr *v, uint32_t max_threads)
53 {
54         size_t sz;
55
56         if (v == NULL) {
57                 rte_log(RTE_LOG_ERR, rte_rcu_log_type,
58                         "%s(): Invalid input parameter\n", __func__);
59                 rte_errno = EINVAL;
60
61                 return 1;
62         }
63
64         sz = rte_rcu_qsbr_get_memsize(max_threads);
65         if (sz == 1)
66                 return 1;
67
68         /* Set all the threads to offline */
69         memset(v, 0, sz);
70         v->max_threads = max_threads;
71         v->num_elems = RTE_ALIGN_MUL_CEIL(max_threads,
72                         __RTE_QSBR_THRID_ARRAY_ELM_SIZE) /
73                         __RTE_QSBR_THRID_ARRAY_ELM_SIZE;
74         v->token = __RTE_QSBR_CNT_INIT;
75
76         return 0;
77 }
78
/* Register a reader thread to report its quiescent state
 * on a QS variable.
 *
 * Sets the thread's bit in the registration bitmap and bumps
 * v->num_threads exactly once, even under concurrent registration
 * of the same thread_id. Returns 0 on success (including when the
 * thread was already registered), 1 on invalid input (rte_errno set
 * to EINVAL).
 */
int
rte_rcu_qsbr_thread_register(struct rte_rcu_qsbr *v, unsigned int thread_id)
{
	unsigned int i, id, success;
	uint64_t old_bmap, new_bmap;

	if (v == NULL || thread_id >= v->max_threads) {
		rte_log(RTE_LOG_ERR, rte_rcu_log_type,
			"%s(): Invalid input parameter\n", __func__);
		rte_errno = EINVAL;

		return 1;
	}

	/* A thread must not re-register while inside a lock-protected
	 * critical section (debug-only sanity check).
	 */
	__RTE_RCU_IS_LOCK_CNT_ZERO(v, thread_id, ERR, "Lock counter %u\n",
				v->qsbr_cnt[thread_id].lock_cnt);

	/* Split thread_id into bit position within a bitmap word (id)
	 * and bitmap word index (i).
	 */
	id = thread_id & __RTE_QSBR_THRID_MASK;
	i = thread_id >> __RTE_QSBR_THRID_INDEX_SHIFT;

	/* Make sure that the counter for registered threads does not
	 * go out of sync. Hence, additional checks are required.
	 */
	/* Check if the thread is already registered */
	old_bmap = __atomic_load_n(__RTE_QSBR_THRID_ARRAY_ELM(v, i),
					__ATOMIC_RELAXED);
	if (old_bmap & 1UL << id)
		return 0;

	do {
		new_bmap = old_bmap | (1UL << id);
		/* RELEASE on success orders this thread's prior writes
		 * before writers observe it as registered; on failure
		 * old_bmap is reloaded with RELAXED for the retry.
		 */
		success = __atomic_compare_exchange(
					__RTE_QSBR_THRID_ARRAY_ELM(v, i),
					&old_bmap, &new_bmap, 0,
					__ATOMIC_RELEASE, __ATOMIC_RELAXED);

		if (success)
			__atomic_fetch_add(&v->num_threads,
						1, __ATOMIC_RELAXED);
		else if (old_bmap & (1UL << id))
			/* Someone else registered this thread.
			 * Counter should not be incremented.
			 */
			return 0;
	} while (success == 0);

	return 0;
}
130
131 /* Remove a reader thread, from the list of threads reporting their
132  * quiescent state on a QS variable.
133  */
134 int
135 rte_rcu_qsbr_thread_unregister(struct rte_rcu_qsbr *v, unsigned int thread_id)
136 {
137         unsigned int i, id, success;
138         uint64_t old_bmap, new_bmap;
139
140         if (v == NULL || thread_id >= v->max_threads) {
141                 rte_log(RTE_LOG_ERR, rte_rcu_log_type,
142                         "%s(): Invalid input parameter\n", __func__);
143                 rte_errno = EINVAL;
144
145                 return 1;
146         }
147
148         __RTE_RCU_IS_LOCK_CNT_ZERO(v, thread_id, ERR, "Lock counter %u\n",
149                                 v->qsbr_cnt[thread_id].lock_cnt);
150
151         id = thread_id & __RTE_QSBR_THRID_MASK;
152         i = thread_id >> __RTE_QSBR_THRID_INDEX_SHIFT;
153
154         /* Make sure that the counter for registered threads does not
155          * go out of sync. Hence, additional checks are required.
156          */
157         /* Check if the thread is already unregistered */
158         old_bmap = __atomic_load_n(__RTE_QSBR_THRID_ARRAY_ELM(v, i),
159                                         __ATOMIC_RELAXED);
160         if (old_bmap & ~(1UL << id))
161                 return 0;
162
163         do {
164                 new_bmap = old_bmap & ~(1UL << id);
165                 /* Make sure any loads of the shared data structure are
166                  * completed before removal of the thread from the list of
167                  * reporting threads.
168                  */
169                 success = __atomic_compare_exchange(
170                                         __RTE_QSBR_THRID_ARRAY_ELM(v, i),
171                                         &old_bmap, &new_bmap, 0,
172                                         __ATOMIC_RELEASE, __ATOMIC_RELAXED);
173
174                 if (success)
175                         __atomic_fetch_sub(&v->num_threads,
176                                                 1, __ATOMIC_RELAXED);
177                 else if (old_bmap & ~(1UL << id))
178                         /* Someone else unregistered this thread.
179                          * Counter should not be incremented.
180                          */
181                         return 0;
182         } while (success == 0);
183
184         return 0;
185 }
186
187 /* Wait till the reader threads have entered quiescent state. */
188 void
189 rte_rcu_qsbr_synchronize(struct rte_rcu_qsbr *v, unsigned int thread_id)
190 {
191         uint64_t t;
192
193         RTE_ASSERT(v != NULL);
194
195         t = rte_rcu_qsbr_start(v);
196
197         /* If the current thread has readside critical section,
198          * update its quiescent state status.
199          */
200         if (thread_id != RTE_QSBR_THRID_INVALID)
201                 rte_rcu_qsbr_quiescent(v, thread_id);
202
203         /* Wait for other readers to enter quiescent state */
204         rte_rcu_qsbr_check(v, t, true);
205 }
206
207 /* Dump the details of a single quiescent state variable to a file. */
208 int
209 rte_rcu_qsbr_dump(FILE *f, struct rte_rcu_qsbr *v)
210 {
211         uint64_t bmap;
212         uint32_t i, t, id;
213
214         if (v == NULL || f == NULL) {
215                 rte_log(RTE_LOG_ERR, rte_rcu_log_type,
216                         "%s(): Invalid input parameter\n", __func__);
217                 rte_errno = EINVAL;
218
219                 return 1;
220         }
221
222         fprintf(f, "\nQuiescent State Variable @%p\n", v);
223
224         fprintf(f, "  QS variable memory size = %zu\n",
225                                 rte_rcu_qsbr_get_memsize(v->max_threads));
226         fprintf(f, "  Given # max threads = %u\n", v->max_threads);
227         fprintf(f, "  Current # threads = %u\n", v->num_threads);
228
229         fprintf(f, "  Registered thread IDs = ");
230         for (i = 0; i < v->num_elems; i++) {
231                 bmap = __atomic_load_n(__RTE_QSBR_THRID_ARRAY_ELM(v, i),
232                                         __ATOMIC_ACQUIRE);
233                 id = i << __RTE_QSBR_THRID_INDEX_SHIFT;
234                 while (bmap) {
235                         t = __builtin_ctzl(bmap);
236                         fprintf(f, "%u ", id + t);
237
238                         bmap &= ~(1UL << t);
239                 }
240         }
241
242         fprintf(f, "\n");
243
244         fprintf(f, "  Token = %"PRIu64"\n",
245                         __atomic_load_n(&v->token, __ATOMIC_ACQUIRE));
246
247         fprintf(f, "Quiescent State Counts for readers:\n");
248         for (i = 0; i < v->num_elems; i++) {
249                 bmap = __atomic_load_n(__RTE_QSBR_THRID_ARRAY_ELM(v, i),
250                                         __ATOMIC_ACQUIRE);
251                 id = i << __RTE_QSBR_THRID_INDEX_SHIFT;
252                 while (bmap) {
253                         t = __builtin_ctzl(bmap);
254                         fprintf(f, "thread ID = %u, count = %"PRIu64", lock count = %u\n",
255                                 id + t,
256                                 __atomic_load_n(
257                                         &v->qsbr_cnt[id + t].cnt,
258                                         __ATOMIC_RELAXED),
259                                 __atomic_load_n(
260                                         &v->qsbr_cnt[id + t].lock_cnt,
261                                         __ATOMIC_RELAXED));
262                         bmap &= ~(1UL << t);
263                 }
264         }
265
266         return 0;
267 }
268
/* Log type for the RCU library, used by every rte_log() call above. */
int rte_rcu_log_type;

/* Constructor: register the "lib.rcu" log type with EAL and default
 * its level to ERR. Runs automatically before main().
 */
RTE_INIT(rte_rcu_register)
{
	rte_rcu_log_type = rte_log_register("lib.rcu");
	if (rte_rcu_log_type >= 0)
		rte_log_set_level(rte_rcu_log_type, RTE_LOG_ERR);
}