drivers/event/octeontx/timvf_worker.h
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#include <rte_common.h>
#include <rte_branch_prediction.h>

#include "timvf_evdev.h"

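/*
 * Note: the timr_bkt_* helpers below operate on word 1 (w1) of
 * struct tim_mem_bucket. Going by the shift and mask macros used here
 * (TIM_BUCKET_W1_S_xxx and TIM_BUCKET_W1_M_xxx, from timvf_evdev.h), w1
 * packs the number of entries, the SBT/HBT/BSK control bits, what appears
 * to be an 8-bit lock count and a 16-bit chunk-remainder field.
 */
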
static inline int16_t
timr_bkt_fetch_rem(uint64_t w1)
{
        return (w1 >> TIM_BUCKET_W1_S_CHUNK_REMAINDER) &
                TIM_BUCKET_W1_M_CHUNK_REMAINDER;
}

static inline int16_t
timr_bkt_get_rem(struct tim_mem_bucket *bktp)
{
        return __atomic_load_n(&bktp->chunk_remainder,
                        __ATOMIC_ACQUIRE);
}

static inline void
timr_bkt_set_rem(struct tim_mem_bucket *bktp, uint16_t v)
{
        __atomic_store_n(&bktp->chunk_remainder, v,
                        __ATOMIC_RELEASE);
}

static inline void
timr_bkt_sub_rem(struct tim_mem_bucket *bktp, uint16_t v)
{
        __atomic_fetch_sub(&bktp->chunk_remainder, v,
                        __ATOMIC_RELEASE);
}

static inline uint8_t
timr_bkt_get_sbt(uint64_t w1)
{
        return (w1 >> TIM_BUCKET_W1_S_SBT) & TIM_BUCKET_W1_M_SBT;
}

static inline uint64_t
timr_bkt_set_sbt(struct tim_mem_bucket *bktp)
{
        const uint64_t v = TIM_BUCKET_W1_M_SBT << TIM_BUCKET_W1_S_SBT;
        return __atomic_fetch_or(&bktp->w1, v, __ATOMIC_ACQ_REL);
}

static inline uint64_t
timr_bkt_clr_sbt(struct tim_mem_bucket *bktp)
{
        const uint64_t v = ~(TIM_BUCKET_W1_M_SBT << TIM_BUCKET_W1_S_SBT);
        return __atomic_fetch_and(&bktp->w1, v, __ATOMIC_ACQ_REL);
}

static inline uint8_t
timr_bkt_get_shbt(uint64_t w1)
{
        return ((w1 >> TIM_BUCKET_W1_S_HBT) & TIM_BUCKET_W1_M_HBT) |
                ((w1 >> TIM_BUCKET_W1_S_SBT) & TIM_BUCKET_W1_M_SBT);
}

static inline uint8_t
timr_bkt_get_hbt(uint64_t w1)
{
        return (w1 >> TIM_BUCKET_W1_S_HBT) & TIM_BUCKET_W1_M_HBT;
}

static inline uint8_t
timr_bkt_get_bsk(uint64_t w1)
{
        return (w1 >> TIM_BUCKET_W1_S_BSK) & TIM_BUCKET_W1_M_BSK;
}

static inline uint64_t
timr_bkt_clr_bsk(struct tim_mem_bucket *bktp)
{
        /* Clear everything except the lock. */
        const uint64_t v = TIM_BUCKET_W1_M_LOCK << TIM_BUCKET_W1_S_LOCK;
        return __atomic_fetch_and(&bktp->w1, v, __ATOMIC_ACQ_REL);
}

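/*
 * The two fetch-add helpers below add the TIM_BUCKET_SEMA constants from
 * timvf_evdev.h to the whole w1 word and hand back the pre-add value.
 * timvf_add_entry_mp() below relies on the _WLOCK variant to both reserve
 * a chunk slot (it reads the remainder field of the returned word) and
 * take a lock reference that it later drops with timr_bkt_dec_lock().
 */
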
static inline uint64_t
timr_bkt_fetch_sema_lock(struct tim_mem_bucket *bktp)
{
        return __atomic_fetch_add(&bktp->w1, TIM_BUCKET_SEMA_WLOCK,
                        __ATOMIC_ACQ_REL);
}

static inline uint64_t
timr_bkt_fetch_sema(struct tim_mem_bucket *bktp)
{
        return __atomic_fetch_add(&bktp->w1, TIM_BUCKET_SEMA,
                        __ATOMIC_RELAXED);
}

static inline uint64_t
timr_bkt_inc_lock(struct tim_mem_bucket *bktp)
{
        const uint64_t v = 1ull << TIM_BUCKET_W1_S_LOCK;
        return __atomic_fetch_add(&bktp->w1, v, __ATOMIC_ACQ_REL);
}

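/*
 * Adding 0xff to the 8-bit lock counter wraps around, so this is an
 * atomic decrement of the lock reference count.
 */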
static inline void
timr_bkt_dec_lock(struct tim_mem_bucket *bktp)
{
        __atomic_add_fetch(&bktp->lock, 0xff, __ATOMIC_ACQ_REL);
}

static inline uint32_t
timr_bkt_get_nent(uint64_t w1)
{
        return (w1 >> TIM_BUCKET_W1_S_NUM_ENTRIES) &
                TIM_BUCKET_W1_M_NUM_ENTRIES;
}

static inline void
timr_bkt_inc_nent(struct tim_mem_bucket *bktp)
{
        __atomic_add_fetch(&bktp->nb_entry, 1, __ATOMIC_RELAXED);
}

static inline void
timr_bkt_add_nent(struct tim_mem_bucket *bktp, uint32_t v)
{
        __atomic_add_fetch(&bktp->nb_entry, v, __ATOMIC_RELAXED);
}

static inline uint64_t
timr_bkt_clr_nent(struct tim_mem_bucket *bktp)
{
        const uint64_t v = ~(TIM_BUCKET_W1_M_NUM_ENTRIES <<
                        TIM_BUCKET_W1_S_NUM_ENTRIES);
        return __atomic_and_fetch(&bktp->w1, v, __ATOMIC_ACQ_REL);
}

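/*
 * Chunks are chained through the slot just past the last timer entry:
 * entry nb_chunk_slots of a chunk holds the pointer to the next chunk.
 * timr_clr_bkt() walks that chain, returns every chunk except the first
 * one to the mempool and hands back the retained first chunk.
 */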
static inline struct tim_mem_entry *
timr_clr_bkt(struct timvf_ring * const timr, struct tim_mem_bucket * const bkt)
{
        struct tim_mem_entry *chunk;
        struct tim_mem_entry *pnext;
        chunk = ((struct tim_mem_entry *)(uintptr_t)bkt->first_chunk);
        chunk = (struct tim_mem_entry *)(uintptr_t)(chunk + nb_chunk_slots)->w0;

        while (chunk) {
                pnext = (struct tim_mem_entry *)(uintptr_t)
                        ((chunk + nb_chunk_slots)->w0);
                rte_mempool_put(timr->chunk_pool, chunk);
                chunk = pnext;
        }
        return (struct tim_mem_entry *)(uintptr_t)bkt->first_chunk;
}

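/*
 * Cancel an armed timer. impl_opaque[0] holds the chunk entry and
 * impl_opaque[1] the bucket recorded when the timer was armed; the entry
 * is cleared only if its WQE still matches the event and the bucket's
 * SBT/HBT bits are clear while it still has entries.
 */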
static inline int
timvf_rem_entry(struct rte_event_timer *tim)
{
        uint64_t lock_sema;
        struct tim_mem_entry *entry;
        struct tim_mem_bucket *bkt;
        if (tim->impl_opaque[1] == 0 ||
                        tim->impl_opaque[0] == 0)
                return -ENOENT;

        entry = (struct tim_mem_entry *)(uintptr_t)tim->impl_opaque[0];
        if (entry->wqe != tim->ev.u64) {
                tim->impl_opaque[1] = tim->impl_opaque[0] = 0;
                return -ENOENT;
        }
        bkt = (struct tim_mem_bucket *)(uintptr_t)tim->impl_opaque[1];
        lock_sema = timr_bkt_inc_lock(bkt);
        if (timr_bkt_get_shbt(lock_sema)
                        || !timr_bkt_get_nent(lock_sema)) {
                timr_bkt_dec_lock(bkt);
                tim->impl_opaque[1] = tim->impl_opaque[0] = 0;
                return -ENOENT;
        }

        entry->w0 = entry->wqe = 0;
        timr_bkt_dec_lock(bkt);

        tim->state = RTE_EVENT_TIMER_CANCELED;
        tim->impl_opaque[1] = tim->impl_opaque[0] = 0;
        return 0;
}

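/*
 * Hand the bucket a chunk to write into: allocate a fresh chunk from the
 * mempool and link it after the current one (or install it as the first
 * chunk of an empty bucket). If the bucket holds chunks but no entries,
 * the existing chain is trimmed with timr_clr_bkt() and its first chunk
 * is reused instead of allocating. The link slot of the returned chunk
 * is always zeroed.
 */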
static inline struct tim_mem_entry *
timvf_refill_chunk_generic(struct tim_mem_bucket * const bkt,
                struct timvf_ring * const timr)
{
        struct tim_mem_entry *chunk;

        if (bkt->nb_entry || !bkt->first_chunk) {
                if (unlikely(rte_mempool_get(timr->chunk_pool,
                                                (void **)&chunk))) {
                        return NULL;
                }
                if (bkt->nb_entry) {
                        *(uint64_t *)(((struct tim_mem_entry *)(uintptr_t)
                                        bkt->current_chunk) +
                                        nb_chunk_slots) =
                                (uintptr_t) chunk;
                } else {
                        bkt->first_chunk = (uintptr_t) chunk;
                }
        } else {
                chunk = timr_clr_bkt(timr, bkt);
                bkt->first_chunk = (uintptr_t)chunk;
        }
        *(uint64_t *)(chunk + nb_chunk_slots) = 0;

        return chunk;
}

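/*
 * Translate a relative timeout (in buckets) into a bucket pointer: the
 * cycles elapsed since the ring started are converted to an absolute
 * bucket count with the ring's precomputed reciprocal divisor, rel_bkt is
 * added, and get_target_bkt() maps the result onto the ring's nb_bkts
 * buckets.
 */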
static inline struct tim_mem_bucket *
timvf_get_target_bucket(struct timvf_ring * const timr, const uint32_t rel_bkt)
{
        const uint64_t bkt_cyc = rte_rdtsc() - timr->ring_start_cyc;
        const uint32_t bucket = rte_reciprocal_divide_u64(bkt_cyc,
                        &timr->fast_div) + rel_bkt;
        const uint32_t tbkt_id = timr->get_target_bkt(bucket,
                        timr->nb_bkts);
        return &timr->bkt[tbkt_id];
}

/* Multi producer functions. */
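
/*
 * Arm a timer when multiple producers may target the same ring: reserve a
 * slot in the target bucket with timr_bkt_fetch_sema_lock(), retry if the
 * bucket's SBT/HBT bits are set or the chunk was exhausted by a concurrent
 * producer, refill a new chunk when the observed remainder is zero (only
 * one producer can see that value), then copy the work entry and record
 * the chunk and bucket in impl_opaque[] so the timer can later be
 * cancelled.
 */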
static inline int
timvf_add_entry_mp(struct timvf_ring * const timr, const uint32_t rel_bkt,
                struct rte_event_timer * const tim,
                const struct tim_mem_entry * const pent)
{
        int16_t rem;
        uint64_t lock_sema;
        struct tim_mem_bucket *bkt;
        struct tim_mem_entry *chunk;

__retry:
        bkt = timvf_get_target_bucket(timr, rel_bkt);
        /* Bucket related checks: get the bucket semaphore and lock. */
        lock_sema = timr_bkt_fetch_sema_lock(bkt);
        if (unlikely(timr_bkt_get_shbt(lock_sema))) {
                timr_bkt_dec_lock(bkt);
                goto __retry;
        }

        rem = timr_bkt_fetch_rem(lock_sema);

        if (rem < 0) {
                /* No slots left; retry (possibly on a different bucket). */
                timr_bkt_dec_lock(bkt);
                goto __retry;
        } else if (!rem) {
                /* Only one thread can be here; refill the chunk. */
                chunk = timr->refill_chunk(bkt, timr);
                if (unlikely(chunk == NULL)) {
                        timr_bkt_set_rem(bkt, 0);
                        timr_bkt_dec_lock(bkt);
                        tim->impl_opaque[0] = tim->impl_opaque[1] = 0;
                        tim->state = RTE_EVENT_TIMER_ERROR;
                        return -ENOMEM;
                }
                bkt->current_chunk = (uintptr_t) chunk;
                timr_bkt_set_rem(bkt, nb_chunk_slots - 1);
        } else {
                chunk = (struct tim_mem_entry *)(uintptr_t)bkt->current_chunk;
                chunk += nb_chunk_slots - rem;
        }
        /* Copy the work entry. */
        *chunk = *pent;
        timr_bkt_inc_nent(bkt);
        timr_bkt_dec_lock(bkt);

        tim->impl_opaque[0] = (uintptr_t)chunk;
        tim->impl_opaque[1] = (uintptr_t)bkt;
        tim->state = RTE_EVENT_TIMER_ARMED;
        return 0;
}