/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#include <rte_common.h>
#include <rte_branch_prediction.h>

#include "timvf_evdev.h"
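
/*
 * The helpers below operate on the TIM bucket control word (w1) and its
 * sub-fields (chunk remainder, lock, SBT/HBT/BSK status bits, number of
 * entries). They use GCC __atomic builtins so that concurrent arm/cancel
 * paths, and the TIM block that drains the buckets, observe consistent
 * state without a global lock.
 */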
static inline int16_t
timr_bkt_fetch_rem(uint64_t w1)
{
	return (w1 >> TIM_BUCKET_W1_S_CHUNK_REMAINDER) &
		TIM_BUCKET_W1_M_CHUNK_REMAINDER;
}

static inline int16_t
timr_bkt_get_rem(struct tim_mem_bucket *bktp)
{
	return __atomic_load_n(&bktp->chunk_remainder,
			__ATOMIC_ACQUIRE);
}

static inline void
timr_bkt_set_rem(struct tim_mem_bucket *bktp, uint16_t v)
{
	__atomic_store_n(&bktp->chunk_remainder, v,
			__ATOMIC_RELEASE);
}

static inline void
timr_bkt_sub_rem(struct tim_mem_bucket *bktp, uint16_t v)
{
	__atomic_fetch_sub(&bktp->chunk_remainder, v,
			__ATOMIC_RELEASE);
}

static inline uint8_t
timr_bkt_get_sbt(uint64_t w1)
{
	return (w1 >> TIM_BUCKET_W1_S_SBT) & TIM_BUCKET_W1_M_SBT;
}

static inline uint64_t
timr_bkt_set_sbt(struct tim_mem_bucket *bktp)
{
	const uint64_t v = TIM_BUCKET_W1_M_SBT << TIM_BUCKET_W1_S_SBT;
	return __atomic_fetch_or(&bktp->w1, v, __ATOMIC_ACQ_REL);
}

static inline uint64_t
timr_bkt_clr_sbt(struct tim_mem_bucket *bktp)
{
	const uint64_t v = ~(TIM_BUCKET_W1_M_SBT << TIM_BUCKET_W1_S_SBT);
	return __atomic_fetch_and(&bktp->w1, v, __ATOMIC_ACQ_REL);
}

static inline uint8_t
timr_bkt_get_shbt(uint64_t w1)
{
	return ((w1 >> TIM_BUCKET_W1_S_HBT) & TIM_BUCKET_W1_M_HBT) |
		((w1 >> TIM_BUCKET_W1_S_SBT) & TIM_BUCKET_W1_M_SBT);
}

static inline uint8_t
timr_bkt_get_hbt(uint64_t w1)
{
	return (w1 >> TIM_BUCKET_W1_S_HBT) & TIM_BUCKET_W1_M_HBT;
}

static inline uint8_t
timr_bkt_get_bsk(uint64_t w1)
{
	return (w1 >> TIM_BUCKET_W1_S_BSK) & TIM_BUCKET_W1_M_BSK;
}

static inline uint64_t
timr_bkt_clr_bsk(struct tim_mem_bucket *bktp)
{
	/* Clear everything except lock. */
	const uint64_t v = TIM_BUCKET_W1_M_LOCK << TIM_BUCKET_W1_S_LOCK;
	return __atomic_fetch_and(&bktp->w1, v, __ATOMIC_ACQ_REL);
}

static inline uint64_t
timr_bkt_fetch_sema_lock(struct tim_mem_bucket *bktp)
{
	return __atomic_fetch_add(&bktp->w1, TIM_BUCKET_SEMA_WLOCK,
			__ATOMIC_ACQ_REL);
}

static inline uint64_t
timr_bkt_fetch_sema(struct tim_mem_bucket *bktp)
{
	return __atomic_fetch_add(&bktp->w1, TIM_BUCKET_SEMA,
			__ATOMIC_RELAXED);
}

static inline uint64_t
timr_bkt_inc_lock(struct tim_mem_bucket *bktp)
{
	const uint64_t v = 1ull << TIM_BUCKET_W1_S_LOCK;
	return __atomic_fetch_add(&bktp->w1, v, __ATOMIC_ACQ_REL);
}
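
/*
 * Adding 0xff to the one-byte lock field relies on 8-bit wraparound to
 * decrement the lock count in place without disturbing the neighbouring
 * w1 fields.
 */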
static inline void
timr_bkt_dec_lock(struct tim_mem_bucket *bktp)
{
	__atomic_add_fetch(&bktp->lock, 0xff, __ATOMIC_ACQ_REL);
}

static inline uint32_t
timr_bkt_get_nent(uint64_t w1)
{
	return (w1 >> TIM_BUCKET_W1_S_NUM_ENTRIES) &
		TIM_BUCKET_W1_M_NUM_ENTRIES;
}

static inline void
timr_bkt_inc_nent(struct tim_mem_bucket *bktp)
{
	__atomic_add_fetch(&bktp->nb_entry, 1, __ATOMIC_RELAXED);
}

static inline void
timr_bkt_add_nent(struct tim_mem_bucket *bktp, uint32_t v)
{
	__atomic_add_fetch(&bktp->nb_entry, v, __ATOMIC_RELAXED);
}

static inline uint64_t
timr_bkt_clr_nent(struct tim_mem_bucket *bktp)
{
	const uint64_t v = ~(TIM_BUCKET_W1_M_NUM_ENTRIES <<
			TIM_BUCKET_W1_S_NUM_ENTRIES);
	return __atomic_and_fetch(&bktp->w1, v, __ATOMIC_ACQ_REL);
}
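
/*
 * Return every chunk of the bucket to the mempool except the first one,
 * which is reused as the bucket's current chunk. The slot just past the
 * last entry of each chunk holds the link to the next chunk.
 */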
static inline struct tim_mem_entry *
timr_clr_bkt(struct timvf_ring * const timr, struct tim_mem_bucket * const bkt)
{
	struct tim_mem_entry *chunk;
	struct tim_mem_entry *pnext;
	chunk = ((struct tim_mem_entry *)(uintptr_t)bkt->first_chunk);
	chunk = (struct tim_mem_entry *)(uintptr_t)(chunk + nb_chunk_slots)->w0;

	while (chunk) {
		pnext = (struct tim_mem_entry *)(uintptr_t)
			((chunk + nb_chunk_slots)->w0);
		rte_mempool_put(timr->chunk_pool, chunk);
		chunk = pnext;
	}
	return (struct tim_mem_entry *)(uintptr_t)bkt->first_chunk;
}
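
/*
 * Cancel an armed timer. The arm paths stash the chunk slot and bucket
 * pointers in impl_opaque[]; if the stored entry no longer matches the
 * event data, the timer has already expired or been freed and -ENOENT
 * is returned.
 */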
static inline int
timvf_rem_entry(struct rte_event_timer *tim)
{
	uint64_t lock_sema;
	struct tim_mem_entry *entry;
	struct tim_mem_bucket *bkt;
	if (tim->impl_opaque[1] == 0 ||
			tim->impl_opaque[0] == 0)
		return -ENOENT;

	entry = (struct tim_mem_entry *)(uintptr_t)tim->impl_opaque[0];
	if (entry->wqe != tim->ev.u64) {
		tim->impl_opaque[1] = tim->impl_opaque[0] = 0;
		return -ENOENT;
	}
	bkt = (struct tim_mem_bucket *)(uintptr_t)tim->impl_opaque[1];
	lock_sema = timr_bkt_inc_lock(bkt);
	if (timr_bkt_get_shbt(lock_sema)
			|| !timr_bkt_get_nent(lock_sema)) {
		timr_bkt_dec_lock(bkt);
		tim->impl_opaque[1] = tim->impl_opaque[0] = 0;
		return -ENOENT;
	}

	entry->w0 = entry->wqe = 0;
	timr_bkt_dec_lock(bkt);

	tim->state = RTE_EVENT_TIMER_CANCELED;
	tim->impl_opaque[1] = tim->impl_opaque[0] = 0;
	return 0;
}
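
/*
 * Generic chunk refill: if the bucket still has entries (or has no chunk at
 * all), take a fresh chunk from the pool and link it after the current one;
 * otherwise recycle the bucket's own chunk list via timr_clr_bkt().
 */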
static inline struct tim_mem_entry *
timvf_refill_chunk_generic(struct tim_mem_bucket * const bkt,
		struct timvf_ring * const timr)
{
	struct tim_mem_entry *chunk;

	if (bkt->nb_entry || !bkt->first_chunk) {
		if (unlikely(rte_mempool_get(timr->chunk_pool,
						(void **)&chunk))) {
			return NULL;
		}
		if (bkt->nb_entry) {
			*(uint64_t *)(((struct tim_mem_entry *)(uintptr_t)
					bkt->current_chunk) +
					nb_chunk_slots) =
				(uintptr_t) chunk;
		} else {
			bkt->first_chunk = (uintptr_t) chunk;
		}
	} else {
		chunk = timr_clr_bkt(timr, bkt);
		bkt->first_chunk = (uintptr_t)chunk;
	}
	*(uint64_t *)(chunk + nb_chunk_slots) = 0;

	return chunk;
}
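
/*
 * Refill path used when the chunk pool is backed by the hardware FPA:
 * always allocate a fresh chunk, clear its link slot and attach it to the
 * bucket's chunk list.
 */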
static inline struct tim_mem_entry *
timvf_refill_chunk_fpa(struct tim_mem_bucket * const bkt,
		struct timvf_ring * const timr)
{
	struct tim_mem_entry *chunk;

	if (unlikely(rte_mempool_get(timr->chunk_pool, (void **)&chunk)))
		return NULL;

	*(uint64_t *)(chunk + nb_chunk_slots) = 0;
	if (bkt->nb_entry) {
		*(uint64_t *)(((struct tim_mem_entry *)(uintptr_t)
				bkt->current_chunk) +
				nb_chunk_slots) =
			(uintptr_t) chunk;
	} else {
		bkt->first_chunk = (uintptr_t) chunk;
	}

	return chunk;
}
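
/*
 * Map a relative expiry (in buckets) to an absolute bucket: convert the TSC
 * delta since ring start into elapsed buckets with a precomputed reciprocal
 * divide, add the requested offset and wrap the result into the ring.
 */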
static inline struct tim_mem_bucket *
timvf_get_target_bucket(struct timvf_ring * const timr, const uint32_t rel_bkt)
{
	const uint64_t bkt_cyc = rte_rdtsc() - timr->ring_start_cyc;
	const uint32_t bucket = rte_reciprocal_divide_u64(bkt_cyc,
			&timr->fast_div) + rel_bkt;
	const uint32_t tbkt_id = timr->get_target_bkt(bucket,
			timr->nb_bkts);
	return &timr->bkt[tbkt_id];
}

/* Single producer functions. */
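/*
 * Arm a single timer when only one producer uses the ring: a fetch-and-add
 * on w1 (timr_bkt_fetch_sema) reserves a chunk slot without taking the
 * bucket lock; the path retries while the bucket's busy bit (HBT) is set.
 */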
static inline int
timvf_add_entry_sp(struct timvf_ring * const timr, const uint32_t rel_bkt,
		struct rte_event_timer * const tim,
		const struct tim_mem_entry * const pent)
{
	int16_t rem;
	uint64_t lock_sema;
	struct tim_mem_bucket *bkt;
	struct tim_mem_entry *chunk;

	bkt = timvf_get_target_bucket(timr, rel_bkt);
__retry:
	/* Get bucket sema. */
	lock_sema = timr_bkt_fetch_sema(bkt);
	/* Bucket related checks. */
	if (unlikely(timr_bkt_get_hbt(lock_sema)))
		goto __retry;

	/* Insert the work. */
	rem = timr_bkt_fetch_rem(lock_sema);

	if (!rem) {
		chunk = timr->refill_chunk(bkt, timr);
		if (unlikely(chunk == NULL)) {
			timr_bkt_set_rem(bkt, 0);
			tim->impl_opaque[0] = tim->impl_opaque[1] = 0;
			tim->state = RTE_EVENT_TIMER_ERROR;
			return -ENOMEM;
		}
		bkt->current_chunk = (uintptr_t) chunk;
		timr_bkt_set_rem(bkt, nb_chunk_slots - 1);
	} else {
		chunk = (struct tim_mem_entry *)(uintptr_t)bkt->current_chunk;
		chunk += nb_chunk_slots - rem;
	}
	/* Copy work entry. */
	*chunk = *pent;
	timr_bkt_inc_nent(bkt);

	tim->impl_opaque[0] = (uintptr_t)chunk;
	tim->impl_opaque[1] = (uintptr_t)bkt;
	tim->state = RTE_EVENT_TIMER_ARMED;
	return 0;
}

/* Multi producer functions. */
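/*
 * Arm a single timer with multiple producers: timr_bkt_fetch_sema_lock()
 * both reserves a slot and takes the bucket lock in one fetch-and-add; the
 * lock is dropped again once the entry has been copied in.
 */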
static inline int
timvf_add_entry_mp(struct timvf_ring * const timr, const uint32_t rel_bkt,
		struct rte_event_timer * const tim,
		const struct tim_mem_entry * const pent)
{
	int16_t rem;
	uint64_t lock_sema;
	struct tim_mem_bucket *bkt;
	struct tim_mem_entry *chunk;

__retry:
	bkt = timvf_get_target_bucket(timr, rel_bkt);
	/* Bucket related checks. */
	lock_sema = timr_bkt_fetch_sema_lock(bkt);
	if (unlikely(timr_bkt_get_shbt(lock_sema))) {
		timr_bkt_dec_lock(bkt);
		goto __retry;
	}

	rem = timr_bkt_fetch_rem(lock_sema);

	if (rem < 0) {
		/* Go to a different bucket. */
		timr_bkt_dec_lock(bkt);
		goto __retry;
	} else if (!rem) {
		/* Only one thread can be here. */
		chunk = timr->refill_chunk(bkt, timr);
		if (unlikely(chunk == NULL)) {
			timr_bkt_set_rem(bkt, 0);
			timr_bkt_dec_lock(bkt);
			tim->impl_opaque[0] = tim->impl_opaque[1] = 0;
			tim->state = RTE_EVENT_TIMER_ERROR;
			return -ENOMEM;
		}
		bkt->current_chunk = (uintptr_t) chunk;
		timr_bkt_set_rem(bkt, nb_chunk_slots - 1);
	} else {
		chunk = (struct tim_mem_entry *)(uintptr_t)bkt->current_chunk;
		chunk += nb_chunk_slots - rem;
	}
	/* Copy work entry. */
	*chunk = *pent;
	timr_bkt_inc_nent(bkt);
	timr_bkt_dec_lock(bkt);

	tim->impl_opaque[0] = (uintptr_t)chunk;
	tim->impl_opaque[1] = (uintptr_t)bkt;
	tim->state = RTE_EVENT_TIMER_ARMED;
	return 0;
}
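
/*
 * Copy a run of entries into consecutive chunk slots and record, per timer,
 * the slot and bucket used so that the timer can later be canceled.
 */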
static inline uint16_t
timvf_cpy_wrk(uint16_t index, uint16_t cpy_lmt,
		struct tim_mem_entry *chunk,
		struct rte_event_timer ** const tim,
		const struct tim_mem_entry * const ents,
		const struct tim_mem_bucket * const bkt)
{
	for (; index < cpy_lmt; index++) {
		*chunk = *(ents + index);
		tim[index]->impl_opaque[0] = (uintptr_t)chunk++;
		tim[index]->impl_opaque[1] = (uintptr_t)bkt;
		tim[index]->state = RTE_EVENT_TIMER_ARMED;
	}

	return index;
}

/* Burst mode functions. */
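/*
 * Arm a burst of timers into one bucket: the lock count in w1 gates the
 * bucket to a single producer, the burst is split between the space left in
 * the current chunk and a freshly refilled chunk when needed, and the
 * bucket counters are updated once per segment.
 */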
static inline int
timvf_add_entry_brst(struct timvf_ring * const timr, const uint16_t rel_bkt,
		struct rte_event_timer ** const tim,
		const struct tim_mem_entry *ents,
		const uint16_t nb_timers)
{
	int16_t rem;
	int16_t crem;
	uint8_t lock_cnt;
	uint16_t index = 0;
	uint16_t chunk_remainder;
	uint64_t lock_sema;
	struct tim_mem_bucket *bkt;
	struct tim_mem_entry *chunk;

__retry:
	bkt = timvf_get_target_bucket(timr, rel_bkt);

	/* Only one thread beyond this. */
	lock_sema = timr_bkt_inc_lock(bkt);
	lock_cnt = (uint8_t)
		((lock_sema >> TIM_BUCKET_W1_S_LOCK) & TIM_BUCKET_W1_M_LOCK);

	if (lock_cnt) {
		timr_bkt_dec_lock(bkt);
		goto __retry;
	}

	/* Bucket related checks. */
	if (unlikely(timr_bkt_get_hbt(lock_sema))) {
		timr_bkt_dec_lock(bkt);
		goto __retry;
	}

	chunk_remainder = timr_bkt_fetch_rem(lock_sema);
	rem = chunk_remainder - nb_timers;
	if (rem < 0) {
		crem = nb_chunk_slots - chunk_remainder;
		if (chunk_remainder && crem) {
			chunk = ((struct tim_mem_entry *)
					(uintptr_t)bkt->current_chunk) + crem;

			index = timvf_cpy_wrk(index, chunk_remainder,
					chunk, tim, ents, bkt);
			timr_bkt_sub_rem(bkt, chunk_remainder);
			timr_bkt_add_nent(bkt, chunk_remainder);
		}
		rem = nb_timers - chunk_remainder;
		ents = ents + chunk_remainder;

		chunk = timr->refill_chunk(bkt, timr);
		if (unlikely(chunk == NULL)) {
			timr_bkt_dec_lock(bkt);
			rte_errno = ENOMEM;
			tim[index]->state = RTE_EVENT_TIMER_ERROR;
			return crem;
		}
		*(uint64_t *)(chunk + nb_chunk_slots) = 0;
		bkt->current_chunk = (uintptr_t) chunk;

		index = timvf_cpy_wrk(index, nb_timers, chunk, tim, ents, bkt);
		timr_bkt_set_rem(bkt, nb_chunk_slots - rem);
		timr_bkt_add_nent(bkt, rem);
	} else {
		chunk = (struct tim_mem_entry *)(uintptr_t)bkt->current_chunk;
		chunk += (nb_chunk_slots - chunk_remainder);

		index = timvf_cpy_wrk(index, nb_timers,
				chunk, tim, ents, bkt);
		timr_bkt_sub_rem(bkt, nb_timers);
		timr_bkt_add_nent(bkt, nb_timers);
	}

	timr_bkt_dec_lock(bkt);
	return nb_timers;
}