/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019 Marvell International Ltd.
 */

#ifndef __OTX2_TIM_WORKER_H__
#define __OTX2_TIM_WORKER_H__

#include "otx2_tim_evdev.h"
static inline uint8_t
tim_bkt_fetch_lock(uint64_t w1)
{
	return (w1 >> TIM_BUCKET_W1_S_LOCK) &
		TIM_BUCKET_W1_M_LOCK;
}
static inline int16_t
tim_bkt_fetch_rem(uint64_t w1)
{
	return (w1 >> TIM_BUCKET_W1_S_CHUNK_REMAINDER) &
		TIM_BUCKET_W1_M_CHUNK_REMAINDER;
}
static inline int16_t
tim_bkt_get_rem(struct otx2_tim_bkt *bktp)
{
	return __atomic_load_n(&bktp->chunk_remainder, __ATOMIC_ACQUIRE);
}
static inline void
tim_bkt_set_rem(struct otx2_tim_bkt *bktp, uint16_t v)
{
	__atomic_store_n(&bktp->chunk_remainder, v, __ATOMIC_RELAXED);
}

static inline void
tim_bkt_sub_rem(struct otx2_tim_bkt *bktp, uint16_t v)
{
	__atomic_fetch_sub(&bktp->chunk_remainder, v, __ATOMIC_RELAXED);
}
static inline uint8_t
tim_bkt_get_hbt(uint64_t w1)
{
	return (w1 >> TIM_BUCKET_W1_S_HBT) & TIM_BUCKET_W1_M_HBT;
}

static inline uint8_t
tim_bkt_get_bsk(uint64_t w1)
{
	return (w1 >> TIM_BUCKET_W1_S_BSK) & TIM_BUCKET_W1_M_BSK;
}
static inline uint64_t
tim_bkt_clr_bsk(struct otx2_tim_bkt *bktp)
{
	/* Clear everything except lock. */
	const uint64_t v = TIM_BUCKET_W1_M_LOCK << TIM_BUCKET_W1_S_LOCK;

	return __atomic_fetch_and(&bktp->w1, v, __ATOMIC_ACQ_REL);
}
static inline uint64_t
tim_bkt_fetch_sema_lock(struct otx2_tim_bkt *bktp)
{
	return __atomic_fetch_add(&bktp->w1, TIM_BUCKET_SEMA_WLOCK,
				  __ATOMIC_ACQUIRE);
}

static inline uint64_t
tim_bkt_fetch_sema(struct otx2_tim_bkt *bktp)
{
	return __atomic_fetch_add(&bktp->w1, TIM_BUCKET_SEMA, __ATOMIC_RELAXED);
}
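/*
 * Note (an inference from the call sites; the constants themselves are
 * defined in otx2_tim_evdev.h): both TIM_BUCKET_SEMA variants bump the w1
 * semaphore field in a single fetch-add, and the _WLOCK variant presumably
 * also adds one to the lock byte so that taking the semaphore snapshot and
 * acquiring a writer reference happen in one atomic operation.
 */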
static inline uint64_t
tim_bkt_inc_lock(struct otx2_tim_bkt *bktp)
{
	const uint64_t v = 1ull << TIM_BUCKET_W1_S_LOCK;

	return __atomic_fetch_add(&bktp->w1, v, __ATOMIC_ACQUIRE);
}

static inline void
tim_bkt_dec_lock(struct otx2_tim_bkt *bktp)
{
	/* Adding 0xff to the 8-bit lock byte wraps it to lock - 1. */
	__atomic_add_fetch(&bktp->lock, 0xff, __ATOMIC_RELEASE);
}
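/*
 * Layout note (a summary of the accessors above; the authoritative
 * TIM_BUCKET_W1_* field definitions live in otx2_tim_evdev.h): w1 is the
 * bucket control word shared with hardware. It packs a signed chunk
 * remainder, an 8-bit lock byte aliased by bktp->lock, the HBT (likely
 * "hardware bucket traversal in progress") and BSK bits, and the entry
 * count aliased by bktp->nb_entry. The helpers either mask fields out of a
 * w1 snapshot or update them atomically in place.
 */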
static inline uint32_t
tim_bkt_get_nent(uint64_t w1)
{
	return (w1 >> TIM_BUCKET_W1_S_NUM_ENTRIES) &
		TIM_BUCKET_W1_M_NUM_ENTRIES;
}

static inline void
tim_bkt_inc_nent(struct otx2_tim_bkt *bktp)
{
	__atomic_add_fetch(&bktp->nb_entry, 1, __ATOMIC_RELAXED);
}

static inline void
tim_bkt_add_nent(struct otx2_tim_bkt *bktp, uint32_t v)
{
	__atomic_add_fetch(&bktp->nb_entry, v, __ATOMIC_RELAXED);
}
static inline uint64_t
tim_bkt_clr_nent(struct otx2_tim_bkt *bktp)
{
	const uint64_t v = ~(TIM_BUCKET_W1_M_NUM_ENTRIES <<
			     TIM_BUCKET_W1_S_NUM_ENTRIES);

	return __atomic_and_fetch(&bktp->w1, v, __ATOMIC_ACQ_REL);
}
static inline uint64_t
tim_bkt_fast_mod(uint64_t n, uint64_t d, struct rte_reciprocal_u64 R)
{
	return (n - (d * rte_reciprocal_divide_u64(n, &R)));
}
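/*
 * Example (illustrative): with a ring of nb_bkts = 1024 buckets and
 * fast_bkt initialized as rte_reciprocal_value_u64(1024), the call
 * tim_bkt_fast_mod(n, 1024, fast_bkt) computes n % 1024 via a
 * multiply-and-shift reciprocal instead of a hardware divide, which
 * matters on the per-arm fast path.
 */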
static __rte_always_inline void
tim_get_target_bucket(struct otx2_tim_ring * const tim_ring,
		      const uint32_t rel_bkt, struct otx2_tim_bkt **bkt,
		      struct otx2_tim_bkt **mirr_bkt)
{
	const uint64_t bkt_cyc = rte_rdtsc() - tim_ring->ring_start_cyc;
	uint64_t bucket =
		rte_reciprocal_divide_u64(bkt_cyc, &tim_ring->fast_div) +
		rel_bkt;
	uint64_t mirr_bucket = 0;

	bucket =
		tim_bkt_fast_mod(bucket, tim_ring->nb_bkts, tim_ring->fast_bkt);
	mirr_bucket = tim_bkt_fast_mod(bucket + (tim_ring->nb_bkts >> 1),
				       tim_ring->nb_bkts, tim_ring->fast_bkt);
	*bkt = &tim_ring->bkt[bucket];
	*mirr_bkt = &tim_ring->bkt[mirr_bucket];
}
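/*
 * Design note (an inference from the code above): the target bucket is
 * "elapsed cycles / cycles-per-bucket + rel_bkt", wrapped into the ring,
 * and the mirror bucket sits exactly half the ring away. Chunk pointers
 * are staged through the mirror bucket so software never dereferences
 * current_chunk in the bucket the hardware may currently be traversing.
 */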
static struct otx2_tim_ent *
tim_clr_bkt(struct otx2_tim_ring * const tim_ring,
	    struct otx2_tim_bkt * const bkt)
{
#define TIM_MAX_OUTSTANDING_OBJ		64
	void *pend_chunks[TIM_MAX_OUTSTANDING_OBJ];
	struct otx2_tim_ent *chunk;
	struct otx2_tim_ent *pnext;
	uint8_t objs = 0;

	chunk = ((struct otx2_tim_ent *)(uintptr_t)bkt->first_chunk);
	chunk = (struct otx2_tim_ent *)(uintptr_t)(chunk +
			tim_ring->nb_chunk_slots)->w0;
	while (chunk) {
		pnext = (struct otx2_tim_ent *)(uintptr_t)
			((chunk + tim_ring->nb_chunk_slots)->w0);
		if (objs == TIM_MAX_OUTSTANDING_OBJ) {
			rte_mempool_put_bulk(tim_ring->chunk_pool, pend_chunks,
					     objs);
			objs = 0;
		}
		pend_chunks[objs++] = chunk;
		chunk = pnext;
	}

	if (objs)
		rte_mempool_put_bulk(tim_ring->chunk_pool, pend_chunks,
				     objs);

	return (struct otx2_tim_ent *)(uintptr_t)bkt->first_chunk;
}
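/*
 * Note: the last slot of every chunk (index nb_chunk_slots) is not a timer
 * entry but a link word whose w0 holds the pointer to the next chunk.
 * tim_clr_bkt walks that list, returning all chunks after the first to the
 * mempool in batches of TIM_MAX_OUTSTANDING_OBJ to amortize the bulk-put
 * cost, and hands the retained first chunk back to the caller for reuse.
 */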
static struct otx2_tim_ent *
tim_refill_chunk(struct otx2_tim_bkt * const bkt,
		 struct otx2_tim_bkt * const mirr_bkt,
		 struct otx2_tim_ring * const tim_ring)
{
	struct otx2_tim_ent *chunk;

	if (bkt->nb_entry || !bkt->first_chunk) {
		if (unlikely(rte_mempool_get(tim_ring->chunk_pool,
					     (void **)&chunk)))
			return NULL;
		if (bkt->nb_entry) {
			*(uint64_t *)(((struct otx2_tim_ent *)
					mirr_bkt->current_chunk) +
					tim_ring->nb_chunk_slots) =
				(uintptr_t)chunk;
		} else {
			bkt->first_chunk = (uintptr_t)chunk;
		}
	} else {
		chunk = tim_clr_bkt(tim_ring, bkt);
		bkt->first_chunk = (uintptr_t)chunk;
	}
	*(uint64_t *)(chunk + tim_ring->nb_chunk_slots) = 0;

	return chunk;
}
static struct otx2_tim_ent *
tim_insert_chunk(struct otx2_tim_bkt * const bkt,
		 struct otx2_tim_bkt * const mirr_bkt,
		 struct otx2_tim_ring * const tim_ring)
{
	struct otx2_tim_ent *chunk;

	if (unlikely(rte_mempool_get(tim_ring->chunk_pool, (void **)&chunk)))
		return NULL;

	*(uint64_t *)(chunk + tim_ring->nb_chunk_slots) = 0;
	if (bkt->nb_entry) {
		*(uint64_t *)(((struct otx2_tim_ent *)(uintptr_t)
				mirr_bkt->current_chunk) +
				tim_ring->nb_chunk_slots) = (uintptr_t)chunk;
	} else {
		bkt->first_chunk = (uintptr_t)chunk;
	}

	return chunk;
}
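/*
 * Note: these two allocators back the two chunk-management modes selected
 * by the caller's flags. With OTX2_TIM_ENA_FB the arm paths call
 * tim_refill_chunk, which can recycle an emptied bucket's own chunk list
 * via tim_clr_bkt; with OTX2_TIM_ENA_DFB they call tim_insert_chunk, which
 * always links in a fresh chunk from the mempool. The flag semantics
 * themselves are established in otx2_tim_evdev.
 */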
static __rte_always_inline int
tim_add_entry_sp(struct otx2_tim_ring * const tim_ring,
		 const uint32_t rel_bkt,
		 struct rte_event_timer * const tim,
		 const struct otx2_tim_ent * const pent,
		 const uint8_t flags)
{
	struct otx2_tim_bkt *mirr_bkt;
	struct otx2_tim_ent *chunk;
	struct otx2_tim_bkt *bkt;
	uint64_t lock_sema;
	int16_t rem;

__retry:
	tim_get_target_bucket(tim_ring, rel_bkt, &bkt, &mirr_bkt);

	/* Get bucket sema. */
	lock_sema = tim_bkt_fetch_sema_lock(bkt);

	/* Bucket related checks. */
	if (unlikely(tim_bkt_get_hbt(lock_sema))) {
		if (tim_bkt_get_nent(lock_sema) != 0) {
			uint64_t hbt_state;
#ifdef RTE_ARCH_ARM64
			asm volatile(
					"	ldaxr %[hbt], [%[w1]]	\n"
					"	tbz %[hbt], 33, dne%=	\n"
					"	sevl			\n"
					"rty%=:	wfe			\n"
					"	ldaxr %[hbt], [%[w1]]	\n"
					"	tbnz %[hbt], 33, rty%=	\n"
					"dne%=:				\n"
					: [hbt] "=&r" (hbt_state)
					: [w1] "r" ((&bkt->w1))
					: "memory"
					);
#else
			do {
				hbt_state = __atomic_load_n(&bkt->w1,
						__ATOMIC_ACQUIRE);
			} while (hbt_state & BIT_ULL(33));
#endif

			if (!(hbt_state & BIT_ULL(34))) {
				tim_bkt_dec_lock(bkt);
				goto __retry;
			}
		}
	}

	/* Insert the work. */
	rem = tim_bkt_fetch_rem(lock_sema);

	if (!rem) {
		if (flags & OTX2_TIM_ENA_FB)
			chunk = tim_refill_chunk(bkt, mirr_bkt, tim_ring);
		if (flags & OTX2_TIM_ENA_DFB)
			chunk = tim_insert_chunk(bkt, mirr_bkt, tim_ring);

		if (unlikely(chunk == NULL)) {
			bkt->chunk_remainder = 0;
			tim_bkt_dec_lock(bkt);
			tim->impl_opaque[0] = 0;
			tim->impl_opaque[1] = 0;
			tim->state = RTE_EVENT_TIMER_ERROR;
			return -ENOMEM;
		}
		mirr_bkt->current_chunk = (uintptr_t)chunk;
		bkt->chunk_remainder = tim_ring->nb_chunk_slots - 1;
	} else {
		chunk = (struct otx2_tim_ent *)mirr_bkt->current_chunk;
		chunk += tim_ring->nb_chunk_slots - rem;
	}

	/* Copy work entry. */
	*chunk = *pent;

	tim_bkt_inc_nent(bkt);
	tim_bkt_dec_lock(bkt);

	tim->impl_opaque[0] = (uintptr_t)chunk;
	tim->impl_opaque[1] = (uintptr_t)bkt;
	tim->state = RTE_EVENT_TIMER_ARMED;

	return 0;
}
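/*
 * Usage sketch (illustrative; the real callers live in the otx2 TIM arm
 * burst routines and encode the event metadata into w0 before insertion):
 *
 *	struct otx2_tim_ent ent;
 *
 *	ent.w0 = ...;           // TIM LF entry metadata, built by the caller
 *	ent.wqe = tim->ev.u64;  // must match, see tim_rm_entry()
 *	tim_add_entry_sp(tim_ring, rel_bkt, tim, &ent, OTX2_TIM_ENA_DFB);
 *
 * On ARM64 the HBT wait above uses ldaxr/wfe so the core sleeps until the
 * exclusive monitor is cleared instead of spinning on bit 33 of w1; the
 * generic fallback polls the same bit with an acquire load.
 */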
static __rte_always_inline int
tim_add_entry_mp(struct otx2_tim_ring * const tim_ring,
		 const uint32_t rel_bkt,
		 struct rte_event_timer * const tim,
		 const struct otx2_tim_ent * const pent,
		 const uint8_t flags)
{
	struct otx2_tim_bkt *mirr_bkt;
	struct otx2_tim_ent *chunk;
	struct otx2_tim_bkt *bkt;
	uint64_t lock_sema;
	int16_t rem;

__retry:
	tim_get_target_bucket(tim_ring, rel_bkt, &bkt, &mirr_bkt);
	/* Get bucket sema. */
	lock_sema = tim_bkt_fetch_sema_lock(bkt);

	/* Bucket related checks. */
	if (unlikely(tim_bkt_get_hbt(lock_sema))) {
		if (tim_bkt_get_nent(lock_sema) != 0) {
			uint64_t hbt_state;
#ifdef RTE_ARCH_ARM64
			asm volatile(
					"	ldaxr %[hbt], [%[w1]]	\n"
					"	tbz %[hbt], 33, dne%=	\n"
					"	sevl			\n"
					"rty%=:	wfe			\n"
					"	ldaxr %[hbt], [%[w1]]	\n"
					"	tbnz %[hbt], 33, rty%=	\n"
					"dne%=:				\n"
					: [hbt] "=&r" (hbt_state)
					: [w1] "r" ((&bkt->w1))
					: "memory"
					);
#else
			do {
				hbt_state = __atomic_load_n(&bkt->w1,
						__ATOMIC_ACQUIRE);
			} while (hbt_state & BIT_ULL(33));
#endif

			if (!(hbt_state & BIT_ULL(34))) {
				tim_bkt_dec_lock(bkt);
				goto __retry;
			}
		}
	}

	rem = tim_bkt_fetch_rem(lock_sema);
	if (rem < 0) {
#ifdef RTE_ARCH_ARM64
		/* Wait until the sign bit (bit 15) of chunk_remainder
		 * clears, i.e. the winning thread has installed a new chunk.
		 */
		asm volatile(
				"	ldaxrh %w[rem], [%[crem]]	\n"
				"	tbz %w[rem], 15, dne%=		\n"
				"	sevl				\n"
				"rty%=:	wfe				\n"
				"	ldaxrh %w[rem], [%[crem]]	\n"
				"	tbnz %w[rem], 15, rty%=		\n"
				"dne%=:					\n"
				: [rem] "=&r" (rem)
				: [crem] "r" (&bkt->chunk_remainder)
				: "memory"
				);
#else
		while (__atomic_load_n(&bkt->chunk_remainder,
				       __ATOMIC_ACQUIRE) < 0)
			;
#endif
		/* Goto diff bucket. */
		tim_bkt_dec_lock(bkt);
		goto __retry;
	} else if (!rem) {
		/* Only one thread can be here. */
		if (flags & OTX2_TIM_ENA_FB)
			chunk = tim_refill_chunk(bkt, mirr_bkt, tim_ring);
		if (flags & OTX2_TIM_ENA_DFB)
			chunk = tim_insert_chunk(bkt, mirr_bkt, tim_ring);

		if (unlikely(chunk == NULL)) {
			tim_bkt_set_rem(bkt, 0);
			tim_bkt_dec_lock(bkt);
			tim->impl_opaque[0] = 0;
			tim->impl_opaque[1] = 0;
			tim->state = RTE_EVENT_TIMER_ERROR;
			return -ENOMEM;
		}
		*chunk = *pent;
		while (tim_bkt_fetch_lock(lock_sema) !=
				(-tim_bkt_fetch_rem(lock_sema)))
			lock_sema = __atomic_load_n(&bkt->w1, __ATOMIC_ACQUIRE);

		mirr_bkt->current_chunk = (uintptr_t)chunk;
		__atomic_store_n(&bkt->chunk_remainder,
				 tim_ring->nb_chunk_slots - 1, __ATOMIC_RELEASE);
	} else {
		chunk = (struct otx2_tim_ent *)mirr_bkt->current_chunk;
		chunk += tim_ring->nb_chunk_slots - rem;
		*chunk = *pent;
	}

	/* Work entry copied above; publish bucket state. */
	tim_bkt_inc_nent(bkt);
	tim_bkt_dec_lock(bkt);
	tim->impl_opaque[0] = (uintptr_t)chunk;
	tim->impl_opaque[1] = (uintptr_t)bkt;
	tim->state = RTE_EVENT_TIMER_ARMED;

	return 0;
}
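/*
 * Multi-producer rendezvous, as implemented above: tim_bkt_fetch_sema_lock
 * hands every arming thread a slot by moving chunk_remainder down through
 * the semaphore add. The thread that takes the last slot (remainder hits
 * zero) becomes the allocator; threads that observe a negative remainder
 * back off, wait for the sign bit (bit 15) to clear, and retry against a
 * fresh bucket snapshot. Before publishing the new chunk the allocator
 * spins until lock == -remainder, i.e. until every over-subscribed thread
 * has dropped its lock reference.
 */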
static inline uint16_t
tim_cpy_wrk(uint16_t index, uint16_t cpy_lmt,
	    struct otx2_tim_ent *chunk,
	    struct rte_event_timer ** const tim,
	    const struct otx2_tim_ent * const ents,
	    const struct otx2_tim_bkt * const bkt)
{
	for (; index < cpy_lmt; index++) {
		*chunk = *(ents + index);
		tim[index]->impl_opaque[0] = (uintptr_t)chunk++;
		tim[index]->impl_opaque[1] = (uintptr_t)bkt;
		tim[index]->state = RTE_EVENT_TIMER_ARMED;
	}

	return index;
}
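/*
 * tim_cpy_wrk copies ents[index..cpy_lmt) into consecutive chunk slots and
 * stamps each timer's impl_opaque with its slot and bucket, exactly as the
 * single-shot paths do; returning the updated index lets the burst path
 * split one request across the tail of the current chunk and a freshly
 * linked one.
 */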
/* Burst mode functions */
static inline int
tim_add_entry_brst(struct otx2_tim_ring * const tim_ring,
		   const uint16_t rel_bkt,
		   struct rte_event_timer ** const tim,
		   const struct otx2_tim_ent *ents,
		   const uint16_t nb_timers, const uint8_t flags)
{
	struct otx2_tim_ent *chunk = NULL;
	struct otx2_tim_bkt *mirr_bkt;
	struct otx2_tim_bkt *bkt;
	uint16_t chunk_remainder;
	uint16_t index = 0;
	uint64_t lock_sema;
	int16_t rem, crem;
	uint8_t lock_cnt;

__retry:
	tim_get_target_bucket(tim_ring, rel_bkt, &bkt, &mirr_bkt);

	/* Only one thread beyond this. */
	lock_sema = tim_bkt_inc_lock(bkt);
	lock_cnt = (uint8_t)
		((lock_sema >> TIM_BUCKET_W1_S_LOCK) & TIM_BUCKET_W1_M_LOCK);

	if (lock_cnt) {
		tim_bkt_dec_lock(bkt);
		goto __retry;
	}

	/* Bucket related checks. */
	if (unlikely(tim_bkt_get_hbt(lock_sema))) {
		if (tim_bkt_get_nent(lock_sema) != 0) {
			uint64_t hbt_state;
#ifdef RTE_ARCH_ARM64
			asm volatile(
					"	ldaxr %[hbt], [%[w1]]	\n"
					"	tbz %[hbt], 33, dne%=	\n"
					"	sevl			\n"
					"rty%=:	wfe			\n"
					"	ldaxr %[hbt], [%[w1]]	\n"
					"	tbnz %[hbt], 33, rty%=	\n"
					"dne%=:				\n"
					: [hbt] "=&r" (hbt_state)
					: [w1] "r" ((&bkt->w1))
					: "memory"
					);
#else
			do {
				hbt_state = __atomic_load_n(&bkt->w1,
						__ATOMIC_ACQUIRE);
			} while (hbt_state & BIT_ULL(33));
#endif

			if (!(hbt_state & BIT_ULL(34))) {
				tim_bkt_dec_lock(bkt);
				goto __retry;
			}
		}
	}

	chunk_remainder = tim_bkt_fetch_rem(lock_sema);
	rem = chunk_remainder - nb_timers;
	if (rem < 0) {
		crem = tim_ring->nb_chunk_slots - chunk_remainder;
		if (chunk_remainder && crem) {
			chunk = ((struct otx2_tim_ent *)
					mirr_bkt->current_chunk) + crem;

			index = tim_cpy_wrk(index, chunk_remainder, chunk, tim,
					    ents, bkt);
			tim_bkt_sub_rem(bkt, chunk_remainder);
			tim_bkt_add_nent(bkt, chunk_remainder);
		}

		if (flags & OTX2_TIM_ENA_FB)
			chunk = tim_refill_chunk(bkt, mirr_bkt, tim_ring);
		if (flags & OTX2_TIM_ENA_DFB)
			chunk = tim_insert_chunk(bkt, mirr_bkt, tim_ring);

		if (unlikely(chunk == NULL)) {
			tim_bkt_dec_lock(bkt);
			rte_errno = ENOMEM;
			tim[index]->state = RTE_EVENT_TIMER_ERROR;
			return crem;
		}
		*(uint64_t *)(chunk + tim_ring->nb_chunk_slots) = 0;
		mirr_bkt->current_chunk = (uintptr_t)chunk;
		tim_cpy_wrk(index, nb_timers, chunk, tim, ents, bkt);

		rem = nb_timers - chunk_remainder;
		tim_bkt_set_rem(bkt, tim_ring->nb_chunk_slots - rem);
		tim_bkt_add_nent(bkt, rem);
	} else {
		chunk = (struct otx2_tim_ent *)mirr_bkt->current_chunk;
		chunk += (tim_ring->nb_chunk_slots - chunk_remainder);

		tim_cpy_wrk(index, nb_timers, chunk, tim, ents, bkt);
		tim_bkt_sub_rem(bkt, nb_timers);
		tim_bkt_add_nent(bkt, nb_timers);
	}

	tim_bkt_dec_lock(bkt);

	return nb_timers;
}
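/*
 * Unlike the sp/mp entry points, the burst path takes the bucket lock
 * exclusively: tim_bkt_inc_lock must observe a prior lock count of zero,
 * otherwise the thread backs off and retries, so exactly one thread fills
 * the bucket at a time. A burst that does not fit in the current chunk is
 * split: the tail of the old chunk is filled first, then the remainder
 * lands in a newly linked chunk.
 */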
static int
tim_rm_entry(struct rte_event_timer *tim)
{
	struct otx2_tim_ent *entry;
	struct otx2_tim_bkt *bkt;
	uint64_t lock_sema;

	if (tim->impl_opaque[1] == 0 || tim->impl_opaque[0] == 0)
		return -ENOENT;

	entry = (struct otx2_tim_ent *)(uintptr_t)tim->impl_opaque[0];
	if (entry->wqe != tim->ev.u64) {
		tim->impl_opaque[0] = 0;
		tim->impl_opaque[1] = 0;
		return -ENOENT;
	}

	bkt = (struct otx2_tim_bkt *)(uintptr_t)tim->impl_opaque[1];
	lock_sema = tim_bkt_inc_lock(bkt);
	if (tim_bkt_get_hbt(lock_sema) || !tim_bkt_get_nent(lock_sema)) {
		tim_bkt_dec_lock(bkt);
		tim->impl_opaque[0] = 0;
		tim->impl_opaque[1] = 0;
		return -ENOENT;
	}

	entry->w0 = 0;
	entry->wqe = 0;
	tim_bkt_dec_lock(bkt);

	tim->state = RTE_EVENT_TIMER_CANCELED;
	tim->impl_opaque[0] = 0;
	tim->impl_opaque[1] = 0;

	return 0;
}
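/*
 * Cancellation, as the checks above encode it: impl_opaque[0]/[1] hold the
 * slot and bucket stamped at arm time, and entry->wqe must still equal the
 * caller's ev.u64. If the hardware already traversed the bucket (HBT set,
 * or no entries left) the timer is treated as gone and -ENOENT is
 * returned; a successfully canceled slot is zeroed in place rather than
 * unlinked from the chunk.
 */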
#endif /* __OTX2_TIM_WORKER_H__ */