#include "otx2_tim_evdev.h"
+static inline uint8_t
+tim_bkt_fetch_lock(uint64_t w1)
+{
+ return (w1 >> TIM_BUCKET_W1_S_LOCK) &
+ TIM_BUCKET_W1_M_LOCK;
+}
+
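
For context: `w1` packs the whole bucket state into one 64-bit word so a single atomic op can read or update several fields together. A minimal decode sketch, assuming the `TIM_BUCKET_W1_*` layout from `otx2_tim_evdev.h` (entry count in bits 31:0, sbt/hbt/bsk flags at bits 32-34, the 8-bit lock count at bit 40, signed chunk remainder in bits 63:48); the constants below are illustrative stand-ins:

```c
#include <stdint.h>
#include <stdio.h>

/* Assumed to mirror TIM_BUCKET_W1_S_LOCK / TIM_BUCKET_W1_M_LOCK. */
#define W1_S_LOCK 40
#define W1_M_LOCK 0xffULL

int main(void)
{
        /* Example word: 3 entries, hbt set (bit 33), two lock holders. */
        uint64_t w1 = 3ULL | (1ULL << 33) | (2ULL << W1_S_LOCK);

        printf("lock=%u hbt=%u nent=%u\n",
               (unsigned)((w1 >> W1_S_LOCK) & W1_M_LOCK),
               (unsigned)((w1 >> 33) & 1),
               (unsigned)(w1 & 0xffffffffULL));
        return 0;
}
```
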
static inline int16_t
tim_bkt_fetch_rem(uint64_t w1)
{
static inline void
tim_bkt_dec_lock(struct otx2_tim_bkt *bktp)
{
- __atomic_add_fetch(&bktp->lock, 0xff, __ATOMIC_RELEASE);
+ __atomic_fetch_sub(&bktp->lock, 1, __ATOMIC_RELEASE);
+}
+
+static inline void
+tim_bkt_dec_lock_relaxed(struct otx2_tim_bkt *bktp)
+{
+ __atomic_fetch_sub(&bktp->lock, 1, __ATOMIC_RELAXED);
}
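
The two unlock variants differ only in memory ordering: the RELEASE form forces every write made under the lock to become visible before the lock byte drops, while the RELAXED form is reserved for paths where an earlier RELEASE store (here, the store to `tim->state`) already publishes everything a reader may inspect. A standalone sketch of that pattern, not driver code:

```c
#include <stdint.h>

struct rec {
        uint64_t payload;
        uint8_t lock;   /* 0 = free, >0 = held */
};

/* Writes to r->payload cannot be reordered past this store. */
static inline void unlock_release(struct rec *r)
{
        __atomic_fetch_sub(&r->lock, 1, __ATOMIC_RELEASE);
}

/* Safe only when a prior RELEASE store (e.g. a status word) has
 * already published the results of the critical section. */
static inline void unlock_relaxed(struct rec *r)
{
        __atomic_fetch_sub(&r->lock, 1, __ATOMIC_RELAXED);
}

int main(void)
{
        struct rec r = { .payload = 0, .lock = 1 };

        r.payload = 42;         /* critical-section write */
        unlock_release(&r);     /* publish, then drop the lock */
        return 0;
}
```
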
static inline uint64_t
tim_bkt_clr_nent(struct otx2_tim_bkt *bktp)
{
	const uint64_t v = ~(TIM_BUCKET_W1_M_NUM_ENTRIES <<
		TIM_BUCKET_W1_S_NUM_ENTRIES);

	return __atomic_and_fetch(&bktp->w1, v, __ATOMIC_ACQ_REL);
}
-static __rte_always_inline struct otx2_tim_bkt *
-tim_get_target_bucket(struct otx2_tim_ring * const tim_ring,
- const uint32_t rel_bkt, const uint8_t flag)
+static inline uint64_t
+tim_bkt_fast_mod(uint64_t n, uint64_t d, struct rte_reciprocal_u64 R)
{
- const uint64_t bkt_cyc = rte_rdtsc() - tim_ring->ring_start_cyc;
- uint32_t bucket = rte_reciprocal_divide_u64(bkt_cyc,
- &tim_ring->fast_div) + rel_bkt;
-
- if (flag & OTX2_TIM_BKT_MOD)
- bucket = bucket % tim_ring->nb_bkts;
- if (flag & OTX2_TIM_BKT_AND)
- bucket = bucket & (tim_ring->nb_bkts - 1);
+ return (n - (d * rte_reciprocal_divide_u64(n, &R)));
+}
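
`rte_reciprocal` turns the hot-path divide into a precomputed multiply-and-shift, and the modulo falls out as `n - d * (n / d)`; this also lifts the old power-of-two restriction that the `OTX2_TIM_BKT_AND`/`OTX2_TIM_BKT_MOD` flags worked around. A minimal usage sketch against the real `<rte_reciprocal.h>` API (the ring would precompute `fast_bkt` once at setup):

```c
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <rte_reciprocal.h>

static uint64_t fast_mod(uint64_t n, uint64_t d, struct rte_reciprocal_u64 R)
{
        /* n % d == n - d * floor(n / d), with the divide done via the
         * precomputed reciprocal (multiply + shift, no udiv). */
        return n - d * rte_reciprocal_divide_u64(n, &R);
}

int main(void)
{
        const uint64_t nb_bkts = 300;   /* need not be a power of two */
        /* One-time cost, analogous to tim_ring->fast_bkt. */
        struct rte_reciprocal_u64 fast_bkt =
                rte_reciprocal_value_u64(nb_bkts);

        printf("%" PRIu64 "\n", fast_mod(12345, nb_bkts, fast_bkt)); /* 45 */
        return 0;
}
```
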
- return &tim_ring->bkt[bucket];
+static __rte_always_inline void
+tim_get_target_bucket(struct otx2_tim_ring *const tim_ring,
+ const uint32_t rel_bkt, struct otx2_tim_bkt **bkt,
+ struct otx2_tim_bkt **mirr_bkt)
+{
+ const uint64_t bkt_cyc = tim_cntvct() - tim_ring->ring_start_cyc;
+ uint64_t bucket =
+ rte_reciprocal_divide_u64(bkt_cyc, &tim_ring->fast_div) +
+ rel_bkt;
+ uint64_t mirr_bucket = 0;
+
+ bucket =
+ tim_bkt_fast_mod(bucket, tim_ring->nb_bkts, tim_ring->fast_bkt);
+ mirr_bucket = tim_bkt_fast_mod(bucket + (tim_ring->nb_bkts >> 1),
+ tim_ring->nb_bkts, tim_ring->fast_bkt);
+ *bkt = &tim_ring->bkt[bucket];
+ *mirr_bkt = &tim_ring->bkt[mirr_bucket];
}
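
The mirror bucket is simply the slot half a ring away; arm-side bookkeeping (`current_chunk`) is kept there, apparently so producer writes stay off the cache line the TIM hardware is concurrently updating in the primary bucket. A toy illustration of just the index math (my restatement, not from the patch):

```c
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        const uint64_t nb_bkts = 8;

        for (uint64_t bucket = 0; bucket < nb_bkts; bucket++) {
                /* Mirror sits half the ring away, wrapping around. */
                uint64_t mirr = (bucket + (nb_bkts >> 1)) % nb_bkts;

                printf("bkt %" PRIu64 " -> mirr %" PRIu64 "\n",
                       bucket, mirr);
        }
        return 0;
}
```
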
static struct otx2_tim_ent *
tim_clr_bkt(struct otx2_tim_ring * const tim_ring,
struct otx2_tim_bkt * const bkt)
{
+#define TIM_MAX_OUTSTANDING_OBJ 64
+ void *pend_chunks[TIM_MAX_OUTSTANDING_OBJ];
struct otx2_tim_ent *chunk;
struct otx2_tim_ent *pnext;
+ uint8_t objs = 0;
+
chunk = ((struct otx2_tim_ent *)(uintptr_t)bkt->first_chunk);
chunk = (struct otx2_tim_ent *)(uintptr_t)(chunk +
		tim_ring->nb_chunk_slots)->w0;
while (chunk) {
pnext = (struct otx2_tim_ent *)(uintptr_t)
((chunk + tim_ring->nb_chunk_slots)->w0);
- rte_mempool_put(tim_ring->chunk_pool, chunk);
+ if (objs == TIM_MAX_OUTSTANDING_OBJ) {
+ rte_mempool_put_bulk(tim_ring->chunk_pool, pend_chunks,
+ objs);
+ objs = 0;
+ }
+ pend_chunks[objs++] = chunk;
chunk = pnext;
}
+ if (objs)
+ rte_mempool_put_bulk(tim_ring->chunk_pool, pend_chunks,
+ objs);
+
return (struct otx2_tim_ent *)(uintptr_t)bkt->first_chunk;
}
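
Batching the frees through `rte_mempool_put_bulk()` amortizes the per-object mempool cache/ring overhead instead of paying it once per chunk. The same pattern reduced to its skeleton; `struct node` is a hypothetical stand-in for the chunk list:

```c
#include <rte_mempool.h>

#define MAX_PEND 64     /* same idea as TIM_MAX_OUTSTANDING_OBJ */

struct node { struct node *next; };

static void free_list_bulk(struct rte_mempool *mp, struct node *head)
{
        void *pend[MAX_PEND];
        unsigned int n = 0;

        while (head != NULL) {
                struct node *next = head->next;

                if (n == MAX_PEND) {            /* flush a full batch */
                        rte_mempool_put_bulk(mp, pend, n);
                        n = 0;
                }
                pend[n++] = head;
                head = next;
        }
        if (n)                                  /* flush the tail */
                rte_mempool_put_bulk(mp, pend, n);
}
```
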
static struct otx2_tim_ent *
tim_refill_chunk(struct otx2_tim_bkt * const bkt,
+ struct otx2_tim_bkt * const mirr_bkt,
struct otx2_tim_ring * const tim_ring)
{
struct otx2_tim_ent *chunk;
if (unlikely(rte_mempool_get(tim_ring->chunk_pool,
		(void **)&chunk)))
return NULL;
if (bkt->nb_entry) {
- *(uint64_t *)(((struct otx2_tim_ent *)(uintptr_t)
- bkt->current_chunk) +
+ *(uint64_t *)(((struct otx2_tim_ent *)
+ mirr_bkt->current_chunk) +
tim_ring->nb_chunk_slots) =
(uintptr_t)chunk;
} else {
static struct otx2_tim_ent *
tim_insert_chunk(struct otx2_tim_bkt * const bkt,
+ struct otx2_tim_bkt * const mirr_bkt,
struct otx2_tim_ring * const tim_ring)
{
struct otx2_tim_ent *chunk;
*(uint64_t *)(chunk + tim_ring->nb_chunk_slots) = 0;
if (bkt->nb_entry) {
*(uint64_t *)(((struct otx2_tim_ent *)(uintptr_t)
- bkt->current_chunk) +
+ mirr_bkt->current_chunk) +
tim_ring->nb_chunk_slots) = (uintptr_t)chunk;
} else {
bkt->first_chunk = (uintptr_t)chunk;
}
-
return chunk;
}
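
For reference, each chunk carries `nb_chunk_slots` entries plus one trailing slot whose `w0` holds the address of the next chunk (0 terminates), so a bucket's chunks form a singly linked list; that is the pointer `tim_refill_chunk()`/`tim_insert_chunk()` now patch through the mirror bucket. A self-contained toy of the layout, with 2 data slots assumed per chunk:

```c
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define NB_SLOTS 2      /* data entries per chunk; +1 hidden link slot */

struct ent { uint64_t w0, wqe; };

static struct ent *chunk_alloc(void)
{
        /* calloc zeroes the link slot, i.e. "no next chunk". */
        return calloc(NB_SLOTS + 1, sizeof(struct ent));
}

int main(void)
{
        struct ent *first = chunk_alloc();
        struct ent *second = chunk_alloc();

        /* The slot just past the data slots stores the next pointer. */
        (first + NB_SLOTS)->w0 = (uintptr_t)second;

        for (struct ent *c = first; c != NULL;
             c = (struct ent *)(uintptr_t)(c + NB_SLOTS)->w0)
                printf("chunk %p\n", (void *)c);

        free(second);
        free(first);
        return 0;
}
```
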
const struct otx2_tim_ent * const pent,
const uint8_t flags)
{
+ struct otx2_tim_bkt *mirr_bkt;
struct otx2_tim_ent *chunk;
struct otx2_tim_bkt *bkt;
uint64_t lock_sema;
int16_t rem;
- bkt = tim_get_target_bucket(tim_ring, rel_bkt, flags);
-
__retry:
+ tim_get_target_bucket(tim_ring, rel_bkt, &bkt, &mirr_bkt);
+
	/* Get bucket sema. */
- lock_sema = tim_bkt_fetch_sema(bkt);
+ lock_sema = tim_bkt_fetch_sema_lock(bkt);
/* Bucket related checks. */
- if (unlikely(tim_bkt_get_hbt(lock_sema)))
- goto __retry;
-
+ if (unlikely(tim_bkt_get_hbt(lock_sema))) {
+ if (tim_bkt_get_nent(lock_sema) != 0) {
+ uint64_t hbt_state;
+#ifdef RTE_ARCH_ARM64
+ asm volatile(" ldxr %[hbt], [%[w1]] \n"
+ " tbz %[hbt], 33, dne%= \n"
+ " sevl \n"
+ "rty%=: wfe \n"
+ " ldxr %[hbt], [%[w1]] \n"
+ " tbnz %[hbt], 33, rty%= \n"
+ "dne%=: \n"
+ : [hbt] "=&r"(hbt_state)
+ : [w1] "r"((&bkt->w1))
+ : "memory");
+#else
+ do {
+ hbt_state = __atomic_load_n(&bkt->w1,
+ __ATOMIC_RELAXED);
+ } while (hbt_state & BIT_ULL(33));
+#endif
+
+ if (!(hbt_state & BIT_ULL(34))) {
+ tim_bkt_dec_lock(bkt);
+ goto __retry;
+ }
+ }
+ }
/* Insert the work. */
rem = tim_bkt_fetch_rem(lock_sema);
if (!rem) {
if (flags & OTX2_TIM_ENA_FB)
- chunk = tim_refill_chunk(bkt, tim_ring);
+ chunk = tim_refill_chunk(bkt, mirr_bkt, tim_ring);
if (flags & OTX2_TIM_ENA_DFB)
- chunk = tim_insert_chunk(bkt, tim_ring);
+ chunk = tim_insert_chunk(bkt, mirr_bkt, tim_ring);
if (unlikely(chunk == NULL)) {
- tim_bkt_set_rem(bkt, 0);
+ bkt->chunk_remainder = 0;
tim->impl_opaque[0] = 0;
tim->impl_opaque[1] = 0;
tim->state = RTE_EVENT_TIMER_ERROR;
+ tim_bkt_dec_lock(bkt);
return -ENOMEM;
}
- bkt->current_chunk = (uintptr_t)chunk;
- tim_bkt_set_rem(bkt, tim_ring->nb_chunk_slots - 1);
+ mirr_bkt->current_chunk = (uintptr_t)chunk;
+ bkt->chunk_remainder = tim_ring->nb_chunk_slots - 1;
} else {
- chunk = (struct otx2_tim_ent *)(uintptr_t)bkt->current_chunk;
+ chunk = (struct otx2_tim_ent *)mirr_bkt->current_chunk;
chunk += tim_ring->nb_chunk_slots - rem;
}
/* Copy work entry. */
*chunk = *pent;
- tim_bkt_inc_nent(bkt);
-
tim->impl_opaque[0] = (uintptr_t)chunk;
tim->impl_opaque[1] = (uintptr_t)bkt;
- tim->state = RTE_EVENT_TIMER_ARMED;
+ __atomic_store_n(&tim->state, RTE_EVENT_TIMER_ARMED, __ATOMIC_RELEASE);
+ tim_bkt_inc_nent(bkt);
+ tim_bkt_dec_lock_relaxed(bkt);
return 0;
}
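
Note the ordering at the tail of the arm path: the entry and `impl_opaque[]` are written first, then `tim->state` flips to ARMED with a RELEASE store, so any thread that observes ARMED through an ACQUIRE load is guaranteed to see the fields it will dereference. A distilled sketch of that publish/consume pairing, with illustrative names:

```c
#include <stdbool.h>
#include <stdint.h>

enum tstate { UNARMED, ARMED };

struct timer {
        uint64_t impl_opaque[2];
        enum tstate state;
};

/* Producer: fill the fields, then publish with RELEASE. */
static void publish(struct timer *t, uint64_t chunk, uint64_t bkt)
{
        t->impl_opaque[0] = chunk;
        t->impl_opaque[1] = bkt;
        __atomic_store_n(&t->state, ARMED, __ATOMIC_RELEASE);
}

/* Consumer: ACQUIRE pairs with the producer's RELEASE, so a timer
 * seen as ARMED has fully valid impl_opaque[] contents. */
static bool try_read(struct timer *t, uint64_t *chunk)
{
        if (__atomic_load_n(&t->state, __ATOMIC_ACQUIRE) != ARMED)
                return false;
        *chunk = t->impl_opaque[0];
        return true;
}

int main(void)
{
        struct timer t = { {0, 0}, UNARMED };
        uint64_t c;

        publish(&t, 0xdead, 0xbeef);
        return try_read(&t, &c) ? 0 : 1;
}
```
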
const struct otx2_tim_ent * const pent,
const uint8_t flags)
{
+ struct otx2_tim_bkt *mirr_bkt;
struct otx2_tim_ent *chunk;
struct otx2_tim_bkt *bkt;
uint64_t lock_sema;
int16_t rem;
__retry:
- bkt = tim_get_target_bucket(tim_ring, rel_bkt, flags);
-
+ tim_get_target_bucket(tim_ring, rel_bkt, &bkt, &mirr_bkt);
	/* Get bucket sema. */
lock_sema = tim_bkt_fetch_sema_lock(bkt);
/* Bucket related checks. */
if (unlikely(tim_bkt_get_hbt(lock_sema))) {
- tim_bkt_dec_lock(bkt);
- goto __retry;
+ if (tim_bkt_get_nent(lock_sema) != 0) {
+ uint64_t hbt_state;
+#ifdef RTE_ARCH_ARM64
+ asm volatile(" ldxr %[hbt], [%[w1]] \n"
+ " tbz %[hbt], 33, dne%= \n"
+ " sevl \n"
+ "rty%=: wfe \n"
+ " ldxr %[hbt], [%[w1]] \n"
+ " tbnz %[hbt], 33, rty%= \n"
+ "dne%=: \n"
+ : [hbt] "=&r"(hbt_state)
+ : [w1] "r"((&bkt->w1))
+ : "memory");
+#else
+ do {
+ hbt_state = __atomic_load_n(&bkt->w1,
+ __ATOMIC_RELAXED);
+ } while (hbt_state & BIT_ULL(33));
+#endif
+
+ if (!(hbt_state & BIT_ULL(34))) {
+ tim_bkt_dec_lock(bkt);
+ goto __retry;
+ }
+ }
}
rem = tim_bkt_fetch_rem(lock_sema);
-
if (rem < 0) {
- /* Goto diff bucket. */
tim_bkt_dec_lock(bkt);
+#ifdef RTE_ARCH_ARM64
+ uint64_t w1;
+ asm volatile(" ldxr %[w1], [%[crem]] \n"
+ " tbz %[w1], 63, dne%= \n"
+ " sevl \n"
+ "rty%=: wfe \n"
+ " ldxr %[w1], [%[crem]] \n"
+ " tbnz %[w1], 63, rty%= \n"
+ "dne%=: \n"
+ : [w1] "=&r"(w1)
+ : [crem] "r"(&bkt->w1)
+ : "memory");
+#else
+ while (__atomic_load_n((int64_t *)&bkt->w1, __ATOMIC_RELAXED) <
+ 0)
+ ;
+#endif
goto __retry;
} else if (!rem) {
		/* Only one thread can be here. */
if (flags & OTX2_TIM_ENA_FB)
- chunk = tim_refill_chunk(bkt, tim_ring);
+ chunk = tim_refill_chunk(bkt, mirr_bkt, tim_ring);
if (flags & OTX2_TIM_ENA_DFB)
- chunk = tim_insert_chunk(bkt, tim_ring);
+ chunk = tim_insert_chunk(bkt, mirr_bkt, tim_ring);
if (unlikely(chunk == NULL)) {
- tim_bkt_set_rem(bkt, 0);
- tim_bkt_dec_lock(bkt);
tim->impl_opaque[0] = 0;
tim->impl_opaque[1] = 0;
tim->state = RTE_EVENT_TIMER_ERROR;
+ tim_bkt_set_rem(bkt, 0);
+ tim_bkt_dec_lock(bkt);
return -ENOMEM;
}
- bkt->current_chunk = (uintptr_t)chunk;
- tim_bkt_set_rem(bkt, tim_ring->nb_chunk_slots - 1);
+ *chunk = *pent;
+ if (tim_bkt_fetch_lock(lock_sema)) {
+ do {
+ lock_sema = __atomic_load_n(&bkt->w1,
+ __ATOMIC_RELAXED);
+ } while (tim_bkt_fetch_lock(lock_sema) - 1);
+ rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+ }
+ mirr_bkt->current_chunk = (uintptr_t)chunk;
+ __atomic_store_n(&bkt->chunk_remainder,
+ tim_ring->nb_chunk_slots - 1, __ATOMIC_RELEASE);
} else {
- chunk = (struct otx2_tim_ent *)(uintptr_t)bkt->current_chunk;
+ chunk = (struct otx2_tim_ent *)mirr_bkt->current_chunk;
chunk += tim_ring->nb_chunk_slots - rem;
+ *chunk = *pent;
}
- /* Copy work entry. */
- *chunk = *pent;
- tim_bkt_dec_lock(bkt);
- tim_bkt_inc_nent(bkt);
tim->impl_opaque[0] = (uintptr_t)chunk;
tim->impl_opaque[1] = (uintptr_t)bkt;
- tim->state = RTE_EVENT_TIMER_ARMED;
+ __atomic_store_n(&tim->state, RTE_EVENT_TIMER_ARMED, __ATOMIC_RELEASE);
+ tim_bkt_inc_nent(bkt);
+ tim_bkt_dec_lock_relaxed(bkt);
return 0;
}
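
All of the wait sites above share one idiom: on arm64, `ldxr` arms the exclusive monitor on the bucket word and `wfe` parks the core until that cache line changes (`sevl` primes the first `wfe` so a wake-up between the load and the wait is not lost; `tbz`/`tbnz` test the bit of interest), while other architectures fall back to a relaxed polling loop. A portable sketch of the fallback, with the bit position as an assumed constant:

```c
#include <stdint.h>

#define HBT_BIT 33      /* assumed: bucket "HW busy" flag in w1 */

/* Spin until the watched bit clears and return the final word.
 * The arm64 path replaces this with ldxr/sevl/wfe so the core
 * sleeps instead of burning cycles on loads. */
static inline uint64_t wait_bit_clear(const uint64_t *w1)
{
        uint64_t v;

        do {
                v = __atomic_load_n(w1, __ATOMIC_RELAXED);
        } while (v & (1ULL << HBT_BIT));

        return v;
}

int main(void)
{
        uint64_t w1 = 0;        /* bit 33 already clear: returns at once */

        return (int)wait_bit_clear(&w1);
}
```
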
const uint16_t nb_timers, const uint8_t flags)
{
struct otx2_tim_ent *chunk = NULL;
+ struct otx2_tim_bkt *mirr_bkt;
struct otx2_tim_bkt *bkt;
uint16_t chunk_remainder;
uint16_t index = 0;
uint8_t lock_cnt;
__retry:
- bkt = tim_get_target_bucket(tim_ring, rel_bkt, flags);
+ tim_get_target_bucket(tim_ring, rel_bkt, &bkt, &mirr_bkt);
/* Only one thread beyond this. */
lock_sema = tim_bkt_inc_lock(bkt);
lock_cnt = (uint8_t)
	((lock_sema >> TIM_BUCKET_W1_S_LOCK) & TIM_BUCKET_W1_M_LOCK);

if (lock_cnt) {
tim_bkt_dec_lock(bkt);
+#ifdef RTE_ARCH_ARM64
+ asm volatile(" ldxrb %w[lock_cnt], [%[lock]] \n"
+ " tst %w[lock_cnt], 255 \n"
+ " beq dne%= \n"
+ " sevl \n"
+ "rty%=: wfe \n"
+ " ldxrb %w[lock_cnt], [%[lock]] \n"
+ " tst %w[lock_cnt], 255 \n"
+ " bne rty%= \n"
+ "dne%=: \n"
+ : [lock_cnt] "=&r"(lock_cnt)
+ : [lock] "r"(&bkt->lock)
+ : "memory");
+#else
+ while (__atomic_load_n(&bkt->lock, __ATOMIC_RELAXED))
+ ;
+#endif
goto __retry;
}
/* Bucket related checks. */
if (unlikely(tim_bkt_get_hbt(lock_sema))) {
- tim_bkt_dec_lock(bkt);
- goto __retry;
+ if (tim_bkt_get_nent(lock_sema) != 0) {
+ uint64_t hbt_state;
+#ifdef RTE_ARCH_ARM64
+ asm volatile(" ldxr %[hbt], [%[w1]] \n"
+ " tbz %[hbt], 33, dne%= \n"
+ " sevl \n"
+ "rty%=: wfe \n"
+ " ldxr %[hbt], [%[w1]] \n"
+ " tbnz %[hbt], 33, rty%= \n"
+ "dne%=: \n"
+ : [hbt] "=&r"(hbt_state)
+ : [w1] "r"((&bkt->w1))
+ : "memory");
+#else
+ do {
+ hbt_state = __atomic_load_n(&bkt->w1,
+ __ATOMIC_RELAXED);
+ } while (hbt_state & BIT_ULL(33));
+#endif
+
+ if (!(hbt_state & BIT_ULL(34))) {
+ tim_bkt_dec_lock(bkt);
+ goto __retry;
+ }
+ }
}
chunk_remainder = tim_bkt_fetch_rem(lock_sema);
crem = tim_ring->nb_chunk_slots - chunk_remainder;
if (chunk_remainder && crem) {
chunk = ((struct otx2_tim_ent *)
- (uintptr_t)bkt->current_chunk) + crem;
+ mirr_bkt->current_chunk) + crem;
index = tim_cpy_wrk(index, chunk_remainder, chunk, tim,
ents, bkt);
}
if (flags & OTX2_TIM_ENA_FB)
- chunk = tim_refill_chunk(bkt, tim_ring);
+ chunk = tim_refill_chunk(bkt, mirr_bkt, tim_ring);
if (flags & OTX2_TIM_ENA_DFB)
- chunk = tim_insert_chunk(bkt, tim_ring);
+ chunk = tim_insert_chunk(bkt, mirr_bkt, tim_ring);
if (unlikely(chunk == NULL)) {
tim_bkt_dec_lock(bkt);
return crem;
}
*(uint64_t *)(chunk + tim_ring->nb_chunk_slots) = 0;
- bkt->current_chunk = (uintptr_t)chunk;
+ mirr_bkt->current_chunk = (uintptr_t)chunk;
tim_cpy_wrk(index, nb_timers, chunk, tim, ents, bkt);
rem = nb_timers - chunk_remainder;
tim_bkt_set_rem(bkt, tim_ring->nb_chunk_slots - rem);
tim_bkt_add_nent(bkt, rem);
} else {
- chunk = (struct otx2_tim_ent *)(uintptr_t)bkt->current_chunk;
+ chunk = (struct otx2_tim_ent *)mirr_bkt->current_chunk;
chunk += (tim_ring->nb_chunk_slots - chunk_remainder);
tim_cpy_wrk(index, nb_timers, chunk, tim, ents, bkt);
bkt = (struct otx2_tim_bkt *)(uintptr_t)tim->impl_opaque[1];
lock_sema = tim_bkt_inc_lock(bkt);
if (tim_bkt_get_hbt(lock_sema) || !tim_bkt_get_nent(lock_sema)) {
- tim_bkt_dec_lock(bkt);
tim->impl_opaque[0] = 0;
tim->impl_opaque[1] = 0;
+ tim_bkt_dec_lock(bkt);
return -ENOENT;
}
entry->w0 = 0;
entry->wqe = 0;
- tim_bkt_dec_lock(bkt);
-
tim->state = RTE_EVENT_TIMER_CANCELED;
tim->impl_opaque[0] = 0;
tim->impl_opaque[1] = 0;
+ tim_bkt_dec_lock(bkt);
return 0;
}
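
The cancel path now follows the same discipline as arm: validate under the bucket lock, scrub the entry, update the user-visible timer fields, and only then drop the lock, so a concurrent re-arm can never observe a half-cancelled timer. A condensed skeleton of that locking shape (helper names and the `valid` check are illustrative):

```c
#include <errno.h>
#include <stdint.h>

struct bkt { uint8_t lock; };

static inline void lock_inc(struct bkt *b)
{
        __atomic_fetch_add(&b->lock, 1, __ATOMIC_ACQUIRE);
}

static inline void lock_dec(struct bkt *b)
{
        __atomic_fetch_sub(&b->lock, 1, __ATOMIC_RELEASE);
}

/* 'valid' stands in for the hbt/nent checks on the lock semaphore. */
static int cancel(struct bkt *b, int valid, uint64_t *entry, int *state)
{
        lock_inc(b);
        if (!valid) {
                lock_dec(b);    /* fail path releases last, too */
                return -ENOENT;
        }
        *entry = 0;             /* scrub the armed entry */
        *state = 0;             /* mark the timer canceled */
        lock_dec(b);            /* release only after all updates */
        return 0;
}

int main(void)
{
        struct bkt b = { 0 };
        uint64_t entry = 1;
        int state = 1;

        return cancel(&b, 1, &entry, &state);
}
```
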