X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fevent%2Focteontx2%2Fotx2_tim_worker.h;h=efe88a8692e2f5cb940b1ae72b6b49a4032420be;hb=3f95dfb9c4ed6a026112330af78f8e3bae7156fc;hp=c896b543383f8f5f8c2656d3abaa7916c269a40e;hpb=7c6e645b599fcb334396280cf1a067a74e20b424;p=dpdk.git diff --git a/drivers/event/octeontx2/otx2_tim_worker.h b/drivers/event/octeontx2/otx2_tim_worker.h index c896b54338..efe88a8692 100644 --- a/drivers/event/octeontx2/otx2_tim_worker.h +++ b/drivers/event/octeontx2/otx2_tim_worker.h @@ -84,7 +84,13 @@ tim_bkt_inc_lock(struct otx2_tim_bkt *bktp) static inline void tim_bkt_dec_lock(struct otx2_tim_bkt *bktp) { - __atomic_add_fetch(&bktp->lock, 0xff, __ATOMIC_RELEASE); + __atomic_fetch_sub(&bktp->lock, 1, __ATOMIC_RELEASE); +} + +static inline void +tim_bkt_dec_lock_relaxed(struct otx2_tim_bkt *bktp) +{ + __atomic_fetch_sub(&bktp->lock, 1, __ATOMIC_RELAXED); } static inline uint32_t @@ -115,28 +121,41 @@ tim_bkt_clr_nent(struct otx2_tim_bkt *bktp) return __atomic_and_fetch(&bktp->w1, v, __ATOMIC_ACQ_REL); } -static __rte_always_inline struct otx2_tim_bkt * -tim_get_target_bucket(struct otx2_tim_ring * const tim_ring, - const uint32_t rel_bkt, const uint8_t flag) +static inline uint64_t +tim_bkt_fast_mod(uint64_t n, uint64_t d, struct rte_reciprocal_u64 R) { - const uint64_t bkt_cyc = rte_rdtsc() - tim_ring->ring_start_cyc; - uint32_t bucket = rte_reciprocal_divide_u64(bkt_cyc, - &tim_ring->fast_div) + rel_bkt; - - if (flag & OTX2_TIM_BKT_MOD) - bucket = bucket % tim_ring->nb_bkts; - if (flag & OTX2_TIM_BKT_AND) - bucket = bucket & (tim_ring->nb_bkts - 1); + return (n - (d * rte_reciprocal_divide_u64(n, &R))); +} - return &tim_ring->bkt[bucket]; +static __rte_always_inline void +tim_get_target_bucket(struct otx2_tim_ring *const tim_ring, + const uint32_t rel_bkt, struct otx2_tim_bkt **bkt, + struct otx2_tim_bkt **mirr_bkt) +{ + const uint64_t bkt_cyc = tim_cntvct() - tim_ring->ring_start_cyc; + uint64_t bucket = + rte_reciprocal_divide_u64(bkt_cyc, &tim_ring->fast_div) + + rel_bkt; + uint64_t mirr_bucket = 0; + + bucket = + tim_bkt_fast_mod(bucket, tim_ring->nb_bkts, tim_ring->fast_bkt); + mirr_bucket = tim_bkt_fast_mod(bucket + (tim_ring->nb_bkts >> 1), + tim_ring->nb_bkts, tim_ring->fast_bkt); + *bkt = &tim_ring->bkt[bucket]; + *mirr_bkt = &tim_ring->bkt[mirr_bucket]; } static struct otx2_tim_ent * tim_clr_bkt(struct otx2_tim_ring * const tim_ring, struct otx2_tim_bkt * const bkt) { +#define TIM_MAX_OUTSTANDING_OBJ 64 + void *pend_chunks[TIM_MAX_OUTSTANDING_OBJ]; struct otx2_tim_ent *chunk; struct otx2_tim_ent *pnext; + uint8_t objs = 0; + chunk = ((struct otx2_tim_ent *)(uintptr_t)bkt->first_chunk); chunk = (struct otx2_tim_ent *)(uintptr_t)(chunk + @@ -144,15 +163,25 @@ tim_clr_bkt(struct otx2_tim_ring * const tim_ring, while (chunk) { pnext = (struct otx2_tim_ent *)(uintptr_t) ((chunk + tim_ring->nb_chunk_slots)->w0); - rte_mempool_put(tim_ring->chunk_pool, chunk); + if (objs == TIM_MAX_OUTSTANDING_OBJ) { + rte_mempool_put_bulk(tim_ring->chunk_pool, pend_chunks, + objs); + objs = 0; + } + pend_chunks[objs++] = chunk; chunk = pnext; } + if (objs) + rte_mempool_put_bulk(tim_ring->chunk_pool, pend_chunks, + objs); + return (struct otx2_tim_ent *)(uintptr_t)bkt->first_chunk; } static struct otx2_tim_ent * tim_refill_chunk(struct otx2_tim_bkt * const bkt, + struct otx2_tim_bkt * const mirr_bkt, struct otx2_tim_ring * const tim_ring) { struct otx2_tim_ent *chunk; @@ -162,8 +191,8 @@ tim_refill_chunk(struct otx2_tim_bkt * const bkt, (void **)&chunk))) return 
NULL; if (bkt->nb_entry) { - *(uint64_t *)(((struct otx2_tim_ent *)(uintptr_t) - bkt->current_chunk) + + *(uint64_t *)(((struct otx2_tim_ent *) + mirr_bkt->current_chunk) + tim_ring->nb_chunk_slots) = (uintptr_t)chunk; } else { @@ -180,6 +209,7 @@ tim_refill_chunk(struct otx2_tim_bkt * const bkt, static struct otx2_tim_ent * tim_insert_chunk(struct otx2_tim_bkt * const bkt, + struct otx2_tim_bkt * const mirr_bkt, struct otx2_tim_ring * const tim_ring) { struct otx2_tim_ent *chunk; @@ -190,7 +220,7 @@ tim_insert_chunk(struct otx2_tim_bkt * const bkt, *(uint64_t *)(chunk + tim_ring->nb_chunk_slots) = 0; if (bkt->nb_entry) { *(uint64_t *)(((struct otx2_tim_ent *)(uintptr_t) - bkt->current_chunk) + + mirr_bkt->current_chunk) + tim_ring->nb_chunk_slots) = (uintptr_t)chunk; } else { bkt->first_chunk = (uintptr_t)chunk; @@ -205,14 +235,15 @@ tim_add_entry_sp(struct otx2_tim_ring * const tim_ring, const struct otx2_tim_ent * const pent, const uint8_t flags) { + struct otx2_tim_bkt *mirr_bkt; struct otx2_tim_ent *chunk; struct otx2_tim_bkt *bkt; uint64_t lock_sema; int16_t rem; - bkt = tim_get_target_bucket(tim_ring, rel_bkt, flags); - __retry: + tim_get_target_bucket(tim_ring, rel_bkt, &bkt, &mirr_bkt); + /* Get Bucket sema*/ lock_sema = tim_bkt_fetch_sema_lock(bkt); @@ -221,22 +252,20 @@ __retry: if (tim_bkt_get_nent(lock_sema) != 0) { uint64_t hbt_state; #ifdef RTE_ARCH_ARM64 - asm volatile( - " ldaxr %[hbt], [%[w1]] \n" - " tbz %[hbt], 33, dne%= \n" - " sevl \n" - "rty%=: wfe \n" - " ldaxr %[hbt], [%[w1]] \n" - " tbnz %[hbt], 33, rty%= \n" - "dne%=: \n" - : [hbt] "=&r" (hbt_state) - : [w1] "r" ((&bkt->w1)) - : "memory" - ); + asm volatile(" ldxr %[hbt], [%[w1]] \n" + " tbz %[hbt], 33, dne%= \n" + " sevl \n" + "rty%=: wfe \n" + " ldxr %[hbt], [%[w1]] \n" + " tbnz %[hbt], 33, rty%= \n" + "dne%=: \n" + : [hbt] "=&r"(hbt_state) + : [w1] "r"((&bkt->w1)) + : "memory"); #else do { hbt_state = __atomic_load_n(&bkt->w1, - __ATOMIC_ACQUIRE); + __ATOMIC_RELAXED); } while (hbt_state & BIT_ULL(33)); #endif @@ -246,40 +275,38 @@ __retry: } } } - /* Insert the work. */ rem = tim_bkt_fetch_rem(lock_sema); if (!rem) { if (flags & OTX2_TIM_ENA_FB) - chunk = tim_refill_chunk(bkt, tim_ring); + chunk = tim_refill_chunk(bkt, mirr_bkt, tim_ring); if (flags & OTX2_TIM_ENA_DFB) - chunk = tim_insert_chunk(bkt, tim_ring); + chunk = tim_insert_chunk(bkt, mirr_bkt, tim_ring); if (unlikely(chunk == NULL)) { bkt->chunk_remainder = 0; - tim_bkt_dec_lock(bkt); tim->impl_opaque[0] = 0; tim->impl_opaque[1] = 0; tim->state = RTE_EVENT_TIMER_ERROR; + tim_bkt_dec_lock(bkt); return -ENOMEM; } - bkt->current_chunk = (uintptr_t)chunk; + mirr_bkt->current_chunk = (uintptr_t)chunk; bkt->chunk_remainder = tim_ring->nb_chunk_slots - 1; } else { - chunk = (struct otx2_tim_ent *)(uintptr_t)bkt->current_chunk; + chunk = (struct otx2_tim_ent *)mirr_bkt->current_chunk; chunk += tim_ring->nb_chunk_slots - rem; } /* Copy work entry. 
*/ *chunk = *pent; - tim_bkt_inc_nent(bkt); - tim_bkt_dec_lock(bkt); - tim->impl_opaque[0] = (uintptr_t)chunk; tim->impl_opaque[1] = (uintptr_t)bkt; - tim->state = RTE_EVENT_TIMER_ARMED; + __atomic_store_n(&tim->state, RTE_EVENT_TIMER_ARMED, __ATOMIC_RELEASE); + tim_bkt_inc_nent(bkt); + tim_bkt_dec_lock_relaxed(bkt); return 0; } @@ -291,13 +318,14 @@ tim_add_entry_mp(struct otx2_tim_ring * const tim_ring, const struct otx2_tim_ent * const pent, const uint8_t flags) { + struct otx2_tim_bkt *mirr_bkt; struct otx2_tim_ent *chunk; struct otx2_tim_bkt *bkt; uint64_t lock_sema; int16_t rem; __retry: - bkt = tim_get_target_bucket(tim_ring, rel_bkt, flags); + tim_get_target_bucket(tim_ring, rel_bkt, &bkt, &mirr_bkt); /* Get Bucket sema*/ lock_sema = tim_bkt_fetch_sema_lock(bkt); @@ -306,22 +334,20 @@ __retry: if (tim_bkt_get_nent(lock_sema) != 0) { uint64_t hbt_state; #ifdef RTE_ARCH_ARM64 - asm volatile( - " ldaxr %[hbt], [%[w1]] \n" - " tbz %[hbt], 33, dne%= \n" - " sevl \n" - "rty%=: wfe \n" - " ldaxr %[hbt], [%[w1]] \n" - " tbnz %[hbt], 33, rty%= \n" - "dne%=: \n" - : [hbt] "=&r" (hbt_state) - : [w1] "r" ((&bkt->w1)) - : "memory" - ); + asm volatile(" ldxr %[hbt], [%[w1]] \n" + " tbz %[hbt], 33, dne%= \n" + " sevl \n" + "rty%=: wfe \n" + " ldxr %[hbt], [%[w1]] \n" + " tbnz %[hbt], 33, rty%= \n" + "dne%=: \n" + : [hbt] "=&r"(hbt_state) + : [w1] "r"((&bkt->w1)) + : "memory"); #else do { hbt_state = __atomic_load_n(&bkt->w1, - __ATOMIC_ACQUIRE); + __ATOMIC_RELAXED); } while (hbt_state & BIT_ULL(33)); #endif @@ -334,62 +360,62 @@ __retry: rem = tim_bkt_fetch_rem(lock_sema); if (rem < 0) { + tim_bkt_dec_lock(bkt); #ifdef RTE_ARCH_ARM64 - asm volatile( - " ldaxrh %w[rem], [%[crem]] \n" - " tbz %w[rem], 15, dne%= \n" - " sevl \n" - "rty%=: wfe \n" - " ldaxrh %w[rem], [%[crem]] \n" - " tbnz %w[rem], 15, rty%= \n" - "dne%=: \n" - : [rem] "=&r" (rem) - : [crem] "r" (&bkt->chunk_remainder) - : "memory" - ); + uint64_t w1; + asm volatile(" ldxr %[w1], [%[crem]] \n" + " tbz %[w1], 63, dne%= \n" + " sevl \n" + "rty%=: wfe \n" + " ldxr %[w1], [%[crem]] \n" + " tbnz %[w1], 63, rty%= \n" + "dne%=: \n" + : [w1] "=&r"(w1) + : [crem] "r"(&bkt->w1) + : "memory"); #else - while (__atomic_load_n(&bkt->chunk_remainder, - __ATOMIC_ACQUIRE) < 0) + while (__atomic_load_n((int64_t *)&bkt->w1, __ATOMIC_RELAXED) < + 0) ; #endif - /* Goto diff bucket. 
*/ - tim_bkt_dec_lock(bkt); goto __retry; } else if (!rem) { /* Only one thread can be here*/ if (flags & OTX2_TIM_ENA_FB) - chunk = tim_refill_chunk(bkt, tim_ring); + chunk = tim_refill_chunk(bkt, mirr_bkt, tim_ring); if (flags & OTX2_TIM_ENA_DFB) - chunk = tim_insert_chunk(bkt, tim_ring); + chunk = tim_insert_chunk(bkt, mirr_bkt, tim_ring); if (unlikely(chunk == NULL)) { - tim_bkt_set_rem(bkt, 0); - tim_bkt_dec_lock(bkt); tim->impl_opaque[0] = 0; tim->impl_opaque[1] = 0; tim->state = RTE_EVENT_TIMER_ERROR; + tim_bkt_set_rem(bkt, 0); + tim_bkt_dec_lock(bkt); return -ENOMEM; } *chunk = *pent; - while (tim_bkt_fetch_lock(lock_sema) != - (-tim_bkt_fetch_rem(lock_sema))) - lock_sema = __atomic_load_n(&bkt->w1, __ATOMIC_ACQUIRE); - - bkt->current_chunk = (uintptr_t)chunk; + if (tim_bkt_fetch_lock(lock_sema)) { + do { + lock_sema = __atomic_load_n(&bkt->w1, + __ATOMIC_RELAXED); + } while (tim_bkt_fetch_lock(lock_sema) - 1); + rte_atomic_thread_fence(__ATOMIC_ACQUIRE); + } + mirr_bkt->current_chunk = (uintptr_t)chunk; __atomic_store_n(&bkt->chunk_remainder, tim_ring->nb_chunk_slots - 1, __ATOMIC_RELEASE); } else { - chunk = (struct otx2_tim_ent *)bkt->current_chunk; + chunk = (struct otx2_tim_ent *)mirr_bkt->current_chunk; chunk += tim_ring->nb_chunk_slots - rem; *chunk = *pent; } - /* Copy work entry. */ - tim_bkt_inc_nent(bkt); - tim_bkt_dec_lock(bkt); tim->impl_opaque[0] = (uintptr_t)chunk; tim->impl_opaque[1] = (uintptr_t)bkt; - tim->state = RTE_EVENT_TIMER_ARMED; + __atomic_store_n(&tim->state, RTE_EVENT_TIMER_ARMED, __ATOMIC_RELEASE); + tim_bkt_inc_nent(bkt); + tim_bkt_dec_lock_relaxed(bkt); return 0; } @@ -420,6 +446,7 @@ tim_add_entry_brst(struct otx2_tim_ring * const tim_ring, const uint16_t nb_timers, const uint8_t flags) { struct otx2_tim_ent *chunk = NULL; + struct otx2_tim_bkt *mirr_bkt; struct otx2_tim_bkt *bkt; uint16_t chunk_remainder; uint16_t index = 0; @@ -428,7 +455,7 @@ tim_add_entry_brst(struct otx2_tim_ring * const tim_ring, uint8_t lock_cnt; __retry: - bkt = tim_get_target_bucket(tim_ring, rel_bkt, flags); + tim_get_target_bucket(tim_ring, rel_bkt, &bkt, &mirr_bkt); /* Only one thread beyond this. 
*/ lock_sema = tim_bkt_inc_lock(bkt); @@ -437,6 +464,23 @@ __retry: if (lock_cnt) { tim_bkt_dec_lock(bkt); +#ifdef RTE_ARCH_ARM64 + asm volatile(" ldxrb %w[lock_cnt], [%[lock]] \n" + " tst %w[lock_cnt], 255 \n" + " beq dne%= \n" + " sevl \n" + "rty%=: wfe \n" + " ldxrb %w[lock_cnt], [%[lock]] \n" + " tst %w[lock_cnt], 255 \n" + " bne rty%= \n" + "dne%=: \n" + : [lock_cnt] "=&r"(lock_cnt) + : [lock] "r"(&bkt->lock) + : "memory"); +#else + while (__atomic_load_n(&bkt->lock, __ATOMIC_RELAXED)) + ; +#endif goto __retry; } @@ -445,22 +489,20 @@ __retry: if (tim_bkt_get_nent(lock_sema) != 0) { uint64_t hbt_state; #ifdef RTE_ARCH_ARM64 - asm volatile( - " ldaxr %[hbt], [%[w1]] \n" - " tbz %[hbt], 33, dne%= \n" - " sevl \n" - "rty%=: wfe \n" - " ldaxr %[hbt], [%[w1]] \n" - " tbnz %[hbt], 33, rty%= \n" - "dne%=: \n" - : [hbt] "=&r" (hbt_state) - : [w1] "r" ((&bkt->w1)) - : "memory" - ); + asm volatile(" ldxr %[hbt], [%[w1]] \n" + " tbz %[hbt], 33, dne%= \n" + " sevl \n" + "rty%=: wfe \n" + " ldxr %[hbt], [%[w1]] \n" + " tbnz %[hbt], 33, rty%= \n" + "dne%=: \n" + : [hbt] "=&r"(hbt_state) + : [w1] "r"((&bkt->w1)) + : "memory"); #else do { hbt_state = __atomic_load_n(&bkt->w1, - __ATOMIC_ACQUIRE); + __ATOMIC_RELAXED); } while (hbt_state & BIT_ULL(33)); #endif @@ -477,7 +519,7 @@ __retry: crem = tim_ring->nb_chunk_slots - chunk_remainder; if (chunk_remainder && crem) { chunk = ((struct otx2_tim_ent *) - (uintptr_t)bkt->current_chunk) + crem; + mirr_bkt->current_chunk) + crem; index = tim_cpy_wrk(index, chunk_remainder, chunk, tim, ents, bkt); @@ -486,9 +528,9 @@ __retry: } if (flags & OTX2_TIM_ENA_FB) - chunk = tim_refill_chunk(bkt, tim_ring); + chunk = tim_refill_chunk(bkt, mirr_bkt, tim_ring); if (flags & OTX2_TIM_ENA_DFB) - chunk = tim_insert_chunk(bkt, tim_ring); + chunk = tim_insert_chunk(bkt, mirr_bkt, tim_ring); if (unlikely(chunk == NULL)) { tim_bkt_dec_lock(bkt); @@ -497,14 +539,14 @@ __retry: return crem; } *(uint64_t *)(chunk + tim_ring->nb_chunk_slots) = 0; - bkt->current_chunk = (uintptr_t)chunk; + mirr_bkt->current_chunk = (uintptr_t)chunk; tim_cpy_wrk(index, nb_timers, chunk, tim, ents, bkt); rem = nb_timers - chunk_remainder; tim_bkt_set_rem(bkt, tim_ring->nb_chunk_slots - rem); tim_bkt_add_nent(bkt, rem); } else { - chunk = (struct otx2_tim_ent *)(uintptr_t)bkt->current_chunk; + chunk = (struct otx2_tim_ent *)mirr_bkt->current_chunk; chunk += (tim_ring->nb_chunk_slots - chunk_remainder); tim_cpy_wrk(index, nb_timers, chunk, tim, ents, bkt); @@ -537,19 +579,18 @@ tim_rm_entry(struct rte_event_timer *tim) bkt = (struct otx2_tim_bkt *)(uintptr_t)tim->impl_opaque[1]; lock_sema = tim_bkt_inc_lock(bkt); if (tim_bkt_get_hbt(lock_sema) || !tim_bkt_get_nent(lock_sema)) { - tim_bkt_dec_lock(bkt); tim->impl_opaque[0] = 0; tim->impl_opaque[1] = 0; + tim_bkt_dec_lock(bkt); return -ENOENT; } entry->w0 = 0; entry->wqe = 0; - tim_bkt_dec_lock(bkt); - tim->state = RTE_EVENT_TIMER_CANCELED; tim->impl_opaque[0] = 0; tim->impl_opaque[1] = 0; + tim_bkt_dec_lock(bkt); return 0; }
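
Two of the techniques used in the patch above can be shown in isolation. Both sketches are illustrative only: the names (struct ring, ring_init, ring_bkt_idx, bkt_dec_lock) are placeholders for this note, not identifiers from the driver.

Divide-free modulo, as in tim_bkt_fast_mod(): precompute a reciprocal for the fixed bucket count once, then derive the remainder from the reciprocal-based quotient on the fast path instead of issuing a hardware divide per arm.

    #include <stdint.h>
    #include <rte_reciprocal.h>

    struct ring {
            uint64_t nb_bkts;                   /* placeholder bucket count */
            struct rte_reciprocal_u64 fast_bkt; /* precomputed reciprocal */
    };

    static inline void
    ring_init(struct ring *r, uint64_t nb_bkts)
    {
            r->nb_bkts = nb_bkts;
            /* One-time cost, paid at ring creation. */
            r->fast_bkt = rte_reciprocal_value_u64(nb_bkts);
    }

    static inline uint64_t
    ring_bkt_idx(const struct ring *r, uint64_t n)
    {
            uint64_t q = rte_reciprocal_divide_u64(n, &r->fast_bkt);
            return n - r->nb_bkts * q;          /* n % nb_bkts, no divide */
    }

Release-ordered unlock, as in tim_bkt_dec_lock(): subtracting 1 reads more clearly than adding 0xff (equivalent for an 8-bit counter that wraps), and the RELEASE ordering makes stores done while holding the bucket lock visible before the lock is dropped. The RELAXED variant introduced by the patch is sufficient once the timer state itself has already been published with a release store.

    #include <stdatomic.h>
    #include <stdint.h>

    static inline void
    bkt_dec_lock(_Atomic uint8_t *lock)
    {
            /* Orders prior stores before the lock count drops. */
            atomic_fetch_sub_explicit(lock, 1, memory_order_release);
    }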