/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#include <eventdev_pmd.h>

#include <rte_common.h>
#include <rte_branch_prediction.h>

#include "timvf_evdev.h"
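
/*
 * Fast-path helpers for the OCTEONTX TIM VF event timer adapter.
 *
 * A timer ring is an array of buckets. Bucket word w1 packs the state the
 * helpers below operate on: a lock byte, the free slots left in the current
 * chunk (CHUNK_REMAINDER), the entry count (NUM_ENTRIES) and the BSK/HBT/SBT
 * status bits that the arm paths poll before touching a bucket. All fields
 * are manipulated with GCC __atomic builtins so that the single-producer,
 * multi-producer and burst arm paths can share them.
 */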

static inline int16_t
timr_bkt_fetch_rem(uint64_t w1)
{
	return (w1 >> TIM_BUCKET_W1_S_CHUNK_REMAINDER) &
		TIM_BUCKET_W1_M_CHUNK_REMAINDER;
}

static inline int16_t
timr_bkt_get_rem(struct tim_mem_bucket *bktp)
{
	return __atomic_load_n(&bktp->chunk_remainder,
			__ATOMIC_ACQUIRE);
}

static inline void
timr_bkt_set_rem(struct tim_mem_bucket *bktp, uint16_t v)
{
	__atomic_store_n(&bktp->chunk_remainder, v,
			__ATOMIC_RELEASE);
}

static inline void
timr_bkt_sub_rem(struct tim_mem_bucket *bktp, uint16_t v)
{
	__atomic_fetch_sub(&bktp->chunk_remainder, v,
			__ATOMIC_RELEASE);
}

static inline uint8_t
timr_bkt_get_sbt(uint64_t w1)
{
	return (w1 >> TIM_BUCKET_W1_S_SBT) & TIM_BUCKET_W1_M_SBT;
}

static inline uint64_t
timr_bkt_set_sbt(struct tim_mem_bucket *bktp)
{
	const uint64_t v = TIM_BUCKET_W1_M_SBT << TIM_BUCKET_W1_S_SBT;
	return __atomic_fetch_or(&bktp->w1, v, __ATOMIC_ACQ_REL);
}

static inline uint64_t
timr_bkt_clr_sbt(struct tim_mem_bucket *bktp)
{
	const uint64_t v = ~(TIM_BUCKET_W1_M_SBT << TIM_BUCKET_W1_S_SBT);
	return __atomic_fetch_and(&bktp->w1, v, __ATOMIC_ACQ_REL);
}

static inline uint8_t
timr_bkt_get_shbt(uint64_t w1)
{
	return ((w1 >> TIM_BUCKET_W1_S_HBT) & TIM_BUCKET_W1_M_HBT) |
		((w1 >> TIM_BUCKET_W1_S_SBT) & TIM_BUCKET_W1_M_SBT);
}

static inline uint8_t
timr_bkt_get_hbt(uint64_t w1)
{
	return (w1 >> TIM_BUCKET_W1_S_HBT) & TIM_BUCKET_W1_M_HBT;
}

static inline uint8_t
timr_bkt_get_bsk(uint64_t w1)
{
	return (w1 >> TIM_BUCKET_W1_S_BSK) & TIM_BUCKET_W1_M_BSK;
}

static inline uint64_t
timr_bkt_clr_bsk(struct tim_mem_bucket *bktp)
{
	/* Clear everything except the lock. */
	const uint64_t v = TIM_BUCKET_W1_M_LOCK << TIM_BUCKET_W1_S_LOCK;
	return __atomic_fetch_and(&bktp->w1, v, __ATOMIC_ACQ_REL);
}

static inline uint64_t
timr_bkt_fetch_sema_lock(struct tim_mem_bucket *bktp)
{
	return __atomic_fetch_add(&bktp->w1, TIM_BUCKET_SEMA_WLOCK,
			__ATOMIC_ACQ_REL);
}

static inline uint64_t
timr_bkt_fetch_sema(struct tim_mem_bucket *bktp)
{
	return __atomic_fetch_add(&bktp->w1, TIM_BUCKET_SEMA,
			__ATOMIC_RELAXED);
}

static inline uint64_t
timr_bkt_inc_lock(struct tim_mem_bucket *bktp)
{
	const uint64_t v = 1ull << TIM_BUCKET_W1_S_LOCK;
	return __atomic_fetch_add(&bktp->w1, v, __ATOMIC_ACQ_REL);
}

static inline void
timr_bkt_dec_lock(struct tim_mem_bucket *bktp)
{
	/* Adding 0xff to the 8-bit lock count wraps it around,
	 * decrementing the lock by one.
	 */
	__atomic_add_fetch(&bktp->lock, 0xff, __ATOMIC_ACQ_REL);
}

static inline uint32_t
timr_bkt_get_nent(uint64_t w1)
{
	return (w1 >> TIM_BUCKET_W1_S_NUM_ENTRIES) &
		TIM_BUCKET_W1_M_NUM_ENTRIES;
}

static inline void
timr_bkt_inc_nent(struct tim_mem_bucket *bktp)
{
	__atomic_add_fetch(&bktp->nb_entry, 1, __ATOMIC_RELAXED);
}

static inline void
timr_bkt_add_nent(struct tim_mem_bucket *bktp, uint32_t v)
{
	__atomic_add_fetch(&bktp->nb_entry, v, __ATOMIC_RELAXED);
}

static inline uint64_t
timr_bkt_clr_nent(struct tim_mem_bucket *bktp)
{
	const uint64_t v = ~(TIM_BUCKET_W1_M_NUM_ENTRIES <<
			TIM_BUCKET_W1_S_NUM_ENTRIES);
	return __atomic_and_fetch(&bktp->w1, v, __ATOMIC_ACQ_REL);
}

static inline struct tim_mem_entry *
timr_clr_bkt(struct timvf_ring * const timr, struct tim_mem_bucket * const bkt)
{
	struct tim_mem_entry *chunk;
	struct tim_mem_entry *pnext;

	chunk = ((struct tim_mem_entry *)(uintptr_t)bkt->first_chunk);
	chunk = (struct tim_mem_entry *)(uintptr_t)(chunk + nb_chunk_slots)->w0;

	/* Free every chunk on the list except the first one. */
	while (chunk) {
		pnext = (struct tim_mem_entry *)(uintptr_t)
			((chunk + nb_chunk_slots)->w0);
		rte_mempool_put(timr->chunk_pool, chunk);
		chunk = pnext;
	}

	return (struct tim_mem_entry *)(uintptr_t)bkt->first_chunk;
}

static inline int
timvf_rem_entry(struct rte_event_timer *tim)
{
	uint64_t lock_sema;
	struct tim_mem_entry *entry;
	struct tim_mem_bucket *bkt;

	if (tim->impl_opaque[1] == 0 ||
			tim->impl_opaque[0] == 0)
		return -ENOENT;

	entry = (struct tim_mem_entry *)(uintptr_t)tim->impl_opaque[0];
	if (entry->wqe != tim->ev.u64) {
		tim->impl_opaque[1] = tim->impl_opaque[0] = 0;
		return -ENOENT;
	}
	bkt = (struct tim_mem_bucket *)(uintptr_t)tim->impl_opaque[1];
	lock_sema = timr_bkt_inc_lock(bkt);
	if (timr_bkt_get_shbt(lock_sema)
			|| !timr_bkt_get_nent(lock_sema)) {
		timr_bkt_dec_lock(bkt);
		tim->impl_opaque[1] = tim->impl_opaque[0] = 0;
		return -ENOENT;
	}

	entry->w0 = entry->wqe = 0;
	timr_bkt_dec_lock(bkt);

	tim->state = RTE_EVENT_TIMER_CANCELED;
	tim->impl_opaque[1] = tim->impl_opaque[0] = 0;
	return 0;
}

static inline struct tim_mem_entry *
timvf_refill_chunk_generic(struct tim_mem_bucket * const bkt,
		struct timvf_ring * const timr)
{
	struct tim_mem_entry *chunk;

	if (bkt->nb_entry || !bkt->first_chunk) {
		if (unlikely(rte_mempool_get(timr->chunk_pool,
						(void **)&chunk))) {
			return NULL;
		}
		if (bkt->nb_entry) {
			*(uint64_t *)(((struct tim_mem_entry *)(uintptr_t)
					bkt->current_chunk) +
					nb_chunk_slots) =
				(uintptr_t) chunk;
		} else {
			bkt->first_chunk = (uintptr_t) chunk;
		}
	} else {
		/* Bucket is empty; reuse its own chunk list. */
		chunk = timr_clr_bkt(timr, bkt);
		bkt->first_chunk = (uintptr_t)chunk;
	}
	*(uint64_t *)(chunk + nb_chunk_slots) = 0;

	return chunk;
}

static inline struct tim_mem_entry *
timvf_refill_chunk_fpa(struct tim_mem_bucket * const bkt,
		struct timvf_ring * const timr)
{
	struct tim_mem_entry *chunk;

	if (unlikely(rte_mempool_get(timr->chunk_pool, (void **)&chunk)))
		return NULL;

	*(uint64_t *)(chunk + nb_chunk_slots) = 0;
	if (bkt->nb_entry) {
		*(uint64_t *)(((struct tim_mem_entry *)(uintptr_t)
				bkt->current_chunk) +
				nb_chunk_slots) =
			(uintptr_t) chunk;
	} else {
		bkt->first_chunk = (uintptr_t) chunk;
	}

	return chunk;
}

static inline struct tim_mem_bucket *
timvf_get_target_bucket(struct timvf_ring * const timr, const uint32_t rel_bkt)
{
	const uint64_t bkt_cyc = rte_rdtsc() - timr->ring_start_cyc;
	const uint32_t bucket = rte_reciprocal_divide_u64(bkt_cyc,
			&timr->fast_div) + rel_bkt;
	const uint32_t tbkt_id = timr->get_target_bkt(bucket,
			timr->nb_bkts);
	return &timr->bkt[tbkt_id];
}
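
/*
 * Arm paths: fetch the bucket semaphore (also taking the lock in the
 * multi-producer variant), retry while the bucket is busy (HBT/SBT set),
 * then either take a free slot in the current chunk or refill a fresh chunk
 * when the remainder runs out. On success, the chunk slot and bucket are
 * recorded in tim->impl_opaque[] so the timer can later be canceled.
 */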

/* Single producer functions. */
static inline int
timvf_add_entry_sp(struct timvf_ring * const timr, const uint32_t rel_bkt,
		struct rte_event_timer * const tim,
		const struct tim_mem_entry * const pent)
{
	int16_t rem;
	uint64_t lock_sema;
	struct tim_mem_bucket *bkt;
	struct tim_mem_entry *chunk;

__retry:
	bkt = timvf_get_target_bucket(timr, rel_bkt);
	/* Get bucket semaphore. */
	lock_sema = timr_bkt_fetch_sema(bkt);
	/* Bucket related checks. */
	if (unlikely(timr_bkt_get_hbt(lock_sema)))
		goto __retry;

	/* Insert the work. */
	rem = timr_bkt_fetch_rem(lock_sema);

	if (!rem) {
		/* Current chunk is full; refill with a new one. */
		chunk = timr->refill_chunk(bkt, timr);
		if (unlikely(chunk == NULL)) {
			timr_bkt_set_rem(bkt, 0);
			tim->impl_opaque[0] = tim->impl_opaque[1] = 0;
			tim->state = RTE_EVENT_TIMER_ERROR;
			return -ENOMEM;
		}
		bkt->current_chunk = (uintptr_t) chunk;
		timr_bkt_set_rem(bkt, nb_chunk_slots - 1);
	} else {
		chunk = (struct tim_mem_entry *)(uintptr_t)bkt->current_chunk;
		chunk += nb_chunk_slots - rem;
	}
	/* Copy work entry. */
	*chunk = *pent;
	timr_bkt_inc_nent(bkt);

	tim->impl_opaque[0] = (uintptr_t)chunk;
	tim->impl_opaque[1] = (uintptr_t)bkt;
	tim->state = RTE_EVENT_TIMER_ARMED;
	return 0;
}

/* Multi producer functions. */
static inline int
timvf_add_entry_mp(struct timvf_ring * const timr, const uint32_t rel_bkt,
		struct rte_event_timer * const tim,
		const struct tim_mem_entry * const pent)
{
	int16_t rem;
	uint64_t lock_sema;
	struct tim_mem_bucket *bkt;
	struct tim_mem_entry *chunk;

__retry:
	bkt = timvf_get_target_bucket(timr, rel_bkt);
	/* Get bucket semaphore with lock. */
	lock_sema = timr_bkt_fetch_sema_lock(bkt);
	/* Bucket related checks. */
	if (unlikely(timr_bkt_get_shbt(lock_sema))) {
		timr_bkt_dec_lock(bkt);
		goto __retry;
	}

	rem = timr_bkt_fetch_rem(lock_sema);

	if (rem < 0) {
		/* Another producer owns the refill; retry on a new bucket. */
		timr_bkt_dec_lock(bkt);
		goto __retry;
	} else if (!rem) {
		/* Only one thread can be here. */
		chunk = timr->refill_chunk(bkt, timr);
		if (unlikely(chunk == NULL)) {
			timr_bkt_set_rem(bkt, 0);
			timr_bkt_dec_lock(bkt);
			tim->impl_opaque[0] = tim->impl_opaque[1] = 0;
			tim->state = RTE_EVENT_TIMER_ERROR;
			return -ENOMEM;
		}
		bkt->current_chunk = (uintptr_t) chunk;
		timr_bkt_set_rem(bkt, nb_chunk_slots - 1);
	} else {
		chunk = (struct tim_mem_entry *)(uintptr_t)bkt->current_chunk;
		chunk += nb_chunk_slots - rem;
	}
	/* Copy work entry. */
	*chunk = *pent;
	timr_bkt_inc_nent(bkt);
	timr_bkt_dec_lock(bkt);

	tim->impl_opaque[0] = (uintptr_t)chunk;
	tim->impl_opaque[1] = (uintptr_t)bkt;
	tim->state = RTE_EVENT_TIMER_ARMED;
	return 0;
}

static inline uint16_t
timvf_cpy_wrk(uint16_t index, uint16_t cpy_lmt,
		struct tim_mem_entry *chunk,
		struct rte_event_timer ** const tim,
		const struct tim_mem_entry * const ents,
		const struct tim_mem_bucket * const bkt)
{
	for (; index < cpy_lmt; index++) {
		*chunk = *(ents + index);
		tim[index]->impl_opaque[0] = (uintptr_t)chunk++;
		tim[index]->impl_opaque[1] = (uintptr_t)bkt;
		tim[index]->state = RTE_EVENT_TIMER_ARMED;
	}

	return index;
}
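
/*
 * Unlike the per-timer arm paths above, the burst path takes the bucket
 * lock outright (retrying until it is the sole lock holder) and then copies
 * the whole batch with timvf_cpy_wrk(), splitting the batch across a chunk
 * refill when the current chunk cannot hold all nb_timers entries.
 */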

/* Burst mode functions */
static inline uint16_t
timvf_add_entry_brst(struct timvf_ring * const timr, const uint16_t rel_bkt,
		struct rte_event_timer ** const tim,
		const struct tim_mem_entry *ents,
		const uint16_t nb_timers)
{
	int16_t rem;
	int16_t crem;
	uint8_t lock_cnt;
	uint16_t index = 0;
	uint16_t chunk_remainder;
	uint64_t lock_sema;
	struct tim_mem_bucket *bkt;
	struct tim_mem_entry *chunk;

__retry:
	bkt = timvf_get_target_bucket(timr, rel_bkt);

	/* Only one thread beyond this. */
	lock_sema = timr_bkt_inc_lock(bkt);
	lock_cnt = (uint8_t)
		((lock_sema >> TIM_BUCKET_W1_S_LOCK) & TIM_BUCKET_W1_M_LOCK);

	if (lock_cnt) {
		timr_bkt_dec_lock(bkt);
		goto __retry;
	}

	/* Bucket related checks. */
	if (unlikely(timr_bkt_get_hbt(lock_sema))) {
		timr_bkt_dec_lock(bkt);
		goto __retry;
	}

	chunk_remainder = timr_bkt_fetch_rem(lock_sema);
	rem = chunk_remainder - nb_timers;
	if (rem < 0) {
		/* Batch does not fit in the current chunk. */
		crem = nb_chunk_slots - chunk_remainder;
		if (chunk_remainder && crem) {
			chunk = ((struct tim_mem_entry *)
					(uintptr_t)bkt->current_chunk) + crem;

			index = timvf_cpy_wrk(index, chunk_remainder,
					chunk, tim, ents, bkt);
			timr_bkt_sub_rem(bkt, chunk_remainder);
			timr_bkt_add_nent(bkt, chunk_remainder);
		}
		rem = nb_timers - chunk_remainder;
		ents = ents + chunk_remainder;

		chunk = timr->refill_chunk(bkt, timr);
		if (unlikely(chunk == NULL)) {
			timr_bkt_dec_lock(bkt);
			rte_errno = ENOMEM;
			tim[index]->state = RTE_EVENT_TIMER_ERROR;
			return crem;
		}
		*(uint64_t *)(chunk + nb_chunk_slots) = 0;
		bkt->current_chunk = (uintptr_t) chunk;

		index = timvf_cpy_wrk(index, nb_timers, chunk, tim, ents, bkt);
		timr_bkt_set_rem(bkt, nb_chunk_slots - rem);
		timr_bkt_add_nent(bkt, rem);
	} else {
		chunk = (struct tim_mem_entry *)(uintptr_t)bkt->current_chunk;
		chunk += (nb_chunk_slots - chunk_remainder);

		index = timvf_cpy_wrk(index, nb_timers,
				chunk, tim, ents, bkt);
		timr_bkt_sub_rem(bkt, nb_timers);
		timr_bkt_add_nent(bkt, nb_timers);
	}

	timr_bkt_dec_lock(bkt);
	return nb_timers;
}