/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <string.h>
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>
#include <inttypes.h>
#include <assert.h>
#include <sys/queue.h>

#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_eal_memconfig.h>
#include <rte_per_lcore.h>
#include <rte_memory.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_spinlock.h>
#include <rte_random.h>
#include <rte_pause.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_errno.h>

#include "rte_timer.h"
/**
 * Per-lcore info for timers.
 */
struct priv_timer {
	struct rte_timer pending_head;  /**< dummy timer instance to head up list */
	rte_spinlock_t list_lock;       /**< lock to protect list access */

	/** per-core variable that is true if a timer was updated on this
	 *  core since the last reset of the variable */
	int updated;

	/** track the current depth of the skiplist */
	unsigned curr_skiplist_depth;

	unsigned prev_lcore;              /**< used for lcore round robin */

	/** running timer on this lcore now */
	struct rte_timer *running_tim;

#ifdef RTE_LIBRTE_TIMER_DEBUG
	/** per-lcore statistics */
	struct rte_timer_debug_stats stats;
#endif
} __rte_cache_aligned;

#define FL_ALLOCATED	(1 << 0)
struct rte_timer_data {
	struct priv_timer priv_timer[RTE_MAX_LCORE];
	uint8_t internal_flags;
};

#define RTE_MAX_DATA_ELS 64
static const struct rte_memzone *rte_timer_data_mz;
static int *volatile rte_timer_mz_refcnt;
static struct rte_timer_data *rte_timer_data_arr;
static const uint32_t default_data_id;
static uint32_t rte_timer_subsystem_initialized;

/* when debug is enabled, store some statistics */
#ifdef RTE_LIBRTE_TIMER_DEBUG
#define __TIMER_STAT_ADD(priv_timer, name, n) do {			\
		unsigned __lcore_id = rte_lcore_id();			\
		if (__lcore_id < RTE_MAX_LCORE)				\
			priv_timer[__lcore_id].stats.name += (n);	\
	} while (0)
#else
#define __TIMER_STAT_ADD(priv_timer, name, n) do {} while (0)
#endif

static inline int
timer_data_valid(uint32_t id)
{
	return rte_timer_data_arr &&
		(rte_timer_data_arr[id].internal_flags & FL_ALLOCATED);
}

/* validate ID and retrieve timer data pointer, or return error value */
#define TIMER_DATA_VALID_GET_OR_ERR_RET(id, timer_data, retval) do {	\
	if (id >= RTE_MAX_DATA_ELS || !timer_data_valid(id))		\
		return retval;						\
	timer_data = &rte_timer_data_arr[id];				\
} while (0)

int
rte_timer_data_alloc(uint32_t *id_ptr)
{
	int i;
	struct rte_timer_data *data;

	if (!rte_timer_subsystem_initialized)
		return -ENOMEM;

	for (i = 0; i < RTE_MAX_DATA_ELS; i++) {
		data = &rte_timer_data_arr[i];
		if (!(data->internal_flags & FL_ALLOCATED)) {
			data->internal_flags |= FL_ALLOCATED;

			if (id_ptr)
				*id_ptr = i;

			return 0;
		}
	}

	return -ENOSPC;
}

int
rte_timer_data_dealloc(uint32_t id)
{
	struct rte_timer_data *timer_data;
	TIMER_DATA_VALID_GET_OR_ERR_RET(id, timer_data, -EINVAL);

	timer_data->internal_flags &= ~(FL_ALLOCATED);

	return 0;
}

/* Init the timer library. Allocate an array of timer data structs in shared
 * memory, and allocate the zeroth entry for use with original timer
 * APIs. Since the intersection of the sets of lcore ids in primary and
 * secondary processes should be empty, the zeroth entry can be shared by
 * multiple processes.
 */
int
rte_timer_subsystem_init(void)
{
	const struct rte_memzone *mz;
	struct rte_timer_data *data;
	int i, lcore_id;
	static const char *mz_name = "rte_timer_mz";
	const size_t data_arr_size =
			RTE_MAX_DATA_ELS * sizeof(*rte_timer_data_arr);
	const size_t mem_size = data_arr_size + sizeof(*rte_timer_mz_refcnt);
	bool do_full_init = true;

	rte_mcfg_timer_lock();

	if (rte_timer_subsystem_initialized) {
		rte_mcfg_timer_unlock();
		return -EALREADY;
	}

	mz = rte_memzone_lookup(mz_name);
	if (mz == NULL) {
		mz = rte_memzone_reserve_aligned(mz_name, mem_size,
				SOCKET_ID_ANY, 0, RTE_CACHE_LINE_SIZE);
		if (mz == NULL) {
			rte_mcfg_timer_unlock();
			return -ENOMEM;
		}
		do_full_init = true;
	} else
		do_full_init = false;

	rte_timer_data_mz = mz;
	rte_timer_data_arr = mz->addr;
	rte_timer_mz_refcnt = (void *)((char *)mz->addr + data_arr_size);

	if (do_full_init) {
		for (i = 0; i < RTE_MAX_DATA_ELS; i++) {
			data = &rte_timer_data_arr[i];

			for (lcore_id = 0; lcore_id < RTE_MAX_LCORE;
			     lcore_id++) {
				rte_spinlock_init(
					&data->priv_timer[lcore_id].list_lock);
				data->priv_timer[lcore_id].prev_lcore =
					lcore_id;
			}
		}
	}

	rte_timer_data_arr[default_data_id].internal_flags |= FL_ALLOCATED;
	(*rte_timer_mz_refcnt)++;

	rte_timer_subsystem_initialized = 1;

	rte_mcfg_timer_unlock();

	return 0;
}

void
rte_timer_subsystem_finalize(void)
{
	rte_mcfg_timer_lock();

	if (!rte_timer_subsystem_initialized) {
		rte_mcfg_timer_unlock();
		return;
	}

	if (--(*rte_timer_mz_refcnt) == 0)
		rte_memzone_free(rte_timer_data_mz);

	rte_timer_subsystem_initialized = 0;

	rte_mcfg_timer_unlock();
}

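/*
 * Illustrative usage sketch (editor's note, not part of the library): every
 * process, primary or secondary, calls rte_timer_subsystem_init() once after
 * rte_eal_init(); the shared memzone is reference counted, so each process
 * also calls rte_timer_subsystem_finalize() on shutdown.
 *
 *	int ret = rte_timer_subsystem_init();
 *	if (ret < 0 && ret != -EALREADY)
 *		rte_exit(EXIT_FAILURE, "cannot init timer subsystem\n");
 *	...
 *	rte_timer_subsystem_finalize();
 */
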
/* Initialize the timer handle tim for use */
void
rte_timer_init(struct rte_timer *tim)
{
	union rte_timer_status status;

	status.state = RTE_TIMER_STOP;
	status.owner = RTE_TIMER_NO_OWNER;
	__atomic_store_n(&tim->status.u32, status.u32, __ATOMIC_RELAXED);
}

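/*
 * Illustrative usage sketch (editor's note, not part of the library): the
 * typical single-process flow is to initialize the subsystem once, initialize
 * each timer handle, arm it, and poll rte_timer_manage() from an EAL thread.
 * The callback name "hello_cb", the one-second period, and the "keep_running"
 * flag are hypothetical.
 *
 *	static void
 *	hello_cb(struct rte_timer *tim, void *arg)
 *	{
 *		// periodic work; a PERIODICAL timer is re-armed automatically
 *	}
 *
 *	struct rte_timer tim;
 *
 *	rte_timer_subsystem_init();
 *	rte_timer_init(&tim);
 *	rte_timer_reset(&tim, rte_get_timer_hz(), PERIODICAL,
 *			rte_lcore_id(), hello_cb, NULL);
 *	while (keep_running)
 *		rte_timer_manage();
 */
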
/*
 * if timer is pending or stopped (or running on the same core as
 * us), mark timer as configuring, and on success return the previous
 * status of the timer
 */
static int
timer_set_config_state(struct rte_timer *tim,
		       union rte_timer_status *ret_prev_status,
		       struct priv_timer *priv_timer)
{
	union rte_timer_status prev_status, status;
	int success = 0;
	unsigned lcore_id;

	lcore_id = rte_lcore_id();

	/* wait that the timer is in correct status before update,
	 * and mark it as being configured */
	prev_status.u32 = __atomic_load_n(&tim->status.u32, __ATOMIC_RELAXED);

	while (success == 0) {
		/* timer is running on another core
		 * or ready to run on local core, exit
		 */
		if (prev_status.state == RTE_TIMER_RUNNING &&
		    (prev_status.owner != (uint16_t)lcore_id ||
		     tim != priv_timer[lcore_id].running_tim))
			return -1;

		/* timer is being configured on another core */
		if (prev_status.state == RTE_TIMER_CONFIG)
			return -1;

		/* here, we know that timer is stopped or pending,
		 * mark it atomically as being configured */
		status.state = RTE_TIMER_CONFIG;
		status.owner = (int16_t)lcore_id;
		/* CONFIG states are acting as locked states. If the
		 * timer is in CONFIG state, the state cannot be changed
		 * by other threads. So, we should use ACQUIRE here.
		 */
		success = __atomic_compare_exchange_n(&tim->status.u32,
					      &prev_status.u32,
					      status.u32, 0,
					      __ATOMIC_ACQUIRE,
					      __ATOMIC_RELAXED);
	}

	ret_prev_status->u32 = prev_status.u32;
	return 0;
}

/*
 * if timer is pending, mark timer as running
 */
static int
timer_set_running_state(struct rte_timer *tim)
{
	union rte_timer_status prev_status, status;
	unsigned lcore_id = rte_lcore_id();
	int success = 0;

	/* wait that the timer is in correct status before update,
	 * and mark it as running */
	prev_status.u32 = __atomic_load_n(&tim->status.u32, __ATOMIC_RELAXED);

	while (success == 0) {
		/* timer is not pending anymore */
		if (prev_status.state != RTE_TIMER_PENDING)
			return -1;

		/* we know that the timer will be pending at this point
		 * mark it atomically as being running
		 */
		status.state = RTE_TIMER_RUNNING;
		status.owner = (int16_t)lcore_id;
		/* RUNNING states are acting as locked states. If the
		 * timer is in RUNNING state, the state cannot be changed
		 * by other threads. So, we should use ACQUIRE here.
		 */
		success = __atomic_compare_exchange_n(&tim->status.u32,
					      &prev_status.u32,
					      status.u32, 0,
					      __ATOMIC_ACQUIRE,
					      __ATOMIC_RELAXED);
	}

	return 0;
}

/*
 * Return a skiplist level for a new entry.
 * This probabilistically gives a level such that, with probability 1/4,
 * an entry at level n will also appear at level n+1.
 */
static uint32_t
timer_get_skiplist_level(unsigned curr_depth)
{
#ifdef RTE_LIBRTE_TIMER_DEBUG
	static uint32_t i, count = 0;
	static uint32_t levels[MAX_SKIPLIST_DEPTH] = {0};
#endif

	/* probability value is 1/4, i.e. all at level 0, 1 in 4 is at level 1,
	 * 1 in 16 at level 2, 1 in 64 at level 3, etc. Calculated using lowest
	 * bit position of a (pseudo)random number.
	 */
	uint32_t rand = rte_rand() & (UINT32_MAX - 1);
	uint32_t level = rand == 0 ? MAX_SKIPLIST_DEPTH : (rte_bsf32(rand)-1) / 2;

	/* limit the levels used to one above our current level, so we don't,
	 * for instance, have a level 0 and a level 7 without anything between
	 */
	if (level > curr_depth)
		level = curr_depth;
	if (level >= MAX_SKIPLIST_DEPTH)
		level = MAX_SKIPLIST_DEPTH-1;
#ifdef RTE_LIBRTE_TIMER_DEBUG
	/* track the count of each level during debug */
	levels[level]++;
	count++;
	if (count % 10000 == 0)
		for (i = 0; i < MAX_SKIPLIST_DEPTH; i++)
			printf("Level %u: %u\n", (unsigned)i, (unsigned)levels[i]);
#endif
	return level;
}

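/*
 * Worked example (editor's note): rte_rand() & (UINT32_MAX - 1) clears bit 0,
 * so the lowest set bit of 'rand' is at some position b >= 1 and the level is
 * (b - 1) / 2.  For instance:
 *
 *	rand = ...0010  ->  rte_bsf32() = 1  ->  level 0
 *	rand = ...0100  ->  rte_bsf32() = 2  ->  level 0
 *	rand = ...1000  ->  rte_bsf32() = 3  ->  level 1
 *
 * Bit positions 1-2 map to level 0, 3-4 to level 1, 5-6 to level 2, and so
 * on, which is where the 1/4 promotion probability per level comes from.
 */
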
/*
 * For a given time value, get the entries at each level which
 * are <= that time value.
 */
static void
timer_get_prev_entries(uint64_t time_val, unsigned tim_lcore,
		       struct rte_timer **prev, struct priv_timer *priv_timer)
{
	unsigned lvl = priv_timer[tim_lcore].curr_skiplist_depth;
	prev[lvl] = &priv_timer[tim_lcore].pending_head;
	while (lvl != 0) {
		lvl--;
		prev[lvl] = prev[lvl+1];
		while (prev[lvl]->sl_next[lvl] &&
				prev[lvl]->sl_next[lvl]->expire <= time_val)
			prev[lvl] = prev[lvl]->sl_next[lvl];
	}
}

/*
 * Given a timer node in the skiplist, find the previous entries for it at
 * all skiplist levels.
 */
static void
timer_get_prev_entries_for_node(struct rte_timer *tim, unsigned tim_lcore,
				struct rte_timer **prev,
				struct priv_timer *priv_timer)
{
	int i;

	/* to get a specific entry in the list, look for entries just lower
	 * than the time value, then advance on each level individually as
	 * necessary
	 */
	timer_get_prev_entries(tim->expire - 1, tim_lcore, prev, priv_timer);
	for (i = priv_timer[tim_lcore].curr_skiplist_depth - 1; i >= 0; i--) {
		while (prev[i]->sl_next[i] != NULL &&
				prev[i]->sl_next[i] != tim &&
				prev[i]->sl_next[i]->expire <= tim->expire)
			prev[i] = prev[i]->sl_next[i];
	}
}

/* call with lock held as necessary
 * add in list
 * timer must be in config state
 * timer must not be in a list
 */
static void
timer_add(struct rte_timer *tim, unsigned int tim_lcore,
	  struct priv_timer *priv_timer)
{
	unsigned lvl;
	struct rte_timer *prev[MAX_SKIPLIST_DEPTH+1];

	/* find where exactly this element goes in the list of elements
	 * for each depth. */
	timer_get_prev_entries(tim->expire, tim_lcore, prev, priv_timer);

	/* now assign it a new level and add at that level */
	const unsigned tim_level = timer_get_skiplist_level(
			priv_timer[tim_lcore].curr_skiplist_depth);
	if (tim_level == priv_timer[tim_lcore].curr_skiplist_depth)
		priv_timer[tim_lcore].curr_skiplist_depth++;

	lvl = tim_level;
	while (lvl > 0) {
		tim->sl_next[lvl] = prev[lvl]->sl_next[lvl];
		prev[lvl]->sl_next[lvl] = tim;
		lvl--;
	}
	tim->sl_next[0] = prev[0]->sl_next[0];
	prev[0]->sl_next[0] = tim;

	/* save the lowest list entry into the expire field of the dummy hdr
	 * NOTE: this is not atomic on 32-bit */
	priv_timer[tim_lcore].pending_head.expire =
			priv_timer[tim_lcore].pending_head.sl_next[0]->expire;
}

/*
 * del from list, lock if needed
 * timer must be in config state
 * timer must be in a list
 */
static void
timer_del(struct rte_timer *tim, union rte_timer_status prev_status,
	  int local_is_locked, struct priv_timer *priv_timer)
{
	unsigned lcore_id = rte_lcore_id();
	unsigned prev_owner = prev_status.owner;
	int i;
	struct rte_timer *prev[MAX_SKIPLIST_DEPTH+1];

	/* if the timer is pending on another core, we need to lock the
	 * list; if it is on the local core, we need to lock if we are not
	 * called from rte_timer_manage() */
	if (prev_owner != lcore_id || !local_is_locked)
		rte_spinlock_lock(&priv_timer[prev_owner].list_lock);

	/* save the lowest list entry into the expire field of the dummy hdr.
	 * NOTE: this is not atomic on 32-bit */
	if (tim == priv_timer[prev_owner].pending_head.sl_next[0])
		priv_timer[prev_owner].pending_head.expire =
			((tim->sl_next[0] == NULL) ? 0 : tim->sl_next[0]->expire);

	/* adjust pointers from previous entries to point past this */
	timer_get_prev_entries_for_node(tim, prev_owner, prev, priv_timer);
	for (i = priv_timer[prev_owner].curr_skiplist_depth - 1; i >= 0; i--) {
		if (prev[i]->sl_next[i] == tim)
			prev[i]->sl_next[i] = tim->sl_next[i];
	}

	/* in case we deleted last entry at a level, adjust down max level */
	for (i = priv_timer[prev_owner].curr_skiplist_depth - 1; i >= 0; i--)
		if (priv_timer[prev_owner].pending_head.sl_next[i] == NULL)
			priv_timer[prev_owner].curr_skiplist_depth--;
		else
			break;

	if (prev_owner != lcore_id || !local_is_locked)
		rte_spinlock_unlock(&priv_timer[prev_owner].list_lock);
}

/* Reset and start the timer associated with the timer handle (private func) */
static int
__rte_timer_reset(struct rte_timer *tim, uint64_t expire,
		  uint64_t period, unsigned tim_lcore,
		  rte_timer_cb_t fct, void *arg,
		  int local_is_locked,
		  struct rte_timer_data *timer_data)
{
	union rte_timer_status prev_status, status;
	int ret;
	unsigned lcore_id = rte_lcore_id();
	struct priv_timer *priv_timer = timer_data->priv_timer;

	/* round robin for tim_lcore */
	if (tim_lcore == (unsigned)LCORE_ID_ANY) {
		if (lcore_id < RTE_MAX_LCORE) {
			/* EAL thread with valid lcore_id */
			tim_lcore = rte_get_next_lcore(
				priv_timer[lcore_id].prev_lcore,
				0, 1);
			priv_timer[lcore_id].prev_lcore = tim_lcore;
		} else
			/* non-EAL threads do not run rte_timer_manage(),
			 * so schedule the timer on the first enabled lcore. */
			tim_lcore = rte_get_next_lcore(LCORE_ID_ANY, 0, 1);
	}

	/* wait that the timer is in correct status before update,
	 * and mark it as being configured */
	ret = timer_set_config_state(tim, &prev_status, priv_timer);
	if (ret < 0)
		return -1;

	__TIMER_STAT_ADD(priv_timer, reset, 1);
	if (prev_status.state == RTE_TIMER_RUNNING &&
	    lcore_id < RTE_MAX_LCORE) {
		priv_timer[lcore_id].updated = 1;
	}

	/* remove it from list */
	if (prev_status.state == RTE_TIMER_PENDING) {
		timer_del(tim, prev_status, local_is_locked, priv_timer);
		__TIMER_STAT_ADD(priv_timer, pending, -1);
	}

	tim->period = period;
	tim->expire = expire;
	tim->f = fct;
	tim->arg = arg;

	/* if the timer needs to be scheduled on another core, we need to
	 * lock the destination list; if it is on the local core, we need to
	 * lock if we are not called from rte_timer_manage()
	 */
	if (tim_lcore != lcore_id || !local_is_locked)
		rte_spinlock_lock(&priv_timer[tim_lcore].list_lock);

	__TIMER_STAT_ADD(priv_timer, pending, 1);
	timer_add(tim, tim_lcore, priv_timer);

	/* update state: as we are in CONFIG state, only we can modify
	 * the state, so we don't need to use cmpset() here */
	status.state = RTE_TIMER_PENDING;
	status.owner = (int16_t)tim_lcore;
	/* The "RELEASE" ordering guarantees the memory operations above
	 * the status update are observed before the update by all threads
	 */
	__atomic_store_n(&tim->status.u32, status.u32, __ATOMIC_RELEASE);

	if (tim_lcore != lcore_id || !local_is_locked)
		rte_spinlock_unlock(&priv_timer[tim_lcore].list_lock);

	return 0;
}

/* Reset and start the timer associated with the timer handle tim */
int
rte_timer_reset(struct rte_timer *tim, uint64_t ticks,
		enum rte_timer_type type, unsigned int tim_lcore,
		rte_timer_cb_t fct, void *arg)
{
	return rte_timer_alt_reset(default_data_id, tim, ticks, type,
				   tim_lcore, fct, arg);
}

int
rte_timer_alt_reset(uint32_t timer_data_id, struct rte_timer *tim,
		    uint64_t ticks, enum rte_timer_type type,
		    unsigned int tim_lcore, rte_timer_cb_t fct, void *arg)
{
	uint64_t cur_time = rte_get_timer_cycles();
	uint64_t period;
	struct rte_timer_data *timer_data;

	TIMER_DATA_VALID_GET_OR_ERR_RET(timer_data_id, timer_data, -EINVAL);

	if (type == PERIODICAL)
		period = ticks;
	else
		period = 0;

	return __rte_timer_reset(tim, cur_time + ticks, period, tim_lcore,
				 fct, arg, 0, timer_data);
}

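/*
 * Illustrative sketch (editor's note, not part of the library):
 * rte_timer_reset() takes its timeout in timer (TSC) ticks, so wall-clock
 * periods are usually converted with rte_get_timer_hz().  The 100 ms period
 * and the "periodic_cb" callback below are hypothetical.
 *
 *	uint64_t ticks = rte_get_timer_hz() / 10;	// roughly 100 ms
 *
 *	if (rte_timer_reset(&tim, ticks, PERIODICAL, rte_lcore_id(),
 *			    periodic_cb, NULL) < 0) {
 *		// the timer is currently running or being configured on
 *		// another lcore; retry later or use rte_timer_reset_sync()
 *	}
 */
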
/* loop until rte_timer_reset() succeeds */
void
rte_timer_reset_sync(struct rte_timer *tim, uint64_t ticks,
		     enum rte_timer_type type, unsigned tim_lcore,
		     rte_timer_cb_t fct, void *arg)
{
	while (rte_timer_reset(tim, ticks, type, tim_lcore,
			       fct, arg) != 0)
		rte_pause();
}

static int
__rte_timer_stop(struct rte_timer *tim, int local_is_locked,
		 struct rte_timer_data *timer_data)
{
	union rte_timer_status prev_status, status;
	unsigned lcore_id = rte_lcore_id();
	int ret;
	struct priv_timer *priv_timer = timer_data->priv_timer;

	/* wait that the timer is in correct status before update,
	 * and mark it as being configured */
	ret = timer_set_config_state(tim, &prev_status, priv_timer);
	if (ret < 0)
		return -1;

	__TIMER_STAT_ADD(priv_timer, stop, 1);
	if (prev_status.state == RTE_TIMER_RUNNING &&
	    lcore_id < RTE_MAX_LCORE) {
		priv_timer[lcore_id].updated = 1;
	}

	/* remove it from list */
	if (prev_status.state == RTE_TIMER_PENDING) {
		timer_del(tim, prev_status, local_is_locked, priv_timer);
		__TIMER_STAT_ADD(priv_timer, pending, -1);
	}

	/* mark timer as stopped */
	status.state = RTE_TIMER_STOP;
	status.owner = RTE_TIMER_NO_OWNER;
	/* The "RELEASE" ordering guarantees the memory operations above
	 * the status update are observed before the update by all threads
	 */
	__atomic_store_n(&tim->status.u32, status.u32, __ATOMIC_RELEASE);

	return 0;
}

/* Stop the timer associated with the timer handle tim */
int
rte_timer_stop(struct rte_timer *tim)
{
	return rte_timer_alt_stop(default_data_id, tim);
}

int
rte_timer_alt_stop(uint32_t timer_data_id, struct rte_timer *tim)
{
	struct rte_timer_data *timer_data;

	TIMER_DATA_VALID_GET_OR_ERR_RET(timer_data_id, timer_data, -EINVAL);

	return __rte_timer_stop(tim, 0, timer_data);
}

/* loop until rte_timer_stop() succeeds */
void
rte_timer_stop_sync(struct rte_timer *tim)
{
	while (rte_timer_stop(tim) != 0)
		rte_pause();
}

/* Test the PENDING status of the timer handle tim */
int
rte_timer_pending(struct rte_timer *tim)
{
	return __atomic_load_n(&tim->status.state,
				__ATOMIC_RELAXED) == RTE_TIMER_PENDING;
}

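/*
 * Illustrative sketch (editor's note, not part of the library):
 * rte_timer_stop() returns -1 while the timer is running or being configured
 * on another lcore, so callers that must guarantee the callback has finished
 * typically use rte_timer_stop_sync() and may then check rte_timer_pending().
 *
 *	rte_timer_stop_sync(&tim);
 *	RTE_ASSERT(!rte_timer_pending(&tim));
 */
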
/* must be called periodically, run all timers that have expired */
static void
__rte_timer_manage(struct rte_timer_data *timer_data)
{
	union rte_timer_status status;
	struct rte_timer *tim, *next_tim;
	struct rte_timer *run_first_tim, **pprev;
	unsigned lcore_id = rte_lcore_id();
	struct rte_timer *prev[MAX_SKIPLIST_DEPTH + 1];
	uint64_t cur_time;
	int i, ret;
	struct priv_timer *priv_timer = timer_data->priv_timer;

	/* timer manager only runs on EAL thread with valid lcore_id */
	assert(lcore_id < RTE_MAX_LCORE);

	__TIMER_STAT_ADD(priv_timer, manage, 1);
	/* optimize for the case where per-cpu list is empty */
	if (priv_timer[lcore_id].pending_head.sl_next[0] == NULL)
		return;
	cur_time = rte_get_timer_cycles();

#ifdef RTE_ARCH_64
	/* on 64-bit the value cached in the pending_head.expire will be
	 * updated atomically, so we can consult that for a quick check here
	 * outside the lock */
	if (likely(priv_timer[lcore_id].pending_head.expire > cur_time))
		return;
#endif

	/* browse ordered list, add expired timers in 'expired' list */
	rte_spinlock_lock(&priv_timer[lcore_id].list_lock);

	/* if nothing to do just unlock and return */
	if (priv_timer[lcore_id].pending_head.sl_next[0] == NULL ||
	    priv_timer[lcore_id].pending_head.sl_next[0]->expire > cur_time) {
		rte_spinlock_unlock(&priv_timer[lcore_id].list_lock);
		return;
	}

	/* save start of list of expired timers */
	tim = priv_timer[lcore_id].pending_head.sl_next[0];

	/* break the existing list at current time point */
	timer_get_prev_entries(cur_time, lcore_id, prev, priv_timer);
	for (i = priv_timer[lcore_id].curr_skiplist_depth - 1; i >= 0; i--) {
		if (prev[i] == &priv_timer[lcore_id].pending_head)
			continue;
		priv_timer[lcore_id].pending_head.sl_next[i] =
			prev[i]->sl_next[i];
		if (prev[i]->sl_next[i] == NULL)
			priv_timer[lcore_id].curr_skiplist_depth--;
		prev[i]->sl_next[i] = NULL;
	}

	/* transition run-list from PENDING to RUNNING */
	run_first_tim = tim;
	pprev = &run_first_tim;

	for ( ; tim != NULL; tim = next_tim) {
		next_tim = tim->sl_next[0];

		ret = timer_set_running_state(tim);
		if (likely(ret == 0)) {
			pprev = &tim->sl_next[0];
		} else {
			/* another core is trying to re-config this one,
			 * remove it from local expired list
			 */
			*pprev = next_tim;
		}
	}

	/* update the next to expire timer value */
	priv_timer[lcore_id].pending_head.expire =
	    (priv_timer[lcore_id].pending_head.sl_next[0] == NULL) ? 0 :
		priv_timer[lcore_id].pending_head.sl_next[0]->expire;

	rte_spinlock_unlock(&priv_timer[lcore_id].list_lock);

	/* now scan expired list and call callbacks */
	for (tim = run_first_tim; tim != NULL; tim = next_tim) {
		next_tim = tim->sl_next[0];
		priv_timer[lcore_id].updated = 0;
		priv_timer[lcore_id].running_tim = tim;

		/* execute callback function with list unlocked */
		tim->f(tim, tim->arg);

		__TIMER_STAT_ADD(priv_timer, pending, -1);
		/* the timer was stopped or reloaded by the callback
		 * function, we have nothing to do here */
		if (priv_timer[lcore_id].updated == 1)
			continue;

		if (tim->period == 0) {
			/* remove from done list and mark timer as stopped */
			status.state = RTE_TIMER_STOP;
			status.owner = RTE_TIMER_NO_OWNER;
			/* The "RELEASE" ordering guarantees the memory
			 * operations above the status update are observed
			 * before the update by all threads
			 */
			__atomic_store_n(&tim->status.u32, status.u32,
				__ATOMIC_RELEASE);
		} else {
			/* keep it in list and mark timer as pending */
			rte_spinlock_lock(&priv_timer[lcore_id].list_lock);
			status.state = RTE_TIMER_PENDING;
			__TIMER_STAT_ADD(priv_timer, pending, 1);
			status.owner = (int16_t)lcore_id;
			/* The "RELEASE" ordering guarantees the memory
			 * operations above the status update are observed
			 * before the update by all threads
			 */
			__atomic_store_n(&tim->status.u32, status.u32,
				__ATOMIC_RELEASE);
			__rte_timer_reset(tim, tim->expire + tim->period,
				tim->period, lcore_id, tim->f, tim->arg, 1,
				timer_data);
			rte_spinlock_unlock(&priv_timer[lcore_id].list_lock);
		}
	}
	priv_timer[lcore_id].running_tim = NULL;
}

int
rte_timer_manage(void)
{
	struct rte_timer_data *timer_data;

	TIMER_DATA_VALID_GET_OR_ERR_RET(default_data_id, timer_data, -EINVAL);

	__rte_timer_manage(timer_data);

	return 0;
}

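/*
 * Illustrative sketch (editor's note, not part of the library):
 * rte_timer_manage() is typically called from each EAL worker's main loop.
 * Checking the timer list on every iteration is wasteful, so applications
 * often rate-limit the call; TIMER_RESOLUTION_CYCLES and the "force_quit"
 * flag below are hypothetical, application-defined values.
 *
 *	uint64_t prev_tsc = 0, cur_tsc, diff_tsc;
 *
 *	while (!force_quit) {
 *		// ... packet processing ...
 *
 *		cur_tsc = rte_rdtsc();
 *		diff_tsc = cur_tsc - prev_tsc;
 *		if (diff_tsc > TIMER_RESOLUTION_CYCLES) {
 *			rte_timer_manage();
 *			prev_tsc = cur_tsc;
 *		}
 *	}
 */
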
int
rte_timer_alt_manage(uint32_t timer_data_id,
		     unsigned int *poll_lcores,
		     int nb_poll_lcores,
		     rte_timer_alt_manage_cb_t f)
{
	unsigned int default_poll_lcores[] = {rte_lcore_id()};
	union rte_timer_status status;
	struct rte_timer *tim, *next_tim, **pprev;
	struct rte_timer *run_first_tims[RTE_MAX_LCORE];
	unsigned int this_lcore = rte_lcore_id();
	struct rte_timer *prev[MAX_SKIPLIST_DEPTH + 1];
	uint64_t cur_time;
	int i, j, ret;
	int nb_runlists = 0;
	struct rte_timer_data *data;
	struct priv_timer *privp;
	uint32_t poll_lcore;

	TIMER_DATA_VALID_GET_OR_ERR_RET(timer_data_id, data, -EINVAL);

	/* timer manager only runs on EAL thread with valid lcore_id */
	assert(this_lcore < RTE_MAX_LCORE);

	__TIMER_STAT_ADD(data->priv_timer, manage, 1);

	if (poll_lcores == NULL) {
		poll_lcores = default_poll_lcores;
		nb_poll_lcores = RTE_DIM(default_poll_lcores);
	}

	for (i = 0; i < nb_poll_lcores; i++) {
		poll_lcore = poll_lcores[i];
		privp = &data->priv_timer[poll_lcore];

		/* optimize for the case where per-cpu list is empty */
		if (privp->pending_head.sl_next[0] == NULL)
			continue;
		cur_time = rte_get_timer_cycles();

#ifdef RTE_ARCH_64
		/* on 64-bit the value cached in the pending_head.expire will
		 * be updated atomically, so we can consult that for a quick
		 * check here outside the lock
		 */
		if (likely(privp->pending_head.expire > cur_time))
			continue;
#endif

		/* browse ordered list, add expired timers in 'expired' list */
		rte_spinlock_lock(&privp->list_lock);

		/* if nothing to do just unlock and continue */
		if (privp->pending_head.sl_next[0] == NULL ||
		    privp->pending_head.sl_next[0]->expire > cur_time) {
			rte_spinlock_unlock(&privp->list_lock);
			continue;
		}

		/* save start of list of expired timers */
		tim = privp->pending_head.sl_next[0];

		/* break the existing list at current time point */
		timer_get_prev_entries(cur_time, poll_lcore, prev,
				       data->priv_timer);
		for (j = privp->curr_skiplist_depth - 1; j >= 0; j--) {
			if (prev[j] == &privp->pending_head)
				continue;
			privp->pending_head.sl_next[j] =
				prev[j]->sl_next[j];
			if (prev[j]->sl_next[j] == NULL)
				privp->curr_skiplist_depth--;

			prev[j]->sl_next[j] = NULL;
		}

		/* transition run-list from PENDING to RUNNING */
		run_first_tims[nb_runlists] = tim;
		pprev = &run_first_tims[nb_runlists];
		nb_runlists++;

		for ( ; tim != NULL; tim = next_tim) {
			next_tim = tim->sl_next[0];

			ret = timer_set_running_state(tim);
			if (likely(ret == 0)) {
				pprev = &tim->sl_next[0];
			} else {
				/* another core is trying to re-config this
				 * one, remove it from local expired list
				 */
				*pprev = next_tim;
			}
		}

		/* update the next to expire timer value */
		privp->pending_head.expire =
		    (privp->pending_head.sl_next[0] == NULL) ? 0 :
			privp->pending_head.sl_next[0]->expire;

		rte_spinlock_unlock(&privp->list_lock);
	}

	/* Now process the run lists */
	while (1) {
		bool done = true;
		uint64_t min_expire = UINT64_MAX;
		int min_idx = 0;

		/* Find the next oldest timer to process */
		for (i = 0; i < nb_runlists; i++) {
			tim = run_first_tims[i];

			if (tim != NULL && tim->expire < min_expire) {
				min_expire = tim->expire;
				min_idx = i;
				done = false;
			}
		}

		if (done)
			break;

		tim = run_first_tims[min_idx];

		/* Move down the runlist from which we picked a timer to
		 * execute
		 */
		run_first_tims[min_idx] = run_first_tims[min_idx]->sl_next[0];

		data->priv_timer[this_lcore].updated = 0;
		data->priv_timer[this_lcore].running_tim = tim;

		/* Call the provided callback function */
		f(tim);

		__TIMER_STAT_ADD(data->priv_timer, pending, -1);

		/* the timer was stopped or reloaded by the callback
		 * function, we have nothing to do here
		 */
		if (data->priv_timer[this_lcore].updated == 1)
			continue;

		if (tim->period == 0) {
			/* remove from done list and mark timer as stopped */
			status.state = RTE_TIMER_STOP;
			status.owner = RTE_TIMER_NO_OWNER;
			/* The "RELEASE" ordering guarantees the memory
			 * operations above the status update are observed
			 * before the update by all threads
			 */
			__atomic_store_n(&tim->status.u32, status.u32,
				__ATOMIC_RELEASE);
		} else {
			/* keep it in list and mark timer as pending */
			rte_spinlock_lock(
				&data->priv_timer[this_lcore].list_lock);
			status.state = RTE_TIMER_PENDING;
			__TIMER_STAT_ADD(data->priv_timer, pending, 1);
			status.owner = (int16_t)this_lcore;
			/* The "RELEASE" ordering guarantees the memory
			 * operations above the status update are observed
			 * before the update by all threads
			 */
			__atomic_store_n(&tim->status.u32, status.u32,
				__ATOMIC_RELEASE);
			__rte_timer_reset(tim, tim->expire + tim->period,
				tim->period, this_lcore, tim->f, tim->arg, 1,
				data);
			rte_spinlock_unlock(
				&data->priv_timer[this_lcore].list_lock);
		}

		data->priv_timer[this_lcore].running_tim = NULL;
	}

	return 0;
}

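/*
 * Illustrative sketch (editor's note, not part of the library):
 * rte_timer_alt_manage() lets one lcore expire timers installed on several
 * lcores' lists and hands each expired timer to a caller-supplied callback
 * instead of invoking tim->f directly.  The identifiers "timer_data_id",
 * "poll_lcores" and "expired_cb" below are hypothetical.
 *
 *	static void
 *	expired_cb(struct rte_timer *tim)
 *	{
 *		// e.g. enqueue tim to an event device, or run tim->f(tim, tim->arg)
 *	}
 *
 *	unsigned int poll_lcores[] = {2, 3, 4};
 *
 *	rte_timer_alt_manage(timer_data_id, poll_lcores,
 *			     RTE_DIM(poll_lcores), expired_cb);
 */
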
/* Walk pending lists, stopping timers and calling user-specified function */
int
rte_timer_stop_all(uint32_t timer_data_id, unsigned int *walk_lcores,
		   int nb_walk_lcores,
		   rte_timer_stop_all_cb_t f, void *f_arg)
{
	int i;
	struct priv_timer *priv_timer;
	uint32_t walk_lcore;
	struct rte_timer *tim, *next_tim;
	struct rte_timer_data *timer_data;

	TIMER_DATA_VALID_GET_OR_ERR_RET(timer_data_id, timer_data, -EINVAL);

	for (i = 0; i < nb_walk_lcores; i++) {
		walk_lcore = walk_lcores[i];
		priv_timer = &timer_data->priv_timer[walk_lcore];
		rte_spinlock_lock(&priv_timer->list_lock);

		for (tim = priv_timer->pending_head.sl_next[0];
		     tim != NULL;
		     tim = next_tim) {
			next_tim = tim->sl_next[0];

			/* Call timer_stop with lock held */
			__rte_timer_stop(tim, 1, timer_data);
			if (f)
				f(tim, f_arg);
		}

		rte_spinlock_unlock(&priv_timer->list_lock);
	}

	return 0;
}

int64_t
rte_timer_next_ticks(void)
{
	unsigned int lcore_id = rte_lcore_id();
	struct rte_timer_data *timer_data;
	struct priv_timer *priv_timer;
	const struct rte_timer *tm;
	uint64_t cur_time;
	int64_t left = -ENOENT;

	TIMER_DATA_VALID_GET_OR_ERR_RET(default_data_id, timer_data, -EINVAL);

	priv_timer = timer_data->priv_timer;
	cur_time = rte_get_timer_cycles();

	rte_spinlock_lock(&priv_timer[lcore_id].list_lock);
	tm = priv_timer[lcore_id].pending_head.sl_next[0];
	if (tm) {
		left = tm->expire - cur_time;
		if (left < 0)
			left = 0;
	}
	rte_spinlock_unlock(&priv_timer[lcore_id].list_lock);

	return left;
}

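/*
 * Illustrative sketch (editor's note, not part of the library):
 * rte_timer_next_ticks() can be used to decide how long the calling lcore
 * may sleep before its next timer is due.  Unit conversion and overflow
 * clamping are glossed over here; US_PER_S and rte_delay_us_sleep() are
 * assumed to come from rte_cycles.h.
 *
 *	int64_t ticks = rte_timer_next_ticks();
 *
 *	if (ticks > 0)
 *		rte_delay_us_sleep((uint64_t)ticks * US_PER_S /
 *				rte_get_timer_hz());
 *	rte_timer_manage();
 */
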
/* dump statistics about timers */
static void
__rte_timer_dump_stats(struct rte_timer_data *timer_data __rte_unused, FILE *f)
{
#ifdef RTE_LIBRTE_TIMER_DEBUG
	struct rte_timer_debug_stats sum;
	unsigned lcore_id;
	struct priv_timer *priv_timer = timer_data->priv_timer;

	memset(&sum, 0, sizeof(sum));
	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
		sum.reset += priv_timer[lcore_id].stats.reset;
		sum.stop += priv_timer[lcore_id].stats.stop;
		sum.manage += priv_timer[lcore_id].stats.manage;
		sum.pending += priv_timer[lcore_id].stats.pending;
	}
	fprintf(f, "Timer statistics:\n");
	fprintf(f, "  reset = %"PRIu64"\n", sum.reset);
	fprintf(f, "  stop = %"PRIu64"\n", sum.stop);
	fprintf(f, "  manage = %"PRIu64"\n", sum.manage);
	fprintf(f, "  pending = %"PRIu64"\n", sum.pending);
#else
	fprintf(f, "No timer statistics, RTE_LIBRTE_TIMER_DEBUG is disabled\n");
#endif
}

int
rte_timer_dump_stats(FILE *f)
{
	return rte_timer_alt_dump_stats(default_data_id, f);
}

int
rte_timer_alt_dump_stats(uint32_t timer_data_id __rte_unused, FILE *f)
{
	struct rte_timer_data *timer_data;

	TIMER_DATA_VALID_GET_OR_ERR_RET(timer_data_id, timer_data, -EINVAL);

	__rte_timer_dump_stats(timer_data, f);

	return 0;
}