/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <string.h>
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>
#include <inttypes.h>
#include <assert.h>
#include <sys/queue.h>

#include <rte_atomic.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_eal_memconfig.h>
#include <rte_per_lcore.h>
#include <rte_memory.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_spinlock.h>
#include <rte_random.h>
#include <rte_pause.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_errno.h>

#include "rte_timer.h"

/**
 * Per-lcore info for timers.
 */
struct priv_timer {
	struct rte_timer pending_head;  /**< dummy timer instance to head up list */
	rte_spinlock_t list_lock;       /**< lock to protect list access */

	/** per-core variable that is true if a timer was updated on this
	 *  core since the last reset of the variable */
	int updated;

	/** track the current depth of the skiplist */
	unsigned curr_skiplist_depth;

	unsigned prev_lcore;            /**< used for lcore round robin */

	/** running timer on this lcore now */
	struct rte_timer *running_tim;

#ifdef RTE_LIBRTE_TIMER_DEBUG
	/** per-lcore statistics */
	struct rte_timer_debug_stats stats;
#endif
} __rte_cache_aligned;

#define FL_ALLOCATED	(1 << 0)
struct rte_timer_data {
	struct priv_timer priv_timer[RTE_MAX_LCORE];
	uint8_t internal_flags;
};

#define RTE_MAX_DATA_ELS 64
static const struct rte_memzone *rte_timer_data_mz;
static int *volatile rte_timer_mz_refcnt;
static struct rte_timer_data *rte_timer_data_arr;
static const uint32_t default_data_id;
static uint32_t rte_timer_subsystem_initialized;

/* when debug is enabled, store some statistics */
#ifdef RTE_LIBRTE_TIMER_DEBUG
#define __TIMER_STAT_ADD(priv_timer, name, n) do {			\
		unsigned __lcore_id = rte_lcore_id();			\
		if (__lcore_id < RTE_MAX_LCORE)				\
			priv_timer[__lcore_id].stats.name += (n);	\
	} while (0)
#else
#define __TIMER_STAT_ADD(priv_timer, name, n) do {} while (0)
#endif

static inline int
timer_data_valid(uint32_t id)
{
	return rte_timer_data_arr &&
		(rte_timer_data_arr[id].internal_flags & FL_ALLOCATED);
}

/* validate ID and retrieve timer data pointer, or return error value */
#define TIMER_DATA_VALID_GET_OR_ERR_RET(id, timer_data, retval) do {	\
	if (id >= RTE_MAX_DATA_ELS || !timer_data_valid(id))		\
		return retval;						\
	timer_data = &rte_timer_data_arr[id];				\
} while (0)

int
rte_timer_data_alloc(uint32_t *id_ptr)
{
	int i;
	struct rte_timer_data *data;

	if (!rte_timer_subsystem_initialized)
		return -ENOMEM;

	for (i = 0; i < RTE_MAX_DATA_ELS; i++) {
		data = &rte_timer_data_arr[i];
		if (!(data->internal_flags & FL_ALLOCATED)) {
			data->internal_flags |= FL_ALLOCATED;

			if (id_ptr)
				*id_ptr = i;

			return 0;
		}
	}

	return -ENOSPC;
}

int
rte_timer_data_dealloc(uint32_t id)
{
	struct rte_timer_data *timer_data;
	TIMER_DATA_VALID_GET_OR_ERR_RET(id, timer_data, -EINVAL);

	timer_data->internal_flags &= ~(FL_ALLOCATED);

	return 0;
}

/* Init the timer library. Allocate an array of timer data structs in shared
 * memory, and allocate the zeroth entry for use with original timer
 * APIs. Since the intersection of the sets of lcore ids in primary and
 * secondary processes should be empty, the zeroth entry can be shared by
 * multiple processes.
 */
int
rte_timer_subsystem_init(void)
{
	const struct rte_memzone *mz;
	struct rte_timer_data *data;
	int i, lcore_id;
	static const char *mz_name = "rte_timer_mz";
	const size_t data_arr_size =
			RTE_MAX_DATA_ELS * sizeof(*rte_timer_data_arr);
	const size_t mem_size = data_arr_size + sizeof(*rte_timer_mz_refcnt);
	bool do_full_init = true;

	if (rte_timer_subsystem_initialized)
		return -EALREADY;

	rte_mcfg_timer_lock();

	mz = rte_memzone_lookup(mz_name);
	if (mz == NULL) {
		mz = rte_memzone_reserve_aligned(mz_name, mem_size,
				SOCKET_ID_ANY, 0, RTE_CACHE_LINE_SIZE);
		if (mz == NULL) {
			rte_mcfg_timer_unlock();
			return -ENOMEM;
		}
	} else
		do_full_init = false;

	rte_timer_data_mz = mz;
	rte_timer_data_arr = mz->addr;
	rte_timer_mz_refcnt = (void *)((char *)mz->addr + data_arr_size);

	if (do_full_init) {
		for (i = 0; i < RTE_MAX_DATA_ELS; i++) {
			data = &rte_timer_data_arr[i];

			for (lcore_id = 0; lcore_id < RTE_MAX_LCORE;
			     lcore_id++) {
				rte_spinlock_init(
					&data->priv_timer[lcore_id].list_lock);
				data->priv_timer[lcore_id].prev_lcore =
					lcore_id;
			}
		}
	}

	rte_timer_data_arr[default_data_id].internal_flags |= FL_ALLOCATED;
	(*rte_timer_mz_refcnt)++;

	rte_mcfg_timer_unlock();

	rte_timer_subsystem_initialized = 1;

	return 0;
}
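
/*
 * Illustrative usage (a sketch, not part of the upstream file): an
 * application calls rte_timer_subsystem_init() once after rte_eal_init()
 * and before arming any timer, e.g.:
 *
 *	if (rte_eal_init(argc, argv) < 0)
 *		rte_exit(EXIT_FAILURE, "EAL initialization failed\n");
 *	if (rte_timer_subsystem_init() < 0)
 *		rte_exit(EXIT_FAILURE, "timer subsystem init failed\n");
 */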

void
rte_timer_subsystem_finalize(void)
{
	if (!rte_timer_subsystem_initialized)
		return;

	rte_mcfg_timer_lock();

	if (--(*rte_timer_mz_refcnt) == 0)
		rte_memzone_free(rte_timer_data_mz);

	rte_mcfg_timer_unlock();

	rte_timer_subsystem_initialized = 0;
}

/* Initialize the timer handle tim for use */
void
rte_timer_init(struct rte_timer *tim)
{
	union rte_timer_status status;

	status.state = RTE_TIMER_STOP;
	status.owner = RTE_TIMER_NO_OWNER;
	tim->status.u32 = status.u32;
}

/*
 * if timer is pending or stopped (or running on the same core as
 * us), mark timer as configuring, and on success return the previous
 * status of the timer
 */
static int
timer_set_config_state(struct rte_timer *tim,
		       union rte_timer_status *ret_prev_status,
		       struct priv_timer *priv_timer)
{
	union rte_timer_status prev_status, status;
	int success = 0;
	unsigned lcore_id;

	lcore_id = rte_lcore_id();

	/* wait until the timer is in a correct state before updating,
	 * and mark it as being configured */
	while (success == 0) {
		prev_status.u32 = tim->status.u32;

		/* timer is running on another core
		 * or ready to run on local core, exit
		 */
		if (prev_status.state == RTE_TIMER_RUNNING &&
		    (prev_status.owner != (uint16_t)lcore_id ||
		     tim != priv_timer[lcore_id].running_tim))
			return -1;

		/* timer is being configured on another core */
		if (prev_status.state == RTE_TIMER_CONFIG)
			return -1;

		/* here, we know that timer is stopped or pending,
		 * mark it atomically as being configured */
		status.state = RTE_TIMER_CONFIG;
		status.owner = (int16_t)lcore_id;
		success = rte_atomic32_cmpset(&tim->status.u32,
					      prev_status.u32,
					      status.u32);
	}

	ret_prev_status->u32 = prev_status.u32;
	return 0;
}

/*
 * if timer is pending, mark timer as running
 */
static int
timer_set_running_state(struct rte_timer *tim)
{
	union rte_timer_status prev_status, status;
	unsigned lcore_id = rte_lcore_id();
	int success = 0;

	/* wait until the timer is in a correct state before updating,
	 * and mark it as running */
	while (success == 0) {
		prev_status.u32 = tim->status.u32;

		/* timer is not pending anymore */
		if (prev_status.state != RTE_TIMER_PENDING)
			return -1;

		/* here, we know that timer is pending,
		 * mark it atomically as running */
		status.state = RTE_TIMER_RUNNING;
		status.owner = (int16_t)lcore_id;
		success = rte_atomic32_cmpset(&tim->status.u32,
					      prev_status.u32,
					      status.u32);
	}

	return 0;
}

/*
 * Return a skiplist level for a new entry.
 * This probabilistically gives a level with p=1/4 that an entry at level n
 * will also appear at level n+1.
 */
static uint32_t
timer_get_skiplist_level(unsigned curr_depth)
{
#ifdef RTE_LIBRTE_TIMER_DEBUG
	static uint32_t i, count = 0;
	static uint32_t levels[MAX_SKIPLIST_DEPTH] = {0};
#endif

	/* probability value is 1/4, i.e. all at level 0, 1 in 4 is at level 1,
	 * 1 in 16 at level 2, 1 in 64 at level 3, etc. Calculated using lowest
	 * bit position of a (pseudo)random number.
	 */
	uint32_t rand = rte_rand() & (UINT32_MAX - 1);
	uint32_t level = rand == 0 ? MAX_SKIPLIST_DEPTH : (rte_bsf32(rand)-1) / 2;

	/* limit the levels used to one above our current level, so we don't,
	 * for instance, have a level 0 and a level 7 without anything between
	 */
	if (level > curr_depth)
		level = curr_depth;
	if (level >= MAX_SKIPLIST_DEPTH)
		level = MAX_SKIPLIST_DEPTH-1;
#ifdef RTE_LIBRTE_TIMER_DEBUG
	count++;
	levels[level]++;
	if (count % 10000 == 0)
		for (i = 0; i < MAX_SKIPLIST_DEPTH; i++)
			printf("Level %u: %u\n", (unsigned)i, (unsigned)levels[i]);
#endif
	return level;
}
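
/*
 * Worked example of the level computation above (derived from the code, not
 * from upstream documentation): with bit 0 masked off, rte_bsf32() returns
 * the position of the lowest set bit, which is >= 1. Positions 1 and 2 give
 * level (1-1)/2 = (2-1)/2 = 0, positions 3 and 4 give level 1, positions 5
 * and 6 give level 2, and so on. Since each extra bit position halves the
 * probability, each level is chosen with one quarter the probability of the
 * level below it.
 */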

/*
 * For a given time value, get the entries at each level which
 * are <= that time value.
 */
static void
timer_get_prev_entries(uint64_t time_val, unsigned tim_lcore,
		       struct rte_timer **prev, struct priv_timer *priv_timer)
{
	unsigned lvl = priv_timer[tim_lcore].curr_skiplist_depth;
	prev[lvl] = &priv_timer[tim_lcore].pending_head;
	while (lvl != 0) {
		lvl--;
		prev[lvl] = prev[lvl+1];
		while (prev[lvl]->sl_next[lvl] &&
		       prev[lvl]->sl_next[lvl]->expire <= time_val)
			prev[lvl] = prev[lvl]->sl_next[lvl];
	}
}

/*
 * Given a timer node in the skiplist, find the previous entries for it at
 * all skiplist levels.
 */
static void
timer_get_prev_entries_for_node(struct rte_timer *tim, unsigned tim_lcore,
				struct rte_timer **prev,
				struct priv_timer *priv_timer)
{
	int i;

	/* to get a specific entry in the list, look for just lower than the
	 * time values, and then increment on each level individually if
	 * necessary */
	timer_get_prev_entries(tim->expire - 1, tim_lcore, prev, priv_timer);
	for (i = priv_timer[tim_lcore].curr_skiplist_depth - 1; i >= 0; i--) {
		while (prev[i]->sl_next[i] != NULL &&
		       prev[i]->sl_next[i] != tim &&
		       prev[i]->sl_next[i]->expire <= tim->expire)
			prev[i] = prev[i]->sl_next[i];
	}
}

/* call with lock held as necessary
 * add in list
 * timer must be in config state
 * timer must not be in a list
 */
static void
timer_add(struct rte_timer *tim, unsigned int tim_lcore,
	  struct priv_timer *priv_timer)
{
	unsigned lvl;
	struct rte_timer *prev[MAX_SKIPLIST_DEPTH+1];

	/* find where exactly this element goes in the list of elements
	 * for each depth. */
	timer_get_prev_entries(tim->expire, tim_lcore, prev, priv_timer);

	/* now assign it a new level and add at that level */
	const unsigned tim_level = timer_get_skiplist_level(
			priv_timer[tim_lcore].curr_skiplist_depth);
	if (tim_level == priv_timer[tim_lcore].curr_skiplist_depth)
		priv_timer[tim_lcore].curr_skiplist_depth++;

	lvl = tim_level;
	while (lvl > 0) {
		tim->sl_next[lvl] = prev[lvl]->sl_next[lvl];
		prev[lvl]->sl_next[lvl] = tim;
		lvl--;
	}
	tim->sl_next[0] = prev[0]->sl_next[0];
	prev[0]->sl_next[0] = tim;

	/* save the lowest list entry into the expire field of the dummy hdr
	 * NOTE: this is not atomic on 32-bit */
	priv_timer[tim_lcore].pending_head.expire =
			priv_timer[tim_lcore].pending_head.sl_next[0]->expire;
}

/*
 * del from list, lock if needed
 * timer must be in config state
 * timer must be in a list
 */
static void
timer_del(struct rte_timer *tim, union rte_timer_status prev_status,
	  int local_is_locked, struct priv_timer *priv_timer)
{
	unsigned lcore_id = rte_lcore_id();
	unsigned prev_owner = prev_status.owner;
	int i;
	struct rte_timer *prev[MAX_SKIPLIST_DEPTH+1];

	/* if timer is pending on another core, we need to lock the
	 * list; if it is on the local core, we need to lock if we are not
	 * called from rte_timer_manage() */
	if (prev_owner != lcore_id || !local_is_locked)
		rte_spinlock_lock(&priv_timer[prev_owner].list_lock);

	/* save the lowest list entry into the expire field of the dummy hdr.
	 * NOTE: this is not atomic on 32-bit */
	if (tim == priv_timer[prev_owner].pending_head.sl_next[0])
		priv_timer[prev_owner].pending_head.expire =
			((tim->sl_next[0] == NULL) ? 0 : tim->sl_next[0]->expire);

	/* adjust pointers from previous entries to point past this */
	timer_get_prev_entries_for_node(tim, prev_owner, prev, priv_timer);
	for (i = priv_timer[prev_owner].curr_skiplist_depth - 1; i >= 0; i--) {
		if (prev[i]->sl_next[i] == tim)
			prev[i]->sl_next[i] = tim->sl_next[i];
	}

	/* in case we deleted last entry at a level, adjust down max level */
	for (i = priv_timer[prev_owner].curr_skiplist_depth - 1; i >= 0; i--)
		if (priv_timer[prev_owner].pending_head.sl_next[i] == NULL)
			priv_timer[prev_owner].curr_skiplist_depth--;
		else
			break;

	if (prev_owner != lcore_id || !local_is_locked)
		rte_spinlock_unlock(&priv_timer[prev_owner].list_lock);
}

/* Reset and start the timer associated with the timer handle (private func) */
static int
__rte_timer_reset(struct rte_timer *tim, uint64_t expire,
		  uint64_t period, unsigned tim_lcore,
		  rte_timer_cb_t fct, void *arg,
		  int local_is_locked,
		  struct rte_timer_data *timer_data)
{
	union rte_timer_status prev_status, status;
	int ret;
	unsigned lcore_id = rte_lcore_id();
	struct priv_timer *priv_timer = timer_data->priv_timer;

	/* round robin for tim_lcore */
	if (tim_lcore == (unsigned)LCORE_ID_ANY) {
		if (lcore_id < RTE_MAX_LCORE) {
			/* EAL thread with valid lcore_id */
			tim_lcore = rte_get_next_lcore(
				priv_timer[lcore_id].prev_lcore,
				0, 1);
			priv_timer[lcore_id].prev_lcore = tim_lcore;
		} else
			/* non-EAL threads do not run rte_timer_manage(),
			 * so schedule the timer on the first enabled lcore. */
			tim_lcore = rte_get_next_lcore(LCORE_ID_ANY, 0, 1);
	}

	/* wait until the timer is in a correct state before updating,
	 * and mark it as being configured */
	ret = timer_set_config_state(tim, &prev_status, priv_timer);
	if (ret < 0)
		return -1;

	__TIMER_STAT_ADD(priv_timer, reset, 1);
	if (prev_status.state == RTE_TIMER_RUNNING &&
	    lcore_id < RTE_MAX_LCORE) {
		priv_timer[lcore_id].updated = 1;
	}

	/* remove it from list */
	if (prev_status.state == RTE_TIMER_PENDING) {
		timer_del(tim, prev_status, local_is_locked, priv_timer);
		__TIMER_STAT_ADD(priv_timer, pending, -1);
	}

	tim->period = period;
	tim->expire = expire;
	tim->f = fct;
	tim->arg = arg;

	/* if timer needs to be scheduled on another core, we need to
	 * lock the destination list; if it is on the local core, we need to
	 * lock if we are not called from rte_timer_manage()
	 */
	if (tim_lcore != lcore_id || !local_is_locked)
		rte_spinlock_lock(&priv_timer[tim_lcore].list_lock);

	__TIMER_STAT_ADD(priv_timer, pending, 1);
	timer_add(tim, tim_lcore, priv_timer);

	/* update state: as we are in CONFIG state, only we can modify
	 * the state, so we don't need to use cmpset() here */
	rte_wmb();
	status.state = RTE_TIMER_PENDING;
	status.owner = (int16_t)tim_lcore;
	tim->status.u32 = status.u32;

	if (tim_lcore != lcore_id || !local_is_locked)
		rte_spinlock_unlock(&priv_timer[tim_lcore].list_lock);

	return 0;
}

/* Reset and start the timer associated with the timer handle tim */
int
rte_timer_reset(struct rte_timer *tim, uint64_t ticks,
		enum rte_timer_type type, unsigned int tim_lcore,
		rte_timer_cb_t fct, void *arg)
{
	return rte_timer_alt_reset(default_data_id, tim, ticks, type,
				   tim_lcore, fct, arg);
}
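
/*
 * Illustrative usage (a sketch, not part of the upstream file): arm a
 * periodic timer that fires roughly once per second on the calling lcore.
 * The callback name is hypothetical; rte_get_timer_hz() returns the number
 * of timer ticks per second.
 *
 *	static void my_timer_cb(struct rte_timer *tim, void *arg);
 *
 *	struct rte_timer tim;
 *
 *	rte_timer_init(&tim);
 *	if (rte_timer_reset(&tim, rte_get_timer_hz(), PERIODICAL,
 *			rte_lcore_id(), my_timer_cb, NULL) != 0)
 *		// timer was running or being configured on another core
 *		handle_error();	// hypothetical error path
 */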

int
rte_timer_alt_reset(uint32_t timer_data_id, struct rte_timer *tim,
		    uint64_t ticks, enum rte_timer_type type,
		    unsigned int tim_lcore, rte_timer_cb_t fct, void *arg)
{
	uint64_t cur_time = rte_get_timer_cycles();
	uint64_t period;
	struct rte_timer_data *timer_data;

	TIMER_DATA_VALID_GET_OR_ERR_RET(timer_data_id, timer_data, -EINVAL);

	if (type == PERIODICAL)
		period = ticks;
	else
		period = 0;

	return __rte_timer_reset(tim, cur_time + ticks, period, tim_lcore,
				 fct, arg, 0, timer_data);
}

/* loop until rte_timer_reset() succeeds */
void
rte_timer_reset_sync(struct rte_timer *tim, uint64_t ticks,
		     enum rte_timer_type type, unsigned tim_lcore,
		     rte_timer_cb_t fct, void *arg)
{
	while (rte_timer_reset(tim, ticks, type, tim_lcore,
			       fct, arg) != 0)
		rte_pause();
}

static int
__rte_timer_stop(struct rte_timer *tim, int local_is_locked,
		 struct rte_timer_data *timer_data)
{
	union rte_timer_status prev_status, status;
	unsigned lcore_id = rte_lcore_id();
	int ret;
	struct priv_timer *priv_timer = timer_data->priv_timer;

	/* wait until the timer is in a correct state before updating,
	 * and mark it as being configured */
	ret = timer_set_config_state(tim, &prev_status, priv_timer);
	if (ret < 0)
		return -1;

	__TIMER_STAT_ADD(priv_timer, stop, 1);
	if (prev_status.state == RTE_TIMER_RUNNING &&
	    lcore_id < RTE_MAX_LCORE) {
		priv_timer[lcore_id].updated = 1;
	}

	/* remove it from list */
	if (prev_status.state == RTE_TIMER_PENDING) {
		timer_del(tim, prev_status, local_is_locked, priv_timer);
		__TIMER_STAT_ADD(priv_timer, pending, -1);
	}

	/* mark timer as stopped */
	rte_wmb();
	status.state = RTE_TIMER_STOP;
	status.owner = RTE_TIMER_NO_OWNER;
	tim->status.u32 = status.u32;

	return 0;
}

/* Stop the timer associated with the timer handle tim */
int
rte_timer_stop(struct rte_timer *tim)
{
	return rte_timer_alt_stop(default_data_id, tim);
}

int
rte_timer_alt_stop(uint32_t timer_data_id, struct rte_timer *tim)
{
	struct rte_timer_data *timer_data;

	TIMER_DATA_VALID_GET_OR_ERR_RET(timer_data_id, timer_data, -EINVAL);

	return __rte_timer_stop(tim, 0, timer_data);
}

/* loop until rte_timer_stop() succeeds */
void
rte_timer_stop_sync(struct rte_timer *tim)
{
	while (rte_timer_stop(tim) != 0)
		rte_pause();
}
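
/*
 * Illustrative usage (a sketch, not part of the upstream file): the _sync
 * variant is the safe way to stop a timer before releasing resources its
 * callback uses, since it busy-waits until no core is still running or
 * reconfiguring the timer:
 *
 *	rte_timer_stop_sync(&tim);
 *	rte_free(cb_resources);	// hypothetical state used by the callback
 */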

/* Test the PENDING status of the timer handle tim */
int
rte_timer_pending(struct rte_timer *tim)
{
	return tim->status.state == RTE_TIMER_PENDING;
}

/* must be called periodically, run all timers that expired */
static void
__rte_timer_manage(struct rte_timer_data *timer_data)
{
	union rte_timer_status status;
	struct rte_timer *tim, *next_tim;
	struct rte_timer *run_first_tim, **pprev;
	unsigned lcore_id = rte_lcore_id();
	struct rte_timer *prev[MAX_SKIPLIST_DEPTH + 1];
	uint64_t cur_time;
	int i, ret;
	struct priv_timer *priv_timer = timer_data->priv_timer;

	/* timer manager only runs on EAL thread with valid lcore_id */
	assert(lcore_id < RTE_MAX_LCORE);

	__TIMER_STAT_ADD(priv_timer, manage, 1);
	/* optimize for the case where per-cpu list is empty */
	if (priv_timer[lcore_id].pending_head.sl_next[0] == NULL)
		return;
	cur_time = rte_get_timer_cycles();

#ifdef RTE_ARCH_64
	/* on 64-bit the value cached in the pending_head.expire will be
	 * updated atomically, so we can consult that for a quick check here
	 * outside the lock */
	if (likely(priv_timer[lcore_id].pending_head.expire > cur_time))
		return;
#endif

	/* browse ordered list, add expired timers in 'expired' list */
	rte_spinlock_lock(&priv_timer[lcore_id].list_lock);

	/* if nothing to do just unlock and return */
	if (priv_timer[lcore_id].pending_head.sl_next[0] == NULL ||
	    priv_timer[lcore_id].pending_head.sl_next[0]->expire > cur_time) {
		rte_spinlock_unlock(&priv_timer[lcore_id].list_lock);
		return;
	}

	/* save start of list of expired timers */
	tim = priv_timer[lcore_id].pending_head.sl_next[0];

	/* break the existing list at current time point */
	timer_get_prev_entries(cur_time, lcore_id, prev, priv_timer);
	for (i = priv_timer[lcore_id].curr_skiplist_depth - 1; i >= 0; i--) {
		if (prev[i] == &priv_timer[lcore_id].pending_head)
			continue;
		priv_timer[lcore_id].pending_head.sl_next[i] =
			prev[i]->sl_next[i];
		if (prev[i]->sl_next[i] == NULL)
			priv_timer[lcore_id].curr_skiplist_depth--;
		prev[i]->sl_next[i] = NULL;
	}

	/* transition run-list from PENDING to RUNNING */
	run_first_tim = tim;
	pprev = &run_first_tim;

	for ( ; tim != NULL; tim = next_tim) {
		next_tim = tim->sl_next[0];

		ret = timer_set_running_state(tim);
		if (likely(ret == 0)) {
			pprev = &tim->sl_next[0];
		} else {
			/* another core is trying to re-config this one,
			 * remove it from local expired list
			 */
			*pprev = next_tim;
		}
	}

	/* update the next to expire timer value */
	priv_timer[lcore_id].pending_head.expire =
	    (priv_timer[lcore_id].pending_head.sl_next[0] == NULL) ? 0 :
		priv_timer[lcore_id].pending_head.sl_next[0]->expire;

	rte_spinlock_unlock(&priv_timer[lcore_id].list_lock);

	/* now scan expired list and call callbacks */
	for (tim = run_first_tim; tim != NULL; tim = next_tim) {
		next_tim = tim->sl_next[0];
		priv_timer[lcore_id].updated = 0;
		priv_timer[lcore_id].running_tim = tim;

		/* execute callback function with list unlocked */
		tim->f(tim, tim->arg);

		__TIMER_STAT_ADD(priv_timer, pending, -1);
		/* the timer was stopped or reloaded by the callback
		 * function, we have nothing to do here */
		if (priv_timer[lcore_id].updated == 1)
			continue;

		if (tim->period == 0) {
			/* remove from done list and mark timer as stopped */
			status.state = RTE_TIMER_STOP;
			status.owner = RTE_TIMER_NO_OWNER;
			rte_wmb();
			tim->status.u32 = status.u32;
		} else {
			/* keep it in list and mark timer as pending */
			rte_spinlock_lock(&priv_timer[lcore_id].list_lock);
			status.state = RTE_TIMER_PENDING;
			__TIMER_STAT_ADD(priv_timer, pending, 1);
			status.owner = (int16_t)lcore_id;
			rte_wmb();
			tim->status.u32 = status.u32;
			__rte_timer_reset(tim, tim->expire + tim->period,
				tim->period, lcore_id, tim->f, tim->arg, 1,
				timer_data);
			rte_spinlock_unlock(&priv_timer[lcore_id].list_lock);
		}
	}
	priv_timer[lcore_id].running_tim = NULL;
}

int
rte_timer_manage(void)
{
	struct rte_timer_data *timer_data;

	TIMER_DATA_VALID_GET_OR_ERR_RET(default_data_id, timer_data, -EINVAL);

	__rte_timer_manage(timer_data);

	return 0;
}
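
/*
 * Illustrative usage (a sketch, not part of the upstream file): every EAL
 * thread that owns timers must call rte_timer_manage() regularly from its
 * main loop. A common pattern throttles the calls to the resolution the
 * application needs; TIMER_RESOLUTION_CYCLES and do_work() are hypothetical.
 *
 *	uint64_t prev_tsc = 0, cur_tsc, diff_tsc;
 *
 *	while (!quit) {
 *		do_work();
 *		cur_tsc = rte_rdtsc();
 *		diff_tsc = cur_tsc - prev_tsc;
 *		if (diff_tsc > TIMER_RESOLUTION_CYCLES) {
 *			rte_timer_manage();
 *			prev_tsc = cur_tsc;
 *		}
 *	}
 */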

int
rte_timer_alt_manage(uint32_t timer_data_id,
		     unsigned int *poll_lcores,
		     int nb_poll_lcores,
		     rte_timer_alt_manage_cb_t f)
{
	unsigned int default_poll_lcores[] = {rte_lcore_id()};
	union rte_timer_status status;
	struct rte_timer *tim, *next_tim, **pprev;
	struct rte_timer *run_first_tims[RTE_MAX_LCORE];
	unsigned int this_lcore = rte_lcore_id();
	struct rte_timer *prev[MAX_SKIPLIST_DEPTH + 1];
	uint64_t cur_time;
	int i, j, ret;
	int nb_runlists = 0;
	struct rte_timer_data *data;
	struct priv_timer *privp;
	uint32_t poll_lcore;

	TIMER_DATA_VALID_GET_OR_ERR_RET(timer_data_id, data, -EINVAL);

	/* timer manager only runs on EAL thread with valid lcore_id */
	assert(this_lcore < RTE_MAX_LCORE);

	__TIMER_STAT_ADD(data->priv_timer, manage, 1);

	if (poll_lcores == NULL) {
		poll_lcores = default_poll_lcores;
		nb_poll_lcores = RTE_DIM(default_poll_lcores);
	}

	for (i = 0; i < nb_poll_lcores; i++) {
		poll_lcore = poll_lcores[i];
		privp = &data->priv_timer[poll_lcore];

		/* optimize for the case where per-cpu list is empty */
		if (privp->pending_head.sl_next[0] == NULL)
			continue;
		cur_time = rte_get_timer_cycles();

#ifdef RTE_ARCH_64
		/* on 64-bit the value cached in the pending_head.expire will
		 * be updated atomically, so we can consult that for a quick
		 * check here outside the lock
		 */
		if (likely(privp->pending_head.expire > cur_time))
			continue;
#endif

		/* browse ordered list, add expired timers in 'expired' list */
		rte_spinlock_lock(&privp->list_lock);

		/* if nothing to do just unlock and continue */
		if (privp->pending_head.sl_next[0] == NULL ||
		    privp->pending_head.sl_next[0]->expire > cur_time) {
			rte_spinlock_unlock(&privp->list_lock);
			continue;
		}

		/* save start of list of expired timers */
		tim = privp->pending_head.sl_next[0];

		/* break the existing list at current time point */
		timer_get_prev_entries(cur_time, poll_lcore, prev,
				       data->priv_timer);
		for (j = privp->curr_skiplist_depth - 1; j >= 0; j--) {
			if (prev[j] == &privp->pending_head)
				continue;
			privp->pending_head.sl_next[j] =
				prev[j]->sl_next[j];
			if (prev[j]->sl_next[j] == NULL)
				privp->curr_skiplist_depth--;

			prev[j]->sl_next[j] = NULL;
		}

		/* transition run-list from PENDING to RUNNING */
		run_first_tims[nb_runlists] = tim;
		pprev = &run_first_tims[nb_runlists];
		nb_runlists++;

		for ( ; tim != NULL; tim = next_tim) {
			next_tim = tim->sl_next[0];

			ret = timer_set_running_state(tim);
			if (likely(ret == 0)) {
				pprev = &tim->sl_next[0];
			} else {
				/* another core is trying to re-config this
				 * one, remove it from local expired list
				 */
				*pprev = next_tim;
			}
		}

		/* update the next to expire timer value */
		privp->pending_head.expire =
		    (privp->pending_head.sl_next[0] == NULL) ? 0 :
			privp->pending_head.sl_next[0]->expire;

		rte_spinlock_unlock(&privp->list_lock);
	}

	/* Now process the run lists */
	while (1) {
		bool done = true;
		uint64_t min_expire = UINT64_MAX;
		int min_idx = 0;

		/* Find the next oldest timer to process */
		for (i = 0; i < nb_runlists; i++) {
			tim = run_first_tims[i];

			if (tim != NULL && tim->expire < min_expire) {
				min_expire = tim->expire;
				min_idx = i;
				done = false;
			}
		}

		if (done)
			break;

		tim = run_first_tims[min_idx];

		/* Move down the runlist from which we picked a timer to
		 * execute
		 */
		run_first_tims[min_idx] = run_first_tims[min_idx]->sl_next[0];

		data->priv_timer[this_lcore].updated = 0;
		data->priv_timer[this_lcore].running_tim = tim;

		/* Call the provided callback function */
		f(tim);

		__TIMER_STAT_ADD(data->priv_timer, pending, -1);

		/* the timer was stopped or reloaded by the callback
		 * function, we have nothing to do here
		 */
		if (data->priv_timer[this_lcore].updated == 1)
			continue;

		if (tim->period == 0) {
			/* remove from done list and mark timer as stopped */
			status.state = RTE_TIMER_STOP;
			status.owner = RTE_TIMER_NO_OWNER;
			rte_wmb();
			tim->status.u32 = status.u32;
		} else {
			/* keep it in list and mark timer as pending */
			rte_spinlock_lock(
				&data->priv_timer[this_lcore].list_lock);
			status.state = RTE_TIMER_PENDING;
			__TIMER_STAT_ADD(data->priv_timer, pending, 1);
			status.owner = (int16_t)this_lcore;
			rte_wmb();
			tim->status.u32 = status.u32;
			__rte_timer_reset(tim, tim->expire + tim->period,
				tim->period, this_lcore, tim->f, tim->arg, 1,
				data);
			rte_spinlock_unlock(
				&data->priv_timer[this_lcore].list_lock);
		}

		data->priv_timer[this_lcore].running_tim = NULL;
	}

	return 0;
}

/* Walk pending lists, stopping timers and calling user-specified function */
int
rte_timer_stop_all(uint32_t timer_data_id, unsigned int *walk_lcores,
		   int nb_walk_lcores,
		   rte_timer_stop_all_cb_t f, void *f_arg)
{
	int i;
	struct priv_timer *priv_timer;
	uint32_t walk_lcore;
	struct rte_timer *tim, *next_tim;
	struct rte_timer_data *timer_data;

	TIMER_DATA_VALID_GET_OR_ERR_RET(timer_data_id, timer_data, -EINVAL);

	for (i = 0; i < nb_walk_lcores; i++) {
		walk_lcore = walk_lcores[i];
		priv_timer = &timer_data->priv_timer[walk_lcore];

		rte_spinlock_lock(&priv_timer->list_lock);

		for (tim = priv_timer->pending_head.sl_next[0];
		     tim != NULL;
		     tim = next_tim) {
			next_tim = tim->sl_next[0];

			/* Call timer_stop with lock held */
			__rte_timer_stop(tim, 1, timer_data);

			if (f)
				f(tim, f_arg);
		}

		rte_spinlock_unlock(&priv_timer->list_lock);
	}

	return 0;
}

int64_t
rte_timer_next_ticks(void)
{
	unsigned int lcore_id = rte_lcore_id();
	struct rte_timer_data *timer_data;
	struct priv_timer *priv_timer;
	const struct rte_timer *tm;
	uint64_t cur_time;
	int64_t left = -ENOENT;

	TIMER_DATA_VALID_GET_OR_ERR_RET(default_data_id, timer_data, -EINVAL);

	priv_timer = timer_data->priv_timer;
	cur_time = rte_get_timer_cycles();

	rte_spinlock_lock(&priv_timer[lcore_id].list_lock);
	tm = priv_timer[lcore_id].pending_head.sl_next[0];
	if (tm) {
		left = tm->expire - cur_time;
		if (left < 0)
			left = 0;
	}
	rte_spinlock_unlock(&priv_timer[lcore_id].list_lock);

	return left;
}
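
/*
 * Illustrative usage (a sketch, not part of the upstream file): a polling
 * loop can use rte_timer_next_ticks() to decide whether it may back off;
 * a negative return value means no timer is pending on this lcore.
 *
 *	int64_t ticks = rte_timer_next_ticks();
 *
 *	if (ticks < 0 || (uint64_t)ticks > idle_threshold)	// hypothetical threshold
 *		rte_pause();
 */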

/* dump statistics about timers */
static void
__rte_timer_dump_stats(struct rte_timer_data *timer_data __rte_unused, FILE *f)
{
#ifdef RTE_LIBRTE_TIMER_DEBUG
	struct rte_timer_debug_stats sum;
	unsigned lcore_id;
	struct priv_timer *priv_timer = timer_data->priv_timer;

	memset(&sum, 0, sizeof(sum));
	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
		sum.reset += priv_timer[lcore_id].stats.reset;
		sum.stop += priv_timer[lcore_id].stats.stop;
		sum.manage += priv_timer[lcore_id].stats.manage;
		sum.pending += priv_timer[lcore_id].stats.pending;
	}
	fprintf(f, "Timer statistics:\n");
	fprintf(f, "  reset = %"PRIu64"\n", sum.reset);
	fprintf(f, "  stop = %"PRIu64"\n", sum.stop);
	fprintf(f, "  manage = %"PRIu64"\n", sum.manage);
	fprintf(f, "  pending = %"PRIu64"\n", sum.pending);
#else
	fprintf(f, "No timer statistics, RTE_LIBRTE_TIMER_DEBUG is disabled\n");
#endif
}

int
rte_timer_dump_stats(FILE *f)
{
	return rte_timer_alt_dump_stats(default_data_id, f);
}

int
rte_timer_alt_dump_stats(uint32_t timer_data_id __rte_unused, FILE *f)
{
	struct rte_timer_data *timer_data;

	TIMER_DATA_VALID_GET_OR_ERR_RET(timer_data_id, timer_data, -EINVAL);

	__rte_timer_dump_stats(timer_data, f);

	return 0;
}
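
/*
 * Illustrative usage (a sketch, not part of the upstream file): print the
 * aggregate counters to the console; unless the library was built with
 * RTE_LIBRTE_TIMER_DEBUG this only prints a notice.
 *
 *	rte_timer_dump_stats(stdout);
 */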