1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2014 Intel Corporation
11 #include <rte_common.h>
12 #include <rte_cycles.h>
13 #include <rte_eal_memconfig.h>
14 #include <rte_memory.h>
15 #include <rte_lcore.h>
16 #include <rte_branch_prediction.h>
17 #include <rte_spinlock.h>
18 #include <rte_random.h>
19 #include <rte_pause.h>
20 #include <rte_memzone.h>
22 #include "rte_timer.h"
25 * Per-lcore info for timers.
28 struct rte_timer pending_head; /**< dummy timer instance to head up list */
29 rte_spinlock_t list_lock; /**< lock to protect list access */
31 /** per-core variable that is true if a timer was updated on this
32 * core since the last reset of the variable */
35 /** track the current depth of the skiplist */
36 unsigned curr_skiplist_depth;
38 unsigned prev_lcore; /**< used for lcore round robin */
40 /** running timer on this lcore now */
41 struct rte_timer *running_tim;
43 #ifdef RTE_LIBRTE_TIMER_DEBUG
44 /** per-lcore statistics */
45 struct rte_timer_debug_stats stats;
47 } __rte_cache_aligned;
49 #define FL_ALLOCATED (1 << 0)
50 struct rte_timer_data {
51 struct priv_timer priv_timer[RTE_MAX_LCORE];
52 uint8_t internal_flags;
55 #define RTE_MAX_DATA_ELS 64
56 static const struct rte_memzone *rte_timer_data_mz;
57 static int *volatile rte_timer_mz_refcnt;
58 static struct rte_timer_data *rte_timer_data_arr;
59 static const uint32_t default_data_id;
60 static uint32_t rte_timer_subsystem_initialized;
62 /* when debug is enabled, store some statistics */
63 #ifdef RTE_LIBRTE_TIMER_DEBUG
64 #define __TIMER_STAT_ADD(priv_timer, name, n) do { \
65 unsigned __lcore_id = rte_lcore_id(); \
66 if (__lcore_id < RTE_MAX_LCORE) \
67 priv_timer[__lcore_id].stats.name += (n); \
70 #define __TIMER_STAT_ADD(priv_timer, name, n) do {} while (0)
74 timer_data_valid(uint32_t id)
76 return rte_timer_data_arr &&
77 (rte_timer_data_arr[id].internal_flags & FL_ALLOCATED);
80 /* validate ID and retrieve timer data pointer, or return error value */
81 #define TIMER_DATA_VALID_GET_OR_ERR_RET(id, timer_data, retval) do { \
82 if (id >= RTE_MAX_DATA_ELS || !timer_data_valid(id)) \
84 timer_data = &rte_timer_data_arr[id]; \
88 rte_timer_data_alloc(uint32_t *id_ptr)
91 struct rte_timer_data *data;
93 if (!rte_timer_subsystem_initialized)
96 for (i = 0; i < RTE_MAX_DATA_ELS; i++) {
97 data = &rte_timer_data_arr[i];
98 if (!(data->internal_flags & FL_ALLOCATED)) {
99 data->internal_flags |= FL_ALLOCATED;
112 rte_timer_data_dealloc(uint32_t id)
114 struct rte_timer_data *timer_data;
115 TIMER_DATA_VALID_GET_OR_ERR_RET(id, timer_data, -EINVAL);
117 timer_data->internal_flags &= ~(FL_ALLOCATED);
122 /* Init the timer library. Allocate an array of timer data structs in shared
123 * memory, and allocate the zeroth entry for use with original timer
124 * APIs. Since the intersection of the sets of lcore ids in primary and
125 * secondary processes should be empty, the zeroth entry can be shared by
126 * multiple processes.
129 rte_timer_subsystem_init(void)
131 const struct rte_memzone *mz;
132 struct rte_timer_data *data;
134 static const char *mz_name = "rte_timer_mz";
135 const size_t data_arr_size =
136 RTE_MAX_DATA_ELS * sizeof(*rte_timer_data_arr);
137 const size_t mem_size = data_arr_size + sizeof(*rte_timer_mz_refcnt);
138 bool do_full_init = true;
140 rte_mcfg_timer_lock();
142 if (rte_timer_subsystem_initialized) {
143 rte_mcfg_timer_unlock();
147 mz = rte_memzone_lookup(mz_name);
149 mz = rte_memzone_reserve_aligned(mz_name, mem_size,
150 SOCKET_ID_ANY, 0, RTE_CACHE_LINE_SIZE);
152 rte_mcfg_timer_unlock();
157 do_full_init = false;
159 rte_timer_data_mz = mz;
160 rte_timer_data_arr = mz->addr;
161 rte_timer_mz_refcnt = (void *)((char *)mz->addr + data_arr_size);
164 for (i = 0; i < RTE_MAX_DATA_ELS; i++) {
165 data = &rte_timer_data_arr[i];
167 for (lcore_id = 0; lcore_id < RTE_MAX_LCORE;
170 &data->priv_timer[lcore_id].list_lock);
171 data->priv_timer[lcore_id].prev_lcore =
177 rte_timer_data_arr[default_data_id].internal_flags |= FL_ALLOCATED;
178 (*rte_timer_mz_refcnt)++;
180 rte_timer_subsystem_initialized = 1;
182 rte_mcfg_timer_unlock();
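/* Usage sketch, not part of the library: the typical order in which an
 * application would bring the subsystem up and reserve a private timer-data
 * instance for the _alt_ APIs. example_timer_setup and the
 * RTE_TIMER_USAGE_SKETCH guard are hypothetical names, so this block never
 * builds as part of this file.
 */
#ifdef RTE_TIMER_USAGE_SKETCH
static int
example_timer_setup(uint32_t *timer_data_id)
{
	int ret;

	/* set up the shared timer-data memzone; -EALREADY just means another
	 * part of the application already did this
	 */
	ret = rte_timer_subsystem_init();
	if (ret < 0 && ret != -EALREADY)
		return ret;

	/* reserve one element of rte_timer_data_arr for this application */
	return rte_timer_data_alloc(timer_data_id);
}
#endif /* RTE_TIMER_USAGE_SKETCH */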
188 rte_timer_subsystem_finalize(void)
190 rte_mcfg_timer_lock();
192 if (!rte_timer_subsystem_initialized) {
193 rte_mcfg_timer_unlock();
197 if (--(*rte_timer_mz_refcnt) == 0)
198 rte_memzone_free(rte_timer_data_mz);
200 rte_timer_subsystem_initialized = 0;
202 rte_mcfg_timer_unlock();
205 /* Initialize the timer handle tim for use */
207 rte_timer_init(struct rte_timer *tim)
209 union rte_timer_status status;
211 status.state = RTE_TIMER_STOP;
212 status.owner = RTE_TIMER_NO_OWNER;
213 __atomic_store_n(&tim->status.u32, status.u32, __ATOMIC_RELAXED);
217 * if the timer is pending or stopped (or running on the same core as
218 * us), mark it as being configured, and on success return the previous
219 * status of the timer
222 timer_set_config_state(struct rte_timer *tim,
223 union rte_timer_status *ret_prev_status,
224 struct priv_timer *priv_timer)
226 union rte_timer_status prev_status, status;
230 lcore_id = rte_lcore_id();
232 /* wait until the timer is in a valid state before updating it,
233 * and mark it as being configured */
234 prev_status.u32 = __atomic_load_n(&tim->status.u32, __ATOMIC_RELAXED);
236 while (success == 0) {
237 /* timer is running on another core
238 * or ready to run on local core, exit
240 if (prev_status.state == RTE_TIMER_RUNNING &&
241 (prev_status.owner != (uint16_t)lcore_id ||
242 tim != priv_timer[lcore_id].running_tim))
245 /* timer is being configured on another core */
246 if (prev_status.state == RTE_TIMER_CONFIG)
249 /* here, we know that timer is stopped or pending,
250 * mark it atomically as being configured */
251 status.state = RTE_TIMER_CONFIG;
252 status.owner = (int16_t)lcore_id;
253 /* A CONFIG state acts as a locked state: while the
254 * timer is in the CONFIG state, its status cannot be changed
255 * by other threads, so we should use ACQUIRE here.
257 success = __atomic_compare_exchange_n(&tim->status.u32,
264 ret_prev_status->u32 = prev_status.u32;
269 * if timer is pending, mark timer as running
272 timer_set_running_state(struct rte_timer *tim)
274 union rte_timer_status prev_status, status;
275 unsigned lcore_id = rte_lcore_id();
278 /* wait until the timer is in a valid state before updating it,
279 * and mark it as running */
280 prev_status.u32 = __atomic_load_n(&tim->status.u32, __ATOMIC_RELAXED);
282 while (success == 0) {
283 /* timer is not pending anymore */
284 if (prev_status.state != RTE_TIMER_PENDING)
287 /* we know that the timer will be pending at this point;
288 * mark it atomically as being running
290 status.state = RTE_TIMER_RUNNING;
291 status.owner = (int16_t)lcore_id;
292 /* A RUNNING state acts as a locked state: while the
293 * timer is in the RUNNING state, its status cannot be changed
294 * by other threads, so we should use ACQUIRE here.
296 success = __atomic_compare_exchange_n(&tim->status.u32,
307 * Return a skiplist level for a new entry.
308 * This probabilistically gives a level such that an entry at level n
309 * also appears at level n+1 with probability p=1/4.
312 timer_get_skiplist_level(unsigned curr_depth)
314 #ifdef RTE_LIBRTE_TIMER_DEBUG
315 static uint32_t i, count = 0;
316 static uint32_t levels[MAX_SKIPLIST_DEPTH] = {0};
319 /* the probability value is 1/4, i.e. all entries are at level 0, 1 in 4
320 * at level 1, 1 in 16 at level 2, 1 in 64 at level 3, etc. Calculated
321 * using the lowest set bit position of a (pseudo)random number.
323 uint32_t rand = rte_rand() & (UINT32_MAX - 1);
324 uint32_t level = rand == 0 ? MAX_SKIPLIST_DEPTH : (rte_bsf32(rand)-1) / 2;
326 /* limit the levels used to one above our current level, so we don't,
327 * for instance, have a level 0 and a level 7 without anything between
329 if (level > curr_depth)
331 if (level >= MAX_SKIPLIST_DEPTH)
332 level = MAX_SKIPLIST_DEPTH-1;
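	/* Worked example (illustrative): if rte_rand() returns 0x58
	 * (binary 0101 1000), bit 0 is masked off, the lowest set bit is at
	 * position 3 and the level is (3 - 1) / 2 = 1. Lowest set bits at
	 * positions 1-2 give level 0 (probability 3/4), 3-4 give level 1
	 * (3/16), 5-6 give level 2 (3/64), and so on, so each level is 1/4 as
	 * likely as the one below it.
	 */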
333 #ifdef RTE_LIBRTE_TIMER_DEBUG
336 if (count % 10000 == 0)
337 for (i = 0; i < MAX_SKIPLIST_DEPTH; i++)
338 printf("Level %u: %u\n", (unsigned)i, (unsigned)levels[i]);
344 * For a given time value, get the entries at each level which
345 * are <= that time value.
348 timer_get_prev_entries(uint64_t time_val, unsigned tim_lcore,
349 struct rte_timer **prev, struct priv_timer *priv_timer)
351 unsigned lvl = priv_timer[tim_lcore].curr_skiplist_depth;
352 prev[lvl] = &priv_timer[tim_lcore].pending_head;
355 prev[lvl] = prev[lvl+1];
356 while (prev[lvl]->sl_next[lvl] &&
357 prev[lvl]->sl_next[lvl]->expire <= time_val)
358 prev[lvl] = prev[lvl]->sl_next[lvl];
363 * Given a timer node in the skiplist, find the previous entries for it at
364 * all skiplist levels.
367 timer_get_prev_entries_for_node(struct rte_timer *tim, unsigned tim_lcore,
368 struct rte_timer **prev,
369 struct priv_timer *priv_timer)
373 /* to get a specific entry in the list, first look for entries just below
374 * its expiry time, then advance on each level individually as necessary
376 timer_get_prev_entries(tim->expire - 1, tim_lcore, prev, priv_timer);
377 for (i = priv_timer[tim_lcore].curr_skiplist_depth - 1; i >= 0; i--) {
378 while (prev[i]->sl_next[i] != NULL &&
379 prev[i]->sl_next[i] != tim &&
380 prev[i]->sl_next[i]->expire <= tim->expire)
381 prev[i] = prev[i]->sl_next[i];
385 /* call with lock held as necessary
387 * timer must be in config state
388 * timer must not be in a list
391 timer_add(struct rte_timer *tim, unsigned int tim_lcore,
392 struct priv_timer *priv_timer)
395 struct rte_timer *prev[MAX_SKIPLIST_DEPTH+1];
397 /* find exactly where this element goes in the list of elements
399 timer_get_prev_entries(tim->expire, tim_lcore, prev, priv_timer);
401 /* now assign it a new level and add at that level */
402 const unsigned tim_level = timer_get_skiplist_level(
403 priv_timer[tim_lcore].curr_skiplist_depth);
404 if (tim_level == priv_timer[tim_lcore].curr_skiplist_depth)
405 priv_timer[tim_lcore].curr_skiplist_depth++;
409 tim->sl_next[lvl] = prev[lvl]->sl_next[lvl];
410 prev[lvl]->sl_next[lvl] = tim;
413 tim->sl_next[0] = prev[0]->sl_next[0];
414 prev[0]->sl_next[0] = tim;
416 /* save the lowest list entry into the expire field of the dummy hdr
417 * NOTE: this is not atomic on 32-bit */
418 priv_timer[tim_lcore].pending_head.expire =
419 priv_timer[tim_lcore].pending_head.sl_next[0]->expire;
423 * del from list, lock if needed
424 * timer must be in config state
425 * timer must be in a list
428 timer_del(struct rte_timer *tim, union rte_timer_status prev_status,
429 int local_is_locked, struct priv_timer *priv_timer)
431 unsigned lcore_id = rte_lcore_id();
432 unsigned prev_owner = prev_status.owner;
434 struct rte_timer *prev[MAX_SKIPLIST_DEPTH+1];
436 /* if the timer is pending on another core, we need to lock that
437 * core's list; if it is on the local core, we need to lock only if
438 * we are not called from rte_timer_manage() */
439 if (prev_owner != lcore_id || !local_is_locked)
440 rte_spinlock_lock(&priv_timer[prev_owner].list_lock);
442 /* save the lowest list entry into the expire field of the dummy hdr.
443 * NOTE: this is not atomic on 32-bit */
444 if (tim == priv_timer[prev_owner].pending_head.sl_next[0])
445 priv_timer[prev_owner].pending_head.expire =
446 ((tim->sl_next[0] == NULL) ? 0 : tim->sl_next[0]->expire);
448 /* adjust pointers from previous entries to point past this */
449 timer_get_prev_entries_for_node(tim, prev_owner, prev, priv_timer);
450 for (i = priv_timer[prev_owner].curr_skiplist_depth - 1; i >= 0; i--) {
451 if (prev[i]->sl_next[i] == tim)
452 prev[i]->sl_next[i] = tim->sl_next[i];
455 /* in case we deleted the last entry at a level, adjust the max level down */
456 for (i = priv_timer[prev_owner].curr_skiplist_depth - 1; i >= 0; i--)
457 if (priv_timer[prev_owner].pending_head.sl_next[i] == NULL)
458 priv_timer[prev_owner].curr_skiplist_depth--;
462 if (prev_owner != lcore_id || !local_is_locked)
463 rte_spinlock_unlock(&priv_timer[prev_owner].list_lock);
466 /* Reset and start the timer associated with the timer handle (private func) */
468 __rte_timer_reset(struct rte_timer *tim, uint64_t expire,
469 uint64_t period, unsigned tim_lcore,
470 rte_timer_cb_t fct, void *arg,
472 struct rte_timer_data *timer_data)
474 union rte_timer_status prev_status, status;
476 unsigned lcore_id = rte_lcore_id();
477 struct priv_timer *priv_timer = timer_data->priv_timer;
479 /* round robin for tim_lcore */
480 if (tim_lcore == (unsigned)LCORE_ID_ANY) {
481 if (lcore_id < RTE_MAX_LCORE) {
482 /* EAL thread with valid lcore_id */
483 tim_lcore = rte_get_next_lcore(
484 priv_timer[lcore_id].prev_lcore,
486 priv_timer[lcore_id].prev_lcore = tim_lcore;
488 /* non-EAL threads do not run rte_timer_manage(),
489 * so schedule the timer on the first enabled lcore. */
490 tim_lcore = rte_get_next_lcore(LCORE_ID_ANY, 0, 1);
493 /* wait until the timer is in a valid state before updating it,
494 * and mark it as being configured */
495 ret = timer_set_config_state(tim, &prev_status, priv_timer);
499 __TIMER_STAT_ADD(priv_timer, reset, 1);
500 if (prev_status.state == RTE_TIMER_RUNNING &&
501 lcore_id < RTE_MAX_LCORE) {
502 priv_timer[lcore_id].updated = 1;
505 /* remove it from list */
506 if (prev_status.state == RTE_TIMER_PENDING) {
507 timer_del(tim, prev_status, local_is_locked, priv_timer);
508 __TIMER_STAT_ADD(priv_timer, pending, -1);
511 tim->period = period;
512 tim->expire = expire;
516 /* if the timer needs to be scheduled on another core, we need to
517 * lock the destination list; if it is on the local core, we need to lock
518 * only if we are not called from rte_timer_manage()
520 if (tim_lcore != lcore_id || !local_is_locked)
521 rte_spinlock_lock(&priv_timer[tim_lcore].list_lock);
523 __TIMER_STAT_ADD(priv_timer, pending, 1);
524 timer_add(tim, tim_lcore, priv_timer);
526 /* update state: as we are in the CONFIG state, only we can modify
527 * the state, so we don't need to use cmpset() here */
528 status.state = RTE_TIMER_PENDING;
529 status.owner = (int16_t)tim_lcore;
530 /* The "RELEASE" ordering guarantees the memory operations above
531 * the status update are observed before the update by all threads
533 __atomic_store_n(&tim->status.u32, status.u32, __ATOMIC_RELEASE);
535 if (tim_lcore != lcore_id || !local_is_locked)
536 rte_spinlock_unlock(&priv_timer[tim_lcore].list_lock);
541 /* Reset and start the timer associated with the timer handle tim */
543 rte_timer_reset(struct rte_timer *tim, uint64_t ticks,
544 enum rte_timer_type type, unsigned int tim_lcore,
545 rte_timer_cb_t fct, void *arg)
547 return rte_timer_alt_reset(default_data_id, tim, ticks, type,
548 tim_lcore, fct, arg);
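/* Usage sketch, not part of the library: arming a one-shot timer on the
 * calling lcore with the original API. example_cb, example_arm_oneshot and
 * the RTE_TIMER_USAGE_SKETCH guard are hypothetical names used only for
 * illustration.
 */
#ifdef RTE_TIMER_USAGE_SKETCH
static void
example_cb(struct rte_timer *tim, void *arg)
{
	/* work to do when the timer expires; single-shot timers need no re-arm */
	RTE_SET_USED(tim);
	RTE_SET_USED(arg);
}

static int
example_arm_oneshot(struct rte_timer *tim)
{
	/* expire roughly one second from now, on the lcore calling this */
	uint64_t ticks = rte_get_timer_hz();

	rte_timer_init(tim);

	/* returns a negative value if the timer is currently being run or
	 * configured on another lcore
	 */
	return rte_timer_reset(tim, ticks, SINGLE, rte_lcore_id(),
			       example_cb, NULL);
}
#endif /* RTE_TIMER_USAGE_SKETCH */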
552 rte_timer_alt_reset(uint32_t timer_data_id, struct rte_timer *tim,
553 uint64_t ticks, enum rte_timer_type type,
554 unsigned int tim_lcore, rte_timer_cb_t fct, void *arg)
556 uint64_t cur_time = rte_get_timer_cycles();
558 struct rte_timer_data *timer_data;
560 TIMER_DATA_VALID_GET_OR_ERR_RET(timer_data_id, timer_data, -EINVAL);
562 if (type == PERIODICAL)
567 return __rte_timer_reset(tim, cur_time + ticks, period, tim_lcore,
568 fct, arg, 0, timer_data);
571 /* loop until rte_timer_reset() succeeds */
573 rte_timer_reset_sync(struct rte_timer *tim, uint64_t ticks,
574 enum rte_timer_type type, unsigned tim_lcore,
575 rte_timer_cb_t fct, void *arg)
577 while (rte_timer_reset(tim, ticks, type, tim_lcore,
583 __rte_timer_stop(struct rte_timer *tim, int local_is_locked,
584 struct rte_timer_data *timer_data)
586 union rte_timer_status prev_status, status;
587 unsigned lcore_id = rte_lcore_id();
589 struct priv_timer *priv_timer = timer_data->priv_timer;
591 /* wait until the timer is in a valid state before updating it,
592 * and mark it as being configured */
593 ret = timer_set_config_state(tim, &prev_status, priv_timer);
597 __TIMER_STAT_ADD(priv_timer, stop, 1);
598 if (prev_status.state == RTE_TIMER_RUNNING &&
599 lcore_id < RTE_MAX_LCORE) {
600 priv_timer[lcore_id].updated = 1;
603 /* remove it from list */
604 if (prev_status.state == RTE_TIMER_PENDING) {
605 timer_del(tim, prev_status, local_is_locked, priv_timer);
606 __TIMER_STAT_ADD(priv_timer, pending, -1);
609 /* mark timer as stopped */
610 status.state = RTE_TIMER_STOP;
611 status.owner = RTE_TIMER_NO_OWNER;
612 /* The "RELEASE" ordering guarantees the memory operations above
613 * the status update are observed before the update by all threads
615 __atomic_store_n(&tim->status.u32, status.u32, __ATOMIC_RELEASE);
620 /* Stop the timer associated with the timer handle tim */
622 rte_timer_stop(struct rte_timer *tim)
624 return rte_timer_alt_stop(default_data_id, tim);
628 rte_timer_alt_stop(uint32_t timer_data_id, struct rte_timer *tim)
630 struct rte_timer_data *timer_data;
632 TIMER_DATA_VALID_GET_OR_ERR_RET(timer_data_id, timer_data, -EINVAL);
634 return __rte_timer_stop(tim, 0, timer_data);
637 /* loop until rte_timer_stop() succeeds */
639 rte_timer_stop_sync(struct rte_timer *tim)
641 while (rte_timer_stop(tim) != 0)
645 /* Test the PENDING status of the timer handle tim */
647 rte_timer_pending(struct rte_timer *tim)
649 return __atomic_load_n(&tim->status.state,
650 __ATOMIC_RELAXED) == RTE_TIMER_PENDING;
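/* Usage sketch, not part of the library: cancelling a timer safely from a
 * thread that may race with the expiry callback. example_cancel and the
 * RTE_TIMER_USAGE_SKETCH guard are hypothetical.
 */
#ifdef RTE_TIMER_USAGE_SKETCH
static void
example_cancel(struct rte_timer *tim)
{
	/* skip the stop entirely if the timer is no longer queued */
	if (rte_timer_pending(tim))
		rte_timer_stop_sync(tim); /* retries while the timer is RUNNING */
}
#endif /* RTE_TIMER_USAGE_SKETCH */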
653 /* must be called periodically; runs all timers that have expired */
655 __rte_timer_manage(struct rte_timer_data *timer_data)
657 union rte_timer_status status;
658 struct rte_timer *tim, *next_tim;
659 struct rte_timer *run_first_tim, **pprev;
660 unsigned lcore_id = rte_lcore_id();
661 struct rte_timer *prev[MAX_SKIPLIST_DEPTH + 1];
664 struct priv_timer *priv_timer = timer_data->priv_timer;
666 /* timer manager only runs on EAL thread with valid lcore_id */
667 assert(lcore_id < RTE_MAX_LCORE);
669 __TIMER_STAT_ADD(priv_timer, manage, 1);
670 /* optimize for the case where per-cpu list is empty */
671 if (priv_timer[lcore_id].pending_head.sl_next[0] == NULL)
673 cur_time = rte_get_timer_cycles();
676 /* on 64-bit the value cached in pending_head.expire will be
677 * updated atomically, so we can consult that for a quick check here
678 * outside the lock */
679 if (likely(priv_timer[lcore_id].pending_head.expire > cur_time))
683 /* browse ordered list, add expired timers in 'expired' list */
684 rte_spinlock_lock(&priv_timer[lcore_id].list_lock);
686 /* if nothing to do just unlock and return */
687 if (priv_timer[lcore_id].pending_head.sl_next[0] == NULL ||
688 priv_timer[lcore_id].pending_head.sl_next[0]->expire > cur_time) {
689 rte_spinlock_unlock(&priv_timer[lcore_id].list_lock);
693 /* save start of list of expired timers */
694 tim = priv_timer[lcore_id].pending_head.sl_next[0];
696 /* break the existing list at current time point */
697 timer_get_prev_entries(cur_time, lcore_id, prev, priv_timer);
698 for (i = priv_timer[lcore_id].curr_skiplist_depth -1; i >= 0; i--) {
699 if (prev[i] == &priv_timer[lcore_id].pending_head)
701 priv_timer[lcore_id].pending_head.sl_next[i] =
703 if (prev[i]->sl_next[i] == NULL)
704 priv_timer[lcore_id].curr_skiplist_depth--;
705 prev[i]->sl_next[i] = NULL;
708 /* transition run-list from PENDING to RUNNING */
710 pprev = &run_first_tim;
712 for ( ; tim != NULL; tim = next_tim) {
713 next_tim = tim->sl_next[0];
715 ret = timer_set_running_state(tim);
716 if (likely(ret == 0)) {
717 pprev = &tim->sl_next[0];
719 /* another core is trying to reconfigure this timer,
720 * so remove it from the local expired list
726 /* update the next to expire timer value */
727 priv_timer[lcore_id].pending_head.expire =
728 (priv_timer[lcore_id].pending_head.sl_next[0] == NULL) ? 0 :
729 priv_timer[lcore_id].pending_head.sl_next[0]->expire;
731 rte_spinlock_unlock(&priv_timer[lcore_id].list_lock);
733 /* now scan expired list and call callbacks */
734 for (tim = run_first_tim; tim != NULL; tim = next_tim) {
735 next_tim = tim->sl_next[0];
736 priv_timer[lcore_id].updated = 0;
737 priv_timer[lcore_id].running_tim = tim;
739 /* execute callback function with list unlocked */
740 tim->f(tim, tim->arg);
742 __TIMER_STAT_ADD(priv_timer, pending, -1);
743 /* the timer was stopped or reloaded by the callback
744 * function; we have nothing to do here */
745 if (priv_timer[lcore_id].updated == 1)
748 if (tim->period == 0) {
749 /* remove from done list and mark timer as stopped */
750 status.state = RTE_TIMER_STOP;
751 status.owner = RTE_TIMER_NO_OWNER;
752 /* The "RELEASE" ordering guarantees the memory
753 * operations above the status update are observed
754 * before the update by all threads
756 __atomic_store_n(&tim->status.u32, status.u32,
760 /* keep it in list and mark timer as pending */
761 rte_spinlock_lock(&priv_timer[lcore_id].list_lock);
762 status.state = RTE_TIMER_PENDING;
763 __TIMER_STAT_ADD(priv_timer, pending, 1);
764 status.owner = (int16_t)lcore_id;
765 /* The "RELEASE" ordering guarantees the memory
766 * operations above the status update are observed
767 * before the update by all threads
769 __atomic_store_n(&tim->status.u32, status.u32,
771 __rte_timer_reset(tim, tim->expire + tim->period,
772 tim->period, lcore_id, tim->f, tim->arg, 1,
774 rte_spinlock_unlock(&priv_timer[lcore_id].list_lock);
777 priv_timer[lcore_id].running_tim = NULL;
781 rte_timer_manage(void)
783 struct rte_timer_data *timer_data;
785 TIMER_DATA_VALID_GET_OR_ERR_RET(default_data_id, timer_data, -EINVAL);
787 __rte_timer_manage(timer_data);
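/* Usage sketch, not part of the library: a polling loop that services its
 * lcore's timer list at a fixed resolution. example_lcore_main, example_quit
 * and the RTE_TIMER_USAGE_SKETCH guard are hypothetical; ~100 us is an
 * arbitrary resolution chosen for the example.
 */
#ifdef RTE_TIMER_USAGE_SKETCH
static volatile int example_quit;

static int
example_lcore_main(void *arg)
{
	const uint64_t resolution = rte_get_timer_hz() / 10000; /* ~100 us */
	uint64_t prev = rte_get_timer_cycles();
	uint64_t cur;

	RTE_SET_USED(arg);

	while (!example_quit) {
		/* ... per-iteration work, e.g. packet processing ... */

		cur = rte_get_timer_cycles();
		if (cur - prev >= resolution) {
			rte_timer_manage(); /* run expired callbacks on this lcore */
			prev = cur;
		}
	}
	return 0;
}
#endif /* RTE_TIMER_USAGE_SKETCH */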
793 rte_timer_alt_manage(uint32_t timer_data_id,
794 unsigned int *poll_lcores,
796 rte_timer_alt_manage_cb_t f)
798 unsigned int default_poll_lcores[] = {rte_lcore_id()};
799 union rte_timer_status status;
800 struct rte_timer *tim, *next_tim, **pprev;
801 struct rte_timer *run_first_tims[RTE_MAX_LCORE];
802 unsigned int this_lcore = rte_lcore_id();
803 struct rte_timer *prev[MAX_SKIPLIST_DEPTH + 1];
807 struct rte_timer_data *data;
808 struct priv_timer *privp;
811 TIMER_DATA_VALID_GET_OR_ERR_RET(timer_data_id, data, -EINVAL);
813 /* timer manager only runs on EAL thread with valid lcore_id */
814 assert(this_lcore < RTE_MAX_LCORE);
816 __TIMER_STAT_ADD(data->priv_timer, manage, 1);
818 if (poll_lcores == NULL) {
819 poll_lcores = default_poll_lcores;
820 nb_poll_lcores = RTE_DIM(default_poll_lcores);
823 for (i = 0; i < nb_poll_lcores; i++) {
824 poll_lcore = poll_lcores[i];
825 privp = &data->priv_timer[poll_lcore];
827 /* optimize for the case where per-cpu list is empty */
828 if (privp->pending_head.sl_next[0] == NULL)
830 cur_time = rte_get_timer_cycles();
833 /* on 64-bit the value cached in pending_head.expire will
834 * be updated atomically, so we can consult that for a quick
835 * check here outside the lock
837 if (likely(privp->pending_head.expire > cur_time))
841 /* browse ordered list, add expired timers in 'expired' list */
842 rte_spinlock_lock(&privp->list_lock);
844 /* if nothing to do just unlock and return */
845 if (privp->pending_head.sl_next[0] == NULL ||
846 privp->pending_head.sl_next[0]->expire > cur_time) {
847 rte_spinlock_unlock(&privp->list_lock);
851 /* save start of list of expired timers */
852 tim = privp->pending_head.sl_next[0];
854 /* break the existing list at current time point */
855 timer_get_prev_entries(cur_time, poll_lcore, prev,
857 for (j = privp->curr_skiplist_depth - 1; j >= 0; j--) {
858 if (prev[j] == &privp->pending_head)
860 privp->pending_head.sl_next[j] =
862 if (prev[j]->sl_next[j] == NULL)
863 privp->curr_skiplist_depth--;
865 prev[j]->sl_next[j] = NULL;
868 /* transition run-list from PENDING to RUNNING */
869 run_first_tims[nb_runlists] = tim;
870 pprev = &run_first_tims[nb_runlists];
873 for ( ; tim != NULL; tim = next_tim) {
874 next_tim = tim->sl_next[0];
876 ret = timer_set_running_state(tim);
877 if (likely(ret == 0)) {
878 pprev = &tim->sl_next[0];
880 /* another core is trying to reconfigure this timer,
881 * so remove it from the local expired list
887 /* update the next to expire timer value */
888 privp->pending_head.expire =
889 (privp->pending_head.sl_next[0] == NULL) ? 0 :
890 privp->pending_head.sl_next[0]->expire;
892 rte_spinlock_unlock(&privp->list_lock);
895 /* Now process the run lists */
898 uint64_t min_expire = UINT64_MAX;
901 /* Find the next oldest timer to process */
902 for (i = 0; i < nb_runlists; i++) {
903 tim = run_first_tims[i];
905 if (tim != NULL && tim->expire < min_expire) {
906 min_expire = tim->expire;
915 tim = run_first_tims[min_idx];
917 /* Move down the runlist from which we picked a timer to
920 run_first_tims[min_idx] = run_first_tims[min_idx]->sl_next[0];
922 data->priv_timer[this_lcore].updated = 0;
923 data->priv_timer[this_lcore].running_tim = tim;
925 /* Call the provided callback function */
928 __TIMER_STAT_ADD(data->priv_timer, pending, -1);
930 /* the timer was stopped or reloaded by the callback
931 * function; we have nothing to do here
933 if (data->priv_timer[this_lcore].updated == 1)
936 if (tim->period == 0) {
937 /* remove from done list and mark timer as stopped */
938 status.state = RTE_TIMER_STOP;
939 status.owner = RTE_TIMER_NO_OWNER;
940 /* The "RELEASE" ordering guarantees the memory
941 * operations above the status update are observed
942 * before the update by all threads
944 __atomic_store_n(&tim->status.u32, status.u32,
947 /* keep it in list and mark timer as pending */
949 &data->priv_timer[this_lcore].list_lock);
950 status.state = RTE_TIMER_PENDING;
951 __TIMER_STAT_ADD(data->priv_timer, pending, 1);
952 status.owner = (int16_t)this_lcore;
953 /* The "RELEASE" ordering guarantees the memory
954 * operations above the status update are observed
955 * before the update by all threads
957 __atomic_store_n(&tim->status.u32, status.u32,
959 __rte_timer_reset(tim, tim->expire + tim->period,
960 tim->period, this_lcore, tim->f, tim->arg, 1,
963 &data->priv_timer[this_lcore].list_lock);
966 data->priv_timer[this_lcore].running_tim = NULL;
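/* Usage sketch, not part of the library: draining expired timers through
 * rte_timer_alt_manage() with a caller-supplied handler instead of each
 * timer's own callback. example_expiry_handler, example_poll and the
 * RTE_TIMER_USAGE_SKETCH guard are hypothetical.
 */
#ifdef RTE_TIMER_USAGE_SKETCH
static void
example_expiry_handler(struct rte_timer *tim)
{
	/* application-specific handling of the expired timer */
	RTE_SET_USED(tim);
}

static int
example_poll(uint32_t timer_data_id)
{
	/* poll only this lcore's pending list */
	unsigned int me = rte_lcore_id();

	return rte_timer_alt_manage(timer_data_id, &me, 1,
				    example_expiry_handler);
}
#endif /* RTE_TIMER_USAGE_SKETCH */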
972 /* Walk pending lists, stopping timers and calling user-specified function */
974 rte_timer_stop_all(uint32_t timer_data_id, unsigned int *walk_lcores,
976 rte_timer_stop_all_cb_t f, void *f_arg)
979 struct priv_timer *priv_timer;
981 struct rte_timer *tim, *next_tim;
982 struct rte_timer_data *timer_data;
984 TIMER_DATA_VALID_GET_OR_ERR_RET(timer_data_id, timer_data, -EINVAL);
986 for (i = 0; i < nb_walk_lcores; i++) {
987 walk_lcore = walk_lcores[i];
988 priv_timer = &timer_data->priv_timer[walk_lcore];
990 rte_spinlock_lock(&priv_timer->list_lock);
992 for (tim = priv_timer->pending_head.sl_next[0];
995 next_tim = tim->sl_next[0];
997 /* Call timer_stop with lock held */
998 __rte_timer_stop(tim, 1, timer_data);
1004 rte_spinlock_unlock(&priv_timer->list_lock);
1011 rte_timer_next_ticks(void)
1013 unsigned int lcore_id = rte_lcore_id();
1014 struct rte_timer_data *timer_data;
1015 struct priv_timer *priv_timer;
1016 const struct rte_timer *tm;
1018 int64_t left = -ENOENT;
1020 TIMER_DATA_VALID_GET_OR_ERR_RET(default_data_id, timer_data, -EINVAL);
1022 priv_timer = timer_data->priv_timer;
1023 cur_time = rte_get_timer_cycles();
1025 rte_spinlock_lock(&priv_timer[lcore_id].list_lock);
1026 tm = priv_timer[lcore_id].pending_head.sl_next[0];
1028 left = tm->expire - cur_time;
1032 rte_spinlock_unlock(&priv_timer[lcore_id].list_lock);
1037 /* dump statistics about timers */
1039 __rte_timer_dump_stats(struct rte_timer_data *timer_data __rte_unused, FILE *f)
1041 #ifdef RTE_LIBRTE_TIMER_DEBUG
1042 struct rte_timer_debug_stats sum;
1044 struct priv_timer *priv_timer = timer_data->priv_timer;
1046 memset(&sum, 0, sizeof(sum));
1047 for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
1048 sum.reset += priv_timer[lcore_id].stats.reset;
1049 sum.stop += priv_timer[lcore_id].stats.stop;
1050 sum.manage += priv_timer[lcore_id].stats.manage;
1051 sum.pending += priv_timer[lcore_id].stats.pending;
1053 fprintf(f, "Timer statistics:\n");
1054 fprintf(f, " reset = %"PRIu64"\n", sum.reset);
1055 fprintf(f, " stop = %"PRIu64"\n", sum.stop);
1056 fprintf(f, " manage = %"PRIu64"\n", sum.manage);
1057 fprintf(f, " pending = %"PRIu64"\n", sum.pending);
1059 fprintf(f, "No timer statistics, RTE_LIBRTE_TIMER_DEBUG is disabled\n");
1064 rte_timer_dump_stats(FILE *f)
1066 return rte_timer_alt_dump_stats(default_data_id, f);
1070 rte_timer_alt_dump_stats(uint32_t timer_data_id __rte_unused, FILE *f)
1072 struct rte_timer_data *timer_data;
1074 TIMER_DATA_VALID_GET_OR_ERR_RET(timer_data_id, timer_data, -EINVAL);
1076 __rte_timer_dump_stats(timer_data, f);