/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2013 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <string.h>
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>
#include <sys/queue.h>

#include <rte_atomic.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_per_lcore.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_spinlock.h>

#include "rte_timer.h"
LIST_HEAD(rte_timer_list, rte_timer);
struct priv_timer {
	struct rte_timer_list pending;  /**< list of pending timers */
	struct rte_timer_list expired;  /**< list of expired timers */
	struct rte_timer_list done;     /**< list of done timers */
	rte_spinlock_t list_lock;       /**< lock to protect list access */

	/** per-core variable that is true if a timer was updated on this
	 *  core since the last reset of the variable */
	int updated;

	unsigned prev_lcore;            /**< used for lcore round robin */

#ifdef RTE_LIBRTE_TIMER_DEBUG
	/** per-lcore statistics */
	struct rte_timer_debug_stats stats;
#endif
} __rte_cache_aligned;
/** per-lcore private info for timers */
static struct priv_timer priv_timer[RTE_MAX_LCORE];
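
/* Note: each priv_timer entry is cache aligned (see __rte_cache_aligned
 * above), so an lcore updating its own timer state does not cause false
 * sharing with its neighbours in this array. */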
/* when debug is enabled, store some statistics */
#ifdef RTE_LIBRTE_TIMER_DEBUG
#define __TIMER_STAT_ADD(name, n) do {				\
		unsigned __lcore_id = rte_lcore_id();		\
		priv_timer[__lcore_id].stats.name += (n);	\
	} while (0)
#else
#define __TIMER_STAT_ADD(name, n) do {} while (0)
#endif
/* this macro allows removing or reinserting 'var' while iterating over the list */
#define LIST_FOREACH_SAFE(var, var2, head, field)		       \
	for ((var) = ((head)->lh_first),			       \
	     (var2) = ((var) ? ((var)->field.le_next) : NULL);	       \
	     (var);						       \
	     (var) = (var2),					       \
	     (var2) = ((var) ? ((var)->field.le_next) : NULL))
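
/*
 * Illustrative use of LIST_FOREACH_SAFE (a sketch, not library code;
 * 'some_list' is a placeholder name): because the next pointer is saved
 * in 'tim2' before the loop body runs, the current element can be
 * unlinked safely, which would break a plain LIST_FOREACH:
 *
 *	struct rte_timer *tim, *tim2;
 *	LIST_FOREACH_SAFE(tim, tim2, &some_list, next) {
 *		LIST_REMOVE(tim, next);
 *	}
 */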
/* Init the timer library. */
void
rte_timer_subsystem_init(void)
{
	unsigned lcore_id;

	/* since priv_timer is static, it is zeroed by default, so only
	 * the non-zero fields need to be initialized */
	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
		LIST_INIT(&priv_timer[lcore_id].pending);
		LIST_INIT(&priv_timer[lcore_id].expired);
		LIST_INIT(&priv_timer[lcore_id].done);
		rte_spinlock_init(&priv_timer[lcore_id].list_lock);
		priv_timer[lcore_id].prev_lcore = lcore_id;
	}
}
/* Initialize the timer handle tim for use */
void
rte_timer_init(struct rte_timer *tim)
{
	union rte_timer_status status;

	status.state = RTE_TIMER_STOP;
	status.owner = RTE_TIMER_NO_OWNER;
	tim->status.u32 = status.u32;
}
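
/*
 * Typical API usage (an illustrative sketch only; my_callback and my_arg
 * are placeholder names, not part of this library):
 *
 *	struct rte_timer tim;
 *
 *	rte_timer_init(&tim);
 *	// arm a one-shot timer on the current lcore, expiring after
 *	// one second worth of HPET cycles
 *	rte_timer_reset(&tim, rte_get_hpet_hz(), SINGLE,
 *			rte_lcore_id(), my_callback, my_arg);
 */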
/*
 * if timer is pending or stopped (or running on the same core as
 * us), mark timer as configuring, and on success return the previous
 * status of the timer
 */
static int
timer_set_config_state(struct rte_timer *tim,
		       union rte_timer_status *ret_prev_status)
{
	union rte_timer_status prev_status, status;
	int success = 0;
	unsigned lcore_id;

	lcore_id = rte_lcore_id();

	/* wait until the timer is in a correct status before updating,
	 * and mark it as being configured */
	while (success == 0) {
		prev_status.u32 = tim->status.u32;

		/* timer is running on another core, exit */
		if (prev_status.state == RTE_TIMER_RUNNING &&
		    (unsigned)prev_status.owner != lcore_id)
			return -1;

		/* timer is being configured on another core */
		if (prev_status.state == RTE_TIMER_CONFIG)
			return -1;

		/* here, we know that timer is stopped or pending,
		 * mark it atomically as being configured */
		status.state = RTE_TIMER_CONFIG;
		status.owner = (int16_t)lcore_id;
		success = rte_atomic32_cmpset(&tim->status.u32,
					      prev_status.u32,
					      status.u32);
	}

	ret_prev_status->u32 = prev_status.u32;
	return 0;
}
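
/*
 * Note on the status word: 'state' and 'owner' are packed into a single
 * 32-bit value (tim->status.u32), so each transition of the timer state
 * machine (STOP/PENDING/RUNNING/CONFIG plus the owning lcore) can be
 * performed with one rte_atomic32_cmpset(), without a per-timer lock.
 */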
/*
 * if timer is pending, mark timer as running
 */
static int
timer_set_running_state(struct rte_timer *tim)
{
	union rte_timer_status prev_status, status;
	unsigned lcore_id = rte_lcore_id();
	int success = 0;

	/* wait until the timer is in a correct status before updating,
	 * and mark it as running */
	while (success == 0) {
		prev_status.u32 = tim->status.u32;

		/* timer is not pending anymore */
		if (prev_status.state != RTE_TIMER_PENDING)
			return -1;

		/* here, we know that timer is pending,
		 * mark it atomically as running */
		status.state = RTE_TIMER_RUNNING;
		status.owner = (int16_t)lcore_id;
		success = rte_atomic32_cmpset(&tim->status.u32,
					      prev_status.u32,
					      status.u32);
	}

	return 0;
}
/*
 * add in list, lock if needed
 * timer must be in config state
 * timer must not be in a list
 */
static void
timer_add(struct rte_timer *tim, unsigned tim_lcore, int local_is_locked)
{
	uint64_t cur_time = rte_get_hpet_cycles();
	unsigned lcore_id = rte_lcore_id();
	struct rte_timer *t, *t_prev;

	/* if the timer needs to be scheduled on another core, we need to
	 * lock the list; if it is on the local core, we need to lock
	 * unless we are called from rte_timer_manage() */
	if (tim_lcore != lcore_id || !local_is_locked)
		rte_spinlock_lock(&priv_timer[tim_lcore].list_lock);

	t = LIST_FIRST(&priv_timer[tim_lcore].pending);
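
	/* Note: expiry comparisons below are made on the signed difference
	 * from cur_time rather than on raw expire values, so the ordering
	 * stays correct even if the cycle counter wraps around (the same
	 * idiom as serial number arithmetic). */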
	/* list is empty or 'tim' will expire before 't' */
	if (t == NULL || ((int64_t)(tim->expire - cur_time) <
			  (int64_t)(t->expire - cur_time))) {
		LIST_INSERT_HEAD(&priv_timer[tim_lcore].pending, tim, next);
	}
	else {
		t_prev = t;

		/* find an element that will expire after 'tim' */
		LIST_FOREACH(t, &priv_timer[tim_lcore].pending, next) {
			if ((int64_t)(tim->expire - cur_time) <
			    (int64_t)(t->expire - cur_time)) {
				LIST_INSERT_BEFORE(t, tim, next);
				break;
			}
			t_prev = t;
		}

		/* not found, insert at the end of the list */
		if (t == NULL)
			LIST_INSERT_AFTER(t_prev, tim, next);
	}

	if (tim_lcore != lcore_id || !local_is_locked)
		rte_spinlock_unlock(&priv_timer[tim_lcore].list_lock);
}
/*
 * del from list, lock if needed
 * timer must be in config state
 * timer must be in a list
 */
static void
timer_del(struct rte_timer *tim, unsigned prev_owner, int local_is_locked)
{
	unsigned lcore_id = rte_lcore_id();

	/* if the timer is pending on another core, we need to lock the
	 * list; if it is on the local core, we need to lock unless we
	 * are called from rte_timer_manage() */
	if (prev_owner != lcore_id || !local_is_locked)
		rte_spinlock_lock(&priv_timer[prev_owner].list_lock);

	LIST_REMOVE(tim, next);

	if (prev_owner != lcore_id || !local_is_locked)
		rte_spinlock_unlock(&priv_timer[prev_owner].list_lock);
}
/* Reset and start the timer associated with the timer handle (private func) */
static int
__rte_timer_reset(struct rte_timer *tim, uint64_t expire,
		  uint64_t period, unsigned tim_lcore,
		  rte_timer_cb_t fct, void *arg,
		  int local_is_locked)
{
	union rte_timer_status prev_status, status;
	int ret;
	unsigned lcore_id = rte_lcore_id();

	/* round robin for tim_lcore */
	if (tim_lcore == (unsigned)LCORE_ID_ANY) {
		tim_lcore = rte_get_next_lcore(priv_timer[lcore_id].prev_lcore,
					       0, 1);
		priv_timer[lcore_id].prev_lcore = tim_lcore;
	}

	/* wait until the timer is in a correct status before updating,
	 * and mark it as being configured */
	ret = timer_set_config_state(tim, &prev_status);
	if (ret < 0)
		return -1;

	__TIMER_STAT_ADD(reset, 1);
	priv_timer[lcore_id].updated = 1;

	/* remove it from list */
	if (prev_status.state == RTE_TIMER_PENDING ||
	    prev_status.state == RTE_TIMER_RUNNING) {
		timer_del(tim, prev_status.owner, local_is_locked);
		__TIMER_STAT_ADD(pending, -1);
	}

	tim->period = period;
	tim->expire = expire;
	tim->f = fct;
	tim->arg = arg;

	__TIMER_STAT_ADD(pending, 1);
	timer_add(tim, tim_lcore, local_is_locked);

	/* update state: as we are in CONFIG state, only we can modify
	 * the state, so we don't need to use cmpset() here */
	rte_wmb();
	status.state = RTE_TIMER_PENDING;
	status.owner = (int16_t)tim_lcore;
	tim->status.u32 = status.u32;

	return 0;
}
/* Reset and start the timer associated with the timer handle tim */
int
rte_timer_reset(struct rte_timer *tim, uint64_t ticks,
		enum rte_timer_type type, unsigned tim_lcore,
		rte_timer_cb_t fct, void *arg)
{
	uint64_t cur_time = rte_get_hpet_cycles();
	uint64_t period;

	if (unlikely((tim_lcore != (unsigned)LCORE_ID_ANY) &&
	    !rte_lcore_is_enabled(tim_lcore)))
		return -1;

	if (type == PERIODICAL)
		period = ticks;
	else
		period = 0;

	return __rte_timer_reset(tim, cur_time + ticks, period, tim_lcore,
				 fct, arg, 0);
}
/* loop until rte_timer_reset() succeeds */
void
rte_timer_reset_sync(struct rte_timer *tim, uint64_t ticks,
		     enum rte_timer_type type, unsigned tim_lcore,
		     rte_timer_cb_t fct, void *arg)
{
	while (rte_timer_reset(tim, ticks, type, tim_lcore,
			       fct, arg) != 0)
		;
}
/* Stop the timer associated with the timer handle tim */
int
rte_timer_stop(struct rte_timer *tim)
{
	union rte_timer_status prev_status, status;
	unsigned lcore_id = rte_lcore_id();
	int ret;

	/* wait until the timer is in a correct status before updating,
	 * and mark it as being configured */
	ret = timer_set_config_state(tim, &prev_status);
	if (ret < 0)
		return -1;

	__TIMER_STAT_ADD(stop, 1);
	priv_timer[lcore_id].updated = 1;

	/* remove it from list */
	if (prev_status.state == RTE_TIMER_PENDING ||
	    prev_status.state == RTE_TIMER_RUNNING) {
		timer_del(tim, prev_status.owner, 0);
		__TIMER_STAT_ADD(pending, -1);
	}

	/* mark timer as stopped */
	rte_wmb();
	status.state = RTE_TIMER_STOP;
	status.owner = RTE_TIMER_NO_OWNER;
	tim->status.u32 = status.u32;

	return 0;
}
/* loop until rte_timer_stop() succeeds */
void
rte_timer_stop_sync(struct rte_timer *tim)
{
	while (rte_timer_stop(tim) != 0)
		;
}
/* Test the PENDING status of the timer handle tim */
int
rte_timer_pending(struct rte_timer *tim)
{
	return tim->status.state == RTE_TIMER_PENDING;
}
/* must be called periodically; runs all timers that have expired */
void rte_timer_manage(void)
{
	union rte_timer_status status;
	struct rte_timer *tim, *tim2;
	unsigned lcore_id = rte_lcore_id();
	uint64_t cur_time = rte_get_hpet_cycles();
	int ret;

	__TIMER_STAT_ADD(manage, 1);

	/* browse ordered list, add expired timers in 'expired' list */
	rte_spinlock_lock(&priv_timer[lcore_id].list_lock);

	LIST_FOREACH_SAFE(tim, tim2, &priv_timer[lcore_id].pending, next) {
		if ((int64_t)(cur_time - tim->expire) < 0)
			break;

		LIST_REMOVE(tim, next);
		LIST_INSERT_HEAD(&priv_timer[lcore_id].expired, tim, next);
	}

	/* for each timer of 'expired' list, check state and execute callback */
	while ((tim = LIST_FIRST(&priv_timer[lcore_id].expired)) != NULL) {
		ret = timer_set_running_state(tim);

		/* remove from expired list, and add it in done list */
		LIST_REMOVE(tim, next);
		LIST_INSERT_HEAD(&priv_timer[lcore_id].done, tim, next);

		/* this timer was not pending, continue */
		if (ret < 0)
			continue;

		rte_spinlock_unlock(&priv_timer[lcore_id].list_lock);

		priv_timer[lcore_id].updated = 0;

		/* execute callback function with list unlocked */
		tim->f(tim, tim->arg);

		rte_spinlock_lock(&priv_timer[lcore_id].list_lock);

		/* the timer was stopped or reloaded by the callback
		 * function, we have nothing to do here */
		if (priv_timer[lcore_id].updated == 1)
			continue;

		if (tim->period == 0) {
			/* remove from done list and mark timer as stopped */
			LIST_REMOVE(tim, next);
			__TIMER_STAT_ADD(pending, -1);
			status.state = RTE_TIMER_STOP;
			status.owner = RTE_TIMER_NO_OWNER;
			rte_wmb();
			tim->status.u32 = status.u32;
		}
		else {
			/* keep it in done list and mark timer as pending */
			status.state = RTE_TIMER_PENDING;
			status.owner = (int16_t)lcore_id;
			rte_wmb();
			tim->status.u32 = status.u32;
		}
	}

	/* finally, browse the done list; some timers may have to be
	 * rescheduled automatically */
	LIST_FOREACH_SAFE(tim, tim2, &priv_timer[lcore_id].done, next) {

		/* reset may fail if the timer is being modified; in this
		 * case the timer will remain in the 'done' list until the
		 * core that is modifying it removes it */
		__rte_timer_reset(tim, cur_time + tim->period,
				  tim->period, lcore_id, tim->f,
				  tim->arg, 1);
	}

	/* job finished, unlock the list lock */
	rte_spinlock_unlock(&priv_timer[lcore_id].list_lock);
}
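
/*
 * Typical polling pattern (an illustrative sketch only; the 'quit' flag
 * and do_application_work() are placeholders, not part of this library):
 * each lcore that owns timers must call rte_timer_manage() periodically
 * so that the callbacks of its expired timers get run on that lcore.
 *
 *	while (!quit) {
 *		do_application_work();
 *		rte_timer_manage();
 *	}
 */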
/* dump statistics about timers */
void rte_timer_dump_stats(void)
{
#ifdef RTE_LIBRTE_TIMER_DEBUG
	struct rte_timer_debug_stats sum;
	unsigned lcore_id;

	memset(&sum, 0, sizeof(sum));
	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
		sum.reset += priv_timer[lcore_id].stats.reset;
		sum.stop += priv_timer[lcore_id].stats.stop;
		sum.manage += priv_timer[lcore_id].stats.manage;
		sum.pending += priv_timer[lcore_id].stats.pending;
	}
	printf("Timer statistics:\n");
	printf("  reset = %"PRIu64"\n", sum.reset);
	printf("  stop = %"PRIu64"\n", sum.stop);
	printf("  manage = %"PRIu64"\n", sum.manage);
	printf("  pending = %"PRIu64"\n", sum.pending);
#else
	printf("No timer statistics, RTE_LIBRTE_TIMER_DEBUG is disabled\n");
#endif
}