diff --git a/lib/librte_timer/rte_timer.c b/lib/librte_timer/rte_timer.c
index 269a992b98..78158ab312 100644
--- a/lib/librte_timer/rte_timer.c
+++ b/lib/librte_timer/rte_timer.c
@@ -35,6 +35,8 @@
 #include <string.h>
 #include <stdio.h>
 #include <stdint.h>
+#include <inttypes.h>
+#include <assert.h>
 #include <sys/queue.h>
 
 #include <rte_atomic.h>
@@ -43,13 +45,12 @@
 #include <rte_per_lcore.h>
 #include <rte_memory.h>
 #include <rte_memzone.h>
 #include <rte_launch.h>
-#include <rte_tailq.h>
 #include <rte_eal.h>
-#include <rte_per_lcore.h>
 #include <rte_lcore.h>
 #include <rte_branch_prediction.h>
 #include <rte_spinlock.h>
 #include <rte_random.h>
+#include <rte_pause.h>
 
 #include "rte_timer.h"
 
@@ -68,6 +69,9 @@ struct priv_timer {
 
 	unsigned prev_lcore;              /**< used for lcore round robin */
 
+	/** running timer on this lcore now */
+	struct rte_timer *running_tim;
+
 #ifdef RTE_LIBRTE_TIMER_DEBUG
 	/** per-lcore statistics */
 	struct rte_timer_debug_stats stats;
@@ -79,9 +83,10 @@ static struct priv_timer priv_timer[RTE_MAX_LCORE];
 
 /* when debug is enabled, store some statistics */
 #ifdef RTE_LIBRTE_TIMER_DEBUG
-#define __TIMER_STAT_ADD(name, n) do {				\
-		unsigned __lcore_id = rte_lcore_id();		\
-		priv_timer[__lcore_id].stats.name += (n);	\
+#define __TIMER_STAT_ADD(name, n) do {					\
+		unsigned __lcore_id = rte_lcore_id();			\
+		if (__lcore_id < RTE_MAX_LCORE)				\
+			priv_timer[__lcore_id].stats.name += (n);	\
 	} while(0)
 #else
 #define __TIMER_STAT_ADD(name, n) do {} while(0)
@@ -133,9 +138,12 @@ timer_set_config_state(struct rte_timer *tim,
 	while (success == 0) {
 		prev_status.u32 = tim->status.u32;
 
-		/* timer is running on another core, exit */
+		/* timer is running on another core
+		 * or ready to run on local core, exit
+		 */
 		if (prev_status.state == RTE_TIMER_RUNNING &&
-		    (unsigned)prev_status.owner != lcore_id)
+		    (prev_status.owner != (uint16_t)lcore_id ||
+		     tim != priv_timer[lcore_id].running_tim))
 			return -1;
 
 		/* timer is being configured on another core */
@@ -175,7 +183,7 @@ timer_set_running_state(struct rte_timer *tim)
 			return -1;
 
 		/* here, we know that timer is stopped or pending,
-		 * mark it atomically as beeing configured */
+		 * mark it atomically as being configured */
 		status.state = RTE_TIMER_RUNNING;
 		status.owner = (int16_t)lcore_id;
 		success = rte_atomic32_cmpset(&tim->status.u32,
@@ -366,9 +374,16 @@ __rte_timer_reset(struct rte_timer *tim, uint64_t expire,
 
 	/* round robin for tim_lcore */
 	if (tim_lcore == (unsigned)LCORE_ID_ANY) {
-		tim_lcore = rte_get_next_lcore(priv_timer[lcore_id].prev_lcore,
-					       0, 1);
-		priv_timer[lcore_id].prev_lcore = tim_lcore;
+		if (lcore_id < RTE_MAX_LCORE) {
+			/* EAL thread with valid lcore_id */
+			tim_lcore = rte_get_next_lcore(
+				priv_timer[lcore_id].prev_lcore,
+				0, 1);
+			priv_timer[lcore_id].prev_lcore = tim_lcore;
+		} else
+			/* non-EAL threads do not run rte_timer_manage(),
+			 * so schedule the timer on the first enabled lcore. */
+			tim_lcore = rte_get_next_lcore(LCORE_ID_ANY, 0, 1);
 	}
 
 	/* wait that the timer is in correct status before update,
@@ -378,7 +393,8 @@ __rte_timer_reset(struct rte_timer *tim, uint64_t expire,
 		return -1;
 
 	__TIMER_STAT_ADD(reset, 1);
-	if (prev_status.state == RTE_TIMER_RUNNING) {
+	if (prev_status.state == RTE_TIMER_RUNNING &&
+	    lcore_id < RTE_MAX_LCORE) {
 		priv_timer[lcore_id].updated = 1;
 	}
 
@@ -416,7 +432,8 @@ rte_timer_reset(struct rte_timer *tim, uint64_t ticks,
 	uint64_t period;
 
 	if (unlikely((tim_lcore != (unsigned)LCORE_ID_ANY) &&
-			!rte_lcore_is_enabled(tim_lcore)))
+			!(rte_lcore_is_enabled(tim_lcore) ||
+			  rte_lcore_has_role(tim_lcore, ROLE_SERVICE))))
 		return -1;
 
 	if (type == PERIODICAL)
@@ -424,10 +441,8 @@ rte_timer_reset(struct rte_timer *tim, uint64_t ticks,
 	else
 		period = 0;
 
-	__rte_timer_reset(tim, cur_time + ticks, period, tim_lcore,
+	return __rte_timer_reset(tim, cur_time + ticks, period, tim_lcore,
 			  fct, arg, 0);
-
-	return 0;
 }
 
 /* loop until rte_timer_reset() succeed */
@@ -437,7 +452,8 @@ rte_timer_reset_sync(struct rte_timer *tim, uint64_t ticks,
 		     rte_timer_cb_t fct, void *arg)
 {
 	while (rte_timer_reset(tim, ticks, type, tim_lcore,
-	       fct, arg) != 0);
+	       fct, arg) != 0)
+		rte_pause();
 }
 
 /* Stop the timer associated with the timer handle tim */
@@ -455,7 +471,8 @@ rte_timer_stop(struct rte_timer *tim)
 		return -1;
 
 	__TIMER_STAT_ADD(stop, 1);
-	if (prev_status.state == RTE_TIMER_RUNNING) {
+	if (prev_status.state == RTE_TIMER_RUNNING &&
+	    lcore_id < RTE_MAX_LCORE) {
 		priv_timer[lcore_id].updated = 1;
 	}
 
@@ -494,21 +511,25 @@ void rte_timer_manage(void)
 {
 	union rte_timer_status status;
 	struct rte_timer *tim, *next_tim;
+	struct rte_timer *run_first_tim, **pprev;
 	unsigned lcore_id = rte_lcore_id();
 	struct rte_timer *prev[MAX_SKIPLIST_DEPTH + 1];
 	uint64_t cur_time;
 	int i, ret;
 
+	/* timer manager only runs on an EAL thread with a valid lcore_id */
+	assert(lcore_id < RTE_MAX_LCORE);
+
 	__TIMER_STAT_ADD(manage, 1);
 	/* optimize for the case where per-cpu list is empty */
 	if (priv_timer[lcore_id].pending_head.sl_next[0] == NULL)
 		return;
 	cur_time = rte_get_timer_cycles();
 
-#ifdef RTE_ARCH_X86_64
-	/* on 64-bit the value cached in the pending_head.expired will be updated
-	 * atomically, so we can consult that for a quick check here outside the
-	 * lock */
+#ifdef RTE_ARCH_64
+	/* on 64-bit the value cached in the pending_head.expired will be
+	 * updated atomically, so we can consult that for a quick check here
+	 * outside the lock */
 	if (likely(priv_timer[lcore_id].pending_head.expire > cur_time))
 		return;
 #endif
@@ -518,8 +539,10 @@ void rte_timer_manage(void)
 
 	/* if nothing to do just unlock and return */
 	if (priv_timer[lcore_id].pending_head.sl_next[0] == NULL ||
-	    priv_timer[lcore_id].pending_head.sl_next[0]->expire > cur_time)
-		goto done;
+	    priv_timer[lcore_id].pending_head.sl_next[0]->expire > cur_time) {
+		rte_spinlock_unlock(&priv_timer[lcore_id].list_lock);
+		return;
+	}
 
 	/* save start of list of expired timers */
 	tim = priv_timer[lcore_id].pending_head.sl_next[0];
@@ -527,30 +550,49 @@ void rte_timer_manage(void)
 
 	/* break the existing list at current time point */
 	timer_get_prev_entries(cur_time, lcore_id, prev);
 	for (i = priv_timer[lcore_id].curr_skiplist_depth -1; i >= 0; i--) {
-		priv_timer[lcore_id].pending_head.sl_next[i] = prev[i]->sl_next[i];
+		if (prev[i] == &priv_timer[lcore_id].pending_head)
+			continue;
+		priv_timer[lcore_id].pending_head.sl_next[i] =
+			prev[i]->sl_next[i];
 		if (prev[i]->sl_next[i] == NULL)
 			priv_timer[lcore_id].curr_skiplist_depth--;
 		prev[i] ->sl_next[i] = NULL;
 	}
 
-	/* now scan expired list and call callbacks */
+	/* transition run-list from PENDING to RUNNING */
+	run_first_tim = tim;
+	pprev = &run_first_tim;
+
 	for ( ; tim != NULL; tim = next_tim) {
 		next_tim = tim->sl_next[0];
 
 		ret = timer_set_running_state(tim);
+		if (likely(ret == 0)) {
+			pprev = &tim->sl_next[0];
+		} else {
+			/* another core is trying to re-config this one,
+			 * remove it from local expired list
+			 */
+			*pprev = next_tim;
+		}
+	}
 
-		/* this timer was not pending, continue */
-		if (ret < 0)
-			continue;
+	/* update the next to expire timer value */
+	priv_timer[lcore_id].pending_head.expire =
+		(priv_timer[lcore_id].pending_head.sl_next[0] == NULL) ? 0 :
+			priv_timer[lcore_id].pending_head.sl_next[0]->expire;
 
-		rte_spinlock_unlock(&priv_timer[lcore_id].list_lock);
+	rte_spinlock_unlock(&priv_timer[lcore_id].list_lock);
 
+	/* now scan expired list and call callbacks */
+	for (tim = run_first_tim; tim != NULL; tim = next_tim) {
+		next_tim = tim->sl_next[0];
 		priv_timer[lcore_id].updated = 0;
+		priv_timer[lcore_id].running_tim = tim;
 
 		/* execute callback function with list unlocked */
 		tim->f(tim, tim->arg);
 
-		rte_spinlock_lock(&priv_timer[lcore_id].list_lock);
 		__TIMER_STAT_ADD(pending, -1);
 		/* the timer was stopped or reloaded by the callback
 		 * function, we have nothing to do here */
@@ -566,23 +608,18 @@ void rte_timer_manage(void)
 		}
 		else {
 			/* keep it in list and mark timer as pending */
+			rte_spinlock_lock(&priv_timer[lcore_id].list_lock);
 			status.state = RTE_TIMER_PENDING;
 			__TIMER_STAT_ADD(pending, 1);
 			status.owner = (int16_t)lcore_id;
 			rte_wmb();
 			tim->status.u32 = status.u32;
-			__rte_timer_reset(tim, cur_time + tim->period,
-				tim->period, lcore_id, tim->f, tim->arg, 1);
+			__rte_timer_reset(tim, tim->expire + tim->period,
+				tim->period, lcore_id, tim->f, tim->arg, 1);
+			rte_spinlock_unlock(&priv_timer[lcore_id].list_lock);
 		}
 	}
-
-	/* update the next to expire timer value */
-	priv_timer[lcore_id].pending_head.expire =
-		(priv_timer[lcore_id].pending_head.sl_next[0] == NULL) ? 0 :
-			priv_timer[lcore_id].pending_head.sl_next[0]->expire;
-done:
-	/* job finished, unlock the list lock */
-	rte_spinlock_unlock(&priv_timer[lcore_id].list_lock);
+	priv_timer[lcore_id].running_tim = NULL;
 }
 
 /* dump statistics about timers */
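---

For readers following the rte_timer_manage() rework above, the sketch below shows the call pattern the reworked code expects; it is not part of the patch. It is a minimal example assuming a standard EAL application (rte_eal_init() succeeding with suitable arguments) that runs everything on the initial lcore; the one-second period, the name timer_cb, and the printf body are illustrative only, and error handling is trimmed.

#include <stdio.h>
#include <rte_common.h>
#include <rte_eal.h>
#include <rte_lcore.h>
#include <rte_cycles.h>
#include <rte_timer.h>

static struct rte_timer tim;

/* The callback runs with the skiplist unlocked (tim->f(tim, tim->arg)
 * above), so it may safely reset or stop its own timer; doing so sets
 * priv_timer[lcore].updated and rte_timer_manage() then skips re-arming. */
static void
timer_cb(struct rte_timer *t __rte_unused, void *arg __rte_unused)
{
	printf("timer fired on lcore %u\n", rte_lcore_id());
}

int
main(int argc, char **argv)
{
	if (rte_eal_init(argc, argv) < 0)
		return -1;

	rte_timer_subsystem_init();
	rte_timer_init(&tim);

	/* PERIODICAL: rte_timer_manage() itself re-arms the timer at
	 * tim->expire + tim->period, as changed in the last hunk. */
	rte_timer_reset_sync(&tim, rte_get_timer_hz(), PERIODICAL,
			     rte_lcore_id(), timer_cb, NULL);

	/* rte_timer_manage() must run on an EAL lcore: see the assert
	 * added at the top of the function. */
	for (;;)
		rte_timer_manage();

	return 0;
}

Note the design choice visible in the last hunk: re-arming a periodic timer from tim->expire + tim->period rather than cur_time + tim->period means one late rte_timer_manage() call does not push every subsequent expiry back, so the long-run callback rate stays at one per period.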