X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=lib%2Flibrte_timer%2Frte_timer.c;h=30c7b0ab43f8a93f4cfcd7a9d50af3c5f178d3bc;hb=c9b413c3b1cfb52740acb2a0fbf1b73acdb93646;hp=604ecabcd22fb7893ddf7f9a38d11f52563b648e;hpb=369991d997e4abdee355e19ffbb41a4d246cafa2;p=dpdk.git

diff --git a/lib/librte_timer/rte_timer.c b/lib/librte_timer/rte_timer.c
index 604ecabcd2..30c7b0ab43 100644
--- a/lib/librte_timer/rte_timer.c
+++ b/lib/librte_timer/rte_timer.c
@@ -241,24 +241,17 @@ timer_get_prev_entries_for_node(struct rte_timer *tim, unsigned tim_lcore,
 	}
 }
 
-/*
- * add in list, lock if needed
+/* call with lock held as necessary
+ * add in list
  * timer must be in config state
  * timer must not be in a list
  */
 static void
-timer_add(struct rte_timer *tim, unsigned tim_lcore, int local_is_locked)
+timer_add(struct rte_timer *tim, unsigned int tim_lcore)
 {
-	unsigned lcore_id = rte_lcore_id();
 	unsigned lvl;
 	struct rte_timer *prev[MAX_SKIPLIST_DEPTH+1];
 
-	/* if timer needs to be scheduled on another core, we need to
-	 * lock the list; if it is on local core, we need to lock if
-	 * we are not called from rte_timer_manage() */
-	if (tim_lcore != lcore_id || !local_is_locked)
-		rte_spinlock_lock(&priv_timer[tim_lcore].list_lock);
-
 	/* find where exactly this element goes in the list of elements
 	 * for each depth. */
 	timer_get_prev_entries(tim->expire, tim_lcore, prev);
@@ -282,9 +275,6 @@ timer_add(struct rte_timer *tim, unsigned tim_lcore, int local_is_locked)
 	 * NOTE: this is not atomic on 32-bit*/
 	priv_timer[tim_lcore].pending_head.expire = priv_timer[tim_lcore].\
 			pending_head.sl_next[0]->expire;
-
-	if (tim_lcore != lcore_id || !local_is_locked)
-		rte_spinlock_unlock(&priv_timer[tim_lcore].list_lock);
 }
 
 /*
@@ -379,8 +369,15 @@ __rte_timer_reset(struct rte_timer *tim, uint64_t expire,
 	tim->f = fct;
 	tim->arg = arg;
 
+	/* if timer needs to be scheduled on another core, we need to
+	 * lock the destination list; if it is on local core, we need to lock if
+	 * we are not called from rte_timer_manage()
+	 */
+	if (tim_lcore != lcore_id || !local_is_locked)
+		rte_spinlock_lock(&priv_timer[tim_lcore].list_lock);
+
 	__TIMER_STAT_ADD(pending, 1);
-	timer_add(tim, tim_lcore, local_is_locked);
+	timer_add(tim, tim_lcore);
 
 	/* update state: as we are in CONFIG state, only us can modify
 	 * the state so we don't need to use cmpset() here */
@@ -389,6 +386,9 @@ __rte_timer_reset(struct rte_timer *tim, uint64_t expire,
 	status.owner = (int16_t)tim_lcore;
 	tim->status.u32 = status.u32;
 
+	if (tim_lcore != lcore_id || !local_is_locked)
+		rte_spinlock_unlock(&priv_timer[tim_lcore].list_lock);
+
 	return 0;
 }
 
@@ -403,7 +403,7 @@ rte_timer_reset(struct rte_timer *tim, uint64_t ticks,
 
 	if (unlikely((tim_lcore != (unsigned)LCORE_ID_ANY) &&
 			!(rte_lcore_is_enabled(tim_lcore) ||
-			  rte_lcore_has_role(tim_lcore, ROLE_SERVICE))))
+			rte_lcore_has_role(tim_lcore, ROLE_SERVICE))))
 		return -1;
 
 	if (type == PERIODICAL)
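
The net effect of the hunks above is that timer_add() now assumes its caller already holds the destination lcore's list_lock, and __rte_timer_reset() holds that lock across both the skiplist insertion and the status update that marks the timer PENDING. The sketch below is illustrative only: the names (toy_timer, pending_list, add_locked, toy_reset) are hypothetical and not DPDK code; it only shows the "caller locks, callee assumes the lock is held" convention the patch adopts, built on the real rte_spinlock API.

#include <rte_spinlock.h>

struct toy_timer {
	struct toy_timer *next;
	int pending;                 /* stands in for tim->status */
};

struct pending_list {
	rte_spinlock_t list_lock;    /* one lock per destination list */
	struct toy_timer *head;
};

/* call with list->list_lock held, like the reworked timer_add() */
static void
add_locked(struct pending_list *list, struct toy_timer *tim)
{
	tim->next = list->head;
	list->head = tim;
}

/* like the reworked __rte_timer_reset(): take the lock once, then both
 * insert the timer and publish its new state before releasing it */
static void
toy_reset(struct pending_list *list, struct toy_timer *tim)
{
	rte_spinlock_lock(&list->list_lock);
	add_locked(list, tim);
	tim->pending = 1;
	rte_spinlock_unlock(&list->list_lock);
}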
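
For reference, the code path changed here is reached through the public rte_timer_reset() API. A minimal usage sketch follows, assuming the EAL is already initialized and lcore 1 is enabled (error handling trimmed); resetting a timer onto another lcore is the cross-core case that takes the destination list_lock above.

#include <rte_cycles.h>
#include <rte_timer.h>

static struct rte_timer tim;

/* runs from rte_timer_manage() on the lcore the timer was reset to */
static void
timer_cb(struct rte_timer *t, void *arg)
{
	(void)t;
	(void)arg;
}

static void
arm_timer_on_lcore1(void)
{
	rte_timer_subsystem_init();
	rte_timer_init(&tim);

	/* one-shot timer, one second from now, owned by lcore 1 */
	if (rte_timer_reset(&tim, rte_get_timer_hz(), SINGLE, 1,
			timer_cb, NULL) < 0) {
		/* timer was busy (RUNNING or CONFIG); caller may retry */
	}
}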