lib: fix typos
diff --git a/lib/librte_timer/rte_timer.c b/lib/librte_timer/rte_timer.c
index 7457d32..88826f5 100644
--- a/lib/librte_timer/rte_timer.c
+++ b/lib/librte_timer/rte_timer.c
 #include <rte_cycles.h>
 #include <rte_per_lcore.h>
 #include <rte_memory.h>
-#include <rte_memzone.h>
 #include <rte_launch.h>
 #include <rte_eal.h>
-#include <rte_per_lcore.h>
 #include <rte_lcore.h>
 #include <rte_branch_prediction.h>
 #include <rte_spinlock.h>
 #include <rte_random.h>
+#include <rte_pause.h>
 
 #include "rte_timer.h"
 
@@ -69,6 +68,9 @@ struct priv_timer {
 
        unsigned prev_lcore;              /**< used for lcore round robin */
 
+       /** running timer on this lcore now */
+       struct rte_timer *running_tim;
+
 #ifdef RTE_LIBRTE_TIMER_DEBUG
        /** per-lcore statistics */
        struct rte_timer_debug_stats stats;
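
The new running_tim field records which timer's callback rte_timer_manage()
is currently executing on this lcore: it is set just before each callback is
invoked and cleared once the expired-run list has been drained, as the
rte_timer_manage() hunks further down show. timer_set_config_state() consults
it in the next hunk.
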
@@ -135,9 +137,12 @@ timer_set_config_state(struct rte_timer *tim,
        while (success == 0) {
                prev_status.u32 = tim->status.u32;
 
-               /* timer is running on another core, exit */
+               /* timer is running on another core
+                * or ready to run on local core, exit
+                */
                if (prev_status.state == RTE_TIMER_RUNNING &&
-                   prev_status.owner != (uint16_t)lcore_id)
+                   (prev_status.owner != (uint16_t)lcore_id ||
+                    tim != priv_timer[lcore_id].running_tim))
                        return -1;
 
                /* timer is being configured on another core */
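
With the stricter check above, a timer whose state is RUNNING and owned by the
local lcore may only be reconfigured when it is the timer whose callback is
currently executing; other timers already pulled onto this core's run list are
rejected with -1 until their own callbacks have run. The running timer itself
can still be re-armed or stopped from inside its callback. A minimal sketch of
that still-permitted case (not part of the patch; the callback name and the
one-second period are made up):

	#include <rte_timer.h>
	#include <rte_cycles.h>
	#include <rte_lcore.h>

	/* SINGLE timer that re-arms itself from its own callback.  While the
	 * callback runs, the timer is RUNNING and owned by this lcore, but
	 * tim == priv_timer[lcore_id].running_tim, so the CONFIG transition
	 * in timer_set_config_state() is allowed and rte_timer_reset()
	 * succeeds. */
	static void
	rearm_cb(struct rte_timer *tim, void *arg)
	{
		/* fire again roughly one second from now, on the same lcore */
		rte_timer_reset(tim, rte_get_timer_hz(), SINGLE,
				rte_lcore_id(), rearm_cb, arg);
	}
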
@@ -177,7 +182,7 @@ timer_set_running_state(struct rte_timer *tim)
                        return -1;
 
                /* here, we know that timer is stopped or pending,
-                * mark it atomically as beeing configured */
+                * mark it atomically as being configured */
                status.state = RTE_TIMER_RUNNING;
                status.owner = (int16_t)lcore_id;
                success = rte_atomic32_cmpset(&tim->status.u32,
@@ -190,7 +195,7 @@ timer_set_running_state(struct rte_timer *tim)
 
 /*
  * Return a skiplist level for a new entry.
- * This probabalistically gives a level with p=1/4 that an entry at level n
+ * This probabilistically gives a level with p=1/4 that an entry at level n
  * will also appear at level n+1.
  */
 static uint32_t
@@ -426,7 +431,8 @@ rte_timer_reset(struct rte_timer *tim, uint64_t ticks,
        uint64_t period;
 
        if (unlikely((tim_lcore != (unsigned)LCORE_ID_ANY) &&
-                       !rte_lcore_is_enabled(tim_lcore)))
+                       !(rte_lcore_is_enabled(tim_lcore) ||
+                               rte_lcore_has_role(tim_lcore, ROLE_SERVICE))))
                return -1;
 
        if (type == PERIODICAL)
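
rte_timer_reset() now also accepts lcores that have the SERVICE role, not only
enabled worker lcores. Expiry callbacks on such an lcore still only run when
something there calls rte_timer_manage(); one way to wire that up is a service
function along these lines (the function name is hypothetical, and the
registration through the rte_service API is omitted):

	#include <stdint.h>
	#include <rte_common.h>
	#include <rte_timer.h>

	/* Service body: drain this lcore's timer list so that timers reset
	 * with tim_lcore pointing at a service lcore actually have their
	 * callbacks executed. */
	static int32_t
	timer_service_cb(void *arg __rte_unused)
	{
		rte_timer_manage();
		return 0;
	}
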
@@ -519,7 +525,7 @@ void rte_timer_manage(void)
                return;
        cur_time = rte_get_timer_cycles();
 
-#ifdef RTE_ARCH_X86_64
+#ifdef RTE_ARCH_64
        /* on 64-bit the value cached in the pending_head.expired will be
         * updated atomically, so we can consult that for a quick check here
         * outside the lock */
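
RTE_ARCH_64 is defined for every 64-bit target (x86-64, arm64, ppc64, ...), so
this quick unlocked peek at pending_head.expired is no longer restricted to
x86-64; naturally aligned 64-bit loads and stores are atomic on those
architectures as well.
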
@@ -564,10 +570,9 @@ void rte_timer_manage(void)
                        pprev = &tim->sl_next[0];
                } else {
                        /* another core is trying to re-config this one,
-                        * remove it from local expired list and put it
-                        * back on the priv_timer[] skip list */
+                        * remove it from local expired list
+                        */
                        *pprev = next_tim;
-                       timer_add(tim, lcore_id, 1);
                }
        }
 
@@ -582,6 +587,7 @@ void rte_timer_manage(void)
        for (tim = run_first_tim; tim != NULL; tim = next_tim) {
                next_tim = tim->sl_next[0];
                priv_timer[lcore_id].updated = 0;
+               priv_timer[lcore_id].running_tim = tim;
 
                /* execute callback function with list unlocked */
                tim->f(tim, tim->arg);
@@ -607,11 +613,12 @@ void rte_timer_manage(void)
                        status.owner = (int16_t)lcore_id;
                        rte_wmb();
                        tim->status.u32 = status.u32;
-                       __rte_timer_reset(tim, cur_time + tim->period,
+                       __rte_timer_reset(tim, tim->expire + tim->period,
                                tim->period, lcore_id, tim->f, tim->arg, 1);
                        rte_spinlock_unlock(&priv_timer[lcore_id].list_lock);
                }
        }
+       priv_timer[lcore_id].running_tim = NULL;
 }
 
 /* dump statistics about timers */
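
The rescheduling change above keeps periodical timers from drifting. With
hypothetical numbers: a timer with a period of 100 cycles is due at
tim->expire = 1000, but rte_timer_manage() only reaches it at cur_time = 1007.
Rescheduling from cur_time would set the next expiry to 1107, so every late
pass pushes the whole schedule back; rescheduling from tim->expire keeps it at
1100 and the callback stays on its original cadence.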