diff --git a/lib/librte_timer/rte_timer.c b/lib/librte_timer/rte_timer.c
index ae5d236..6d19ce4 100644
--- a/lib/librte_timer/rte_timer.c
+++ b/lib/librte_timer/rte_timer.c
@@ -10,9 +10,9 @@
 #include <assert.h>
 #include <sys/queue.h>
 
-#include <rte_atomic.h>
 #include <rte_common.h>
 #include <rte_cycles.h>
+#include <rte_eal_memconfig.h>
 #include <rte_per_lcore.h>
 #include <rte_memory.h>
 #include <rte_launch.h>
@@ -24,7 +24,7 @@
 #include <rte_pause.h>
 #include <rte_memzone.h>
 #include <rte_malloc.h>
-#include <rte_compat.h>
+#include <rte_errno.h>
 
 #include "rte_timer.h"
 
@@ -60,13 +60,12 @@ struct rte_timer_data {
 };
 
 #define RTE_MAX_DATA_ELS 64
+static const struct rte_memzone *rte_timer_data_mz;
+static int *volatile rte_timer_mz_refcnt;
 static struct rte_timer_data *rte_timer_data_arr;
 static const uint32_t default_data_id;
 static uint32_t rte_timer_subsystem_initialized;
 
-/* For maintaining older interfaces for a period */
-static struct rte_timer_data default_timer_data;
-
 /* when debug is enabled, store some statistics */
 #ifdef RTE_LIBRTE_TIMER_DEBUG
 #define __TIMER_STAT_ADD(priv_timer, name, n) do {                     \
@@ -81,7 +80,8 @@ static struct rte_timer_data default_timer_data;
 static inline int
 timer_data_valid(uint32_t id)
 {
-       return !!(rte_timer_data_arr[id].internal_flags & FL_ALLOCATED);
+       return rte_timer_data_arr &&
+               (rte_timer_data_arr[id].internal_flags & FL_ALLOCATED);
 }
 
 /* validate ID and retrieve timer data pointer, or return error value */
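
Note on this hunk: the added NULL test matters because rte_timer_data_arr
is only assigned once rte_timer_subsystem_init() has looked up or reserved
the shared memzone.  A minimal caller sketch (hypothetical helper, not part
of the patch), assuming the validation macro below maps a bad ID to
-EINVAL:

    #include <rte_timer.h>

    /* With the guard in place, validating an ID before init fails
     * cleanly instead of dereferencing the NULL rte_timer_data_arr.
     */
    static int
    stop_before_init(struct rte_timer *tim)
    {
            return rte_timer_alt_stop(0, tim); /* -EINVAL, not a crash */
    }
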
@@ -91,7 +91,7 @@ timer_data_valid(uint32_t id)
        timer_data = &rte_timer_data_arr[id];                           \
 } while (0)
 
-int __rte_experimental
+int
 rte_timer_data_alloc(uint32_t *id_ptr)
 {
        int i;
@@ -115,7 +115,7 @@ rte_timer_data_alloc(uint32_t *id_ptr)
        return -ENOSPC;
 }
 
-int __rte_experimental
+int
 rte_timer_data_dealloc(uint32_t id)
 {
        struct rte_timer_data *timer_data;
@@ -126,22 +126,6 @@ rte_timer_data_dealloc(uint32_t id)
        return 0;
 }
 
-void
-rte_timer_subsystem_init_v20(void)
-{
-       unsigned lcore_id;
-       struct priv_timer *priv_timer = default_timer_data.priv_timer;
-
-       /* since priv_timer is static, it's zeroed by default, so only init some
-        * fields.
-        */
-       for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id ++) {
-               rte_spinlock_init(&priv_timer[lcore_id].list_lock);
-               priv_timer[lcore_id].prev_lcore = lcore_id;
-       }
-}
-VERSION_SYMBOL(rte_timer_subsystem_init, _v20, 2.0);
-
 /* Init the timer library. Allocate an array of timer data structs in shared
  * memory, and allocate the zeroth entry for use with original timer
  * APIs. Since the intersection of the sets of lcore ids in primary and
@@ -149,66 +133,80 @@ VERSION_SYMBOL(rte_timer_subsystem_init, _v20, 2.0);
  * multiple processes.
  */
 int
-rte_timer_subsystem_init_v1905(void)
+rte_timer_subsystem_init(void)
 {
        const struct rte_memzone *mz;
        struct rte_timer_data *data;
        int i, lcore_id;
        static const char *mz_name = "rte_timer_mz";
+       const size_t data_arr_size =
+                       RTE_MAX_DATA_ELS * sizeof(*rte_timer_data_arr);
+       const size_t mem_size = data_arr_size + sizeof(*rte_timer_mz_refcnt);
+       bool do_full_init = true;
 
-       if (rte_timer_subsystem_initialized)
-               return -EALREADY;
-
-       if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
-               mz = rte_memzone_lookup(mz_name);
-               if (mz == NULL)
-                       return -EEXIST;
-
-               rte_timer_data_arr = mz->addr;
-
-               rte_timer_data_arr[default_data_id].internal_flags |=
-                       FL_ALLOCATED;
+       rte_mcfg_timer_lock();
 
-               rte_timer_subsystem_initialized = 1;
-
-               return 0;
+       if (rte_timer_subsystem_initialized) {
+               rte_mcfg_timer_unlock();
+               return -EALREADY;
        }
 
-       mz = rte_memzone_reserve_aligned(mz_name,
-                       RTE_MAX_DATA_ELS * sizeof(*rte_timer_data_arr),
-                       SOCKET_ID_ANY, 0, RTE_CACHE_LINE_SIZE);
-       if (mz == NULL)
-               return -ENOMEM;
+       mz = rte_memzone_lookup(mz_name);
+       if (mz == NULL) {
+               mz = rte_memzone_reserve_aligned(mz_name, mem_size,
+                               SOCKET_ID_ANY, 0, RTE_CACHE_LINE_SIZE);
+               if (mz == NULL) {
+                       rte_mcfg_timer_unlock();
+                       return -ENOMEM;
+               }
+               do_full_init = true;
+       } else
+               do_full_init = false;
 
+       rte_timer_data_mz = mz;
        rte_timer_data_arr = mz->addr;
-
-       for (i = 0; i < RTE_MAX_DATA_ELS; i++) {
-               data = &rte_timer_data_arr[i];
-
-               for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
-                       rte_spinlock_init(
-                               &data->priv_timer[lcore_id].list_lock);
-                       data->priv_timer[lcore_id].prev_lcore = lcore_id;
+       rte_timer_mz_refcnt = (void *)((char *)mz->addr + data_arr_size);
+
+       if (do_full_init) {
+               for (i = 0; i < RTE_MAX_DATA_ELS; i++) {
+                       data = &rte_timer_data_arr[i];
+
+                       for (lcore_id = 0; lcore_id < RTE_MAX_LCORE;
+                            lcore_id++) {
+                               rte_spinlock_init(
+                                       &data->priv_timer[lcore_id].list_lock);
+                               data->priv_timer[lcore_id].prev_lcore =
+                                       lcore_id;
+                       }
                }
        }
 
        rte_timer_data_arr[default_data_id].internal_flags |= FL_ALLOCATED;
+       (*rte_timer_mz_refcnt)++;
 
        rte_timer_subsystem_initialized = 1;
 
+       rte_mcfg_timer_unlock();
+
        return 0;
 }
-MAP_STATIC_SYMBOL(int rte_timer_subsystem_init(void),
-                 rte_timer_subsystem_init_v1905);
-BIND_DEFAULT_SYMBOL(rte_timer_subsystem_init, _v1905, 19.05);
 
-void __rte_experimental
+void
 rte_timer_subsystem_finalize(void)
 {
-       if (rte_timer_data_arr)
-               rte_free(rte_timer_data_arr);
+       rte_mcfg_timer_lock();
+
+       if (!rte_timer_subsystem_initialized) {
+               rte_mcfg_timer_unlock();
+               return;
+       }
+
+       if (--(*rte_timer_mz_refcnt) == 0)
+               rte_memzone_free(rte_timer_data_mz);
 
        rte_timer_subsystem_initialized = 0;
+
+       rte_mcfg_timer_unlock();
 }
 
 /* Initialize the timer handle tim for use */
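
Note on the init/finalize rework: every process, primary or secondary, now
takes the same path under rte_mcfg_timer_lock(): look up the "rte_timer_mz"
memzone, reserve it if absent, and bump the refcount stored just past the
data array.  The memzone is freed only when the last process calls
finalize.  A usage sketch with hypothetical wrapper names:

    #include <rte_timer.h>

    static int
    timer_setup(void)
    {
            int ret = rte_timer_subsystem_init();

            /* -EALREADY means this process already initialized the
             * subsystem; treat it as success here.
             */
            return (ret == -EALREADY) ? 0 : ret;
    }

    static void
    timer_teardown(void)
    {
            /* Drops the shared refcount; frees the memzone at zero. */
            rte_timer_subsystem_finalize();
    }
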
@@ -219,7 +217,7 @@ rte_timer_init(struct rte_timer *tim)
 
        status.state = RTE_TIMER_STOP;
        status.owner = RTE_TIMER_NO_OWNER;
-       tim->status.u32 = status.u32;
+       __atomic_store_n(&tim->status.u32, status.u32, __ATOMIC_RELAXED);
 }
 
 /*
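
The relaxed store in rte_timer_init() is sufficient because a handle being
initialized cannot legitimately be visible to other lcores yet; publication
to other threads happens later through the RELEASE stores in this patch.
Minimal usage sketch:

    #include <rte_timer.h>

    static struct rte_timer tim;

    static void
    setup(void)
    {
            rte_timer_init(&tim);   /* state = STOP, owner = NO_OWNER */
            /* tim can now be passed to rte_timer_reset() */
    }
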
@@ -240,9 +238,9 @@ timer_set_config_state(struct rte_timer *tim,
 
        /* wait that the timer is in correct status before update,
         * and mark it as being configured */
-       while (success == 0) {
-               prev_status.u32 = tim->status.u32;
+       prev_status.u32 = __atomic_load_n(&tim->status.u32, __ATOMIC_RELAXED);
 
+       while (success == 0) {
                /* timer is running on another core
                 * or ready to run on local core, exit
                 */
@@ -259,9 +257,15 @@ timer_set_config_state(struct rte_timer *tim,
                 * mark it atomically as being configured */
                status.state = RTE_TIMER_CONFIG;
                status.owner = (int16_t)lcore_id;
-               success = rte_atomic32_cmpset(&tim->status.u32,
-                                             prev_status.u32,
-                                             status.u32);
+               /* The CONFIG state acts as a lock: while the timer is
+                * in CONFIG state no other thread can change it, so use
+                * ACQUIRE ordering on the successful exchange.
+                */
+               success = __atomic_compare_exchange_n(&tim->status.u32,
+                                             &prev_status.u32,
+                                             status.u32, 0,
+                                             __ATOMIC_ACQUIRE,
+                                             __ATOMIC_RELAXED);
        }
 
        ret_prev_status->u32 = prev_status.u32;
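
Note on the loop restructuring: unlike rte_atomic32_cmpset(), which only
reports success or failure, __atomic_compare_exchange_n() writes the value
it actually observed back into its 'expected' argument on failure, so the
explicit re-read at the top of each iteration is no longer needed.  A
self-contained sketch of the idiom, assuming a bare uint32_t status word:

    #include <stdint.h>

    static void
    cas_loop(uint32_t *status, uint32_t desired)
    {
            uint32_t expected = __atomic_load_n(status, __ATOMIC_RELAXED);

            /* On failure, 'expected' is refreshed with the value seen,
             * so the loop can re-examine it without reloading.
             */
            while (!__atomic_compare_exchange_n(status, &expected,
                            desired, 0 /* strong */,
                            __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
                    ;
    }
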
@@ -280,20 +284,27 @@ timer_set_running_state(struct rte_timer *tim)
 
        /* wait that the timer is in correct status before update,
         * and mark it as running */
-       while (success == 0) {
-               prev_status.u32 = tim->status.u32;
+       prev_status.u32 = __atomic_load_n(&tim->status.u32, __ATOMIC_RELAXED);
 
+       while (success == 0) {
                /* timer is not pending anymore */
                if (prev_status.state != RTE_TIMER_PENDING)
                        return -1;
 
-               /* here, we know that timer is stopped or pending,
-                * mark it atomically as being configured */
+       /* at this point we know the timer is pending; mark it
+        * atomically as being running
+        */
                status.state = RTE_TIMER_RUNNING;
                status.owner = (int16_t)lcore_id;
-               success = rte_atomic32_cmpset(&tim->status.u32,
-                                             prev_status.u32,
-                                             status.u32);
+               /* The RUNNING state acts as a lock: while the timer is
+                * in RUNNING state no other thread can change it, so use
+                * ACQUIRE ordering on the successful exchange.
+                */
+               success = __atomic_compare_exchange_n(&tim->status.u32,
+                                             &prev_status.u32,
+                                             status.u32, 0,
+                                             __ATOMIC_ACQUIRE,
+                                             __ATOMIC_RELAXED);
        }
 
        return 0;
@@ -521,10 +532,12 @@ __rte_timer_reset(struct rte_timer *tim, uint64_t expire,
 
        /* update state: as we are in CONFIG state, only us can modify
         * the state so we don't need to use cmpset() here */
-       rte_wmb();
        status.state = RTE_TIMER_PENDING;
        status.owner = (int16_t)tim_lcore;
-       tim->status.u32 = status.u32;
+       /* The "RELEASE" ordering guarantees the memory operations above
+        * the status update are observed before the update by all threads
+        */
+       __atomic_store_n(&tim->status.u32, status.u32, __ATOMIC_RELEASE);
 
        if (tim_lcore != lcore_id || !local_is_locked)
                rte_spinlock_unlock(&priv_timer[tim_lcore].list_lock);
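
The RELEASE store above pairs with the ACQUIRE compare-exchange in
timer_set_config_state() and timer_set_running_state(), replacing the old
rte_wmb(): field writes made while the timer was held in CONFIG state
become visible to any thread that subsequently acquires the status word.
A generic sketch of the pairing, with hypothetical names:

    #include <stdint.h>

    struct slot { uint64_t payload; uint32_t status; };

    static void
    publish(struct slot *s)
    {
            s->payload = 42;        /* plain store, ordered by RELEASE */
            __atomic_store_n(&s->status, 1, __ATOMIC_RELEASE);
    }

    static uint64_t
    consume(struct slot *s)
    {
            while (__atomic_load_n(&s->status, __ATOMIC_ACQUIRE) != 1)
                    ;
            return s->payload;      /* guaranteed to observe 42 */
    }
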
@@ -534,44 +547,15 @@ __rte_timer_reset(struct rte_timer *tim, uint64_t expire,
 
 /* Reset and start the timer associated with the timer handle tim */
 int
-rte_timer_reset_v20(struct rte_timer *tim, uint64_t ticks,
-                   enum rte_timer_type type, unsigned int tim_lcore,
-                   rte_timer_cb_t fct, void *arg)
-{
-       uint64_t cur_time = rte_get_timer_cycles();
-       uint64_t period;
-
-       if (unlikely((tim_lcore != (unsigned)LCORE_ID_ANY) &&
-                       !(rte_lcore_is_enabled(tim_lcore) ||
-                         rte_lcore_has_role(tim_lcore, ROLE_SERVICE))))
-               return -1;
-
-       if (type == PERIODICAL)
-               period = ticks;
-       else
-               period = 0;
-
-       return __rte_timer_reset(tim,  cur_time + ticks, period, tim_lcore,
-                         fct, arg, 0, &default_timer_data);
-}
-VERSION_SYMBOL(rte_timer_reset, _v20, 2.0);
-
-int
-rte_timer_reset_v1905(struct rte_timer *tim, uint64_t ticks,
+rte_timer_reset(struct rte_timer *tim, uint64_t ticks,
                      enum rte_timer_type type, unsigned int tim_lcore,
                      rte_timer_cb_t fct, void *arg)
 {
        return rte_timer_alt_reset(default_data_id, tim, ticks, type,
                                   tim_lcore, fct, arg);
 }
-MAP_STATIC_SYMBOL(int rte_timer_reset(struct rte_timer *tim, uint64_t ticks,
-                                     enum rte_timer_type type,
-                                     unsigned int tim_lcore,
-                                     rte_timer_cb_t fct, void *arg),
-                 rte_timer_reset_v1905);
-BIND_DEFAULT_SYMBOL(rte_timer_reset, _v1905, 19.05);
-
-int __rte_experimental
+
+int
 rte_timer_alt_reset(uint32_t timer_data_id, struct rte_timer *tim,
                    uint64_t ticks, enum rte_timer_type type,
                    unsigned int tim_lcore, rte_timer_cb_t fct, void *arg)
@@ -582,11 +566,6 @@ rte_timer_alt_reset(uint32_t timer_data_id, struct rte_timer *tim,
 
        TIMER_DATA_VALID_GET_OR_ERR_RET(timer_data_id, timer_data, -EINVAL);
 
-       if (unlikely((tim_lcore != (unsigned int)LCORE_ID_ANY) &&
-                       !(rte_lcore_is_enabled(tim_lcore) ||
-                         rte_lcore_has_role(tim_lcore, ROLE_SERVICE))))
-               return -1;
-
        if (type == PERIODICAL)
                period = ticks;
        else
@@ -635,32 +614,24 @@ __rte_timer_stop(struct rte_timer *tim, int local_is_locked,
        }
 
        /* mark timer as stopped */
-       rte_wmb();
        status.state = RTE_TIMER_STOP;
        status.owner = RTE_TIMER_NO_OWNER;
-       tim->status.u32 = status.u32;
+       /* The "RELEASE" ordering guarantees the memory operations above
+        * the status update are observed before the update by all threads
+        */
+       __atomic_store_n(&tim->status.u32, status.u32, __ATOMIC_RELEASE);
 
        return 0;
 }
 
 /* Stop the timer associated with the timer handle tim */
 int
-rte_timer_stop_v20(struct rte_timer *tim)
-{
-       return __rte_timer_stop(tim, 0, &default_timer_data);
-}
-VERSION_SYMBOL(rte_timer_stop, _v20, 2.0);
-
-int
-rte_timer_stop_v1905(struct rte_timer *tim)
+rte_timer_stop(struct rte_timer *tim)
 {
        return rte_timer_alt_stop(default_data_id, tim);
 }
-MAP_STATIC_SYMBOL(int rte_timer_stop(struct rte_timer *tim),
-                 rte_timer_stop_v1905);
-BIND_DEFAULT_SYMBOL(rte_timer_stop, _v1905, 19.05);
 
-int __rte_experimental
+int
 rte_timer_alt_stop(uint32_t timer_data_id, struct rte_timer *tim)
 {
        struct rte_timer_data *timer_data;
@@ -682,7 +653,8 @@ rte_timer_stop_sync(struct rte_timer *tim)
 int
 rte_timer_pending(struct rte_timer *tim)
 {
-       return tim->status.state == RTE_TIMER_PENDING;
+       return __atomic_load_n(&tim->status.state,
+                               __ATOMIC_RELAXED) == RTE_TIMER_PENDING;
 }
 
 /* must be called periodically, run all timer that expired */
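
rte_timer_pending() now performs an explicit relaxed load so the read is
well-defined under concurrent updates, but the result is still only a
snapshot: the state can change immediately after the load, so callers
should treat it as a hint.  Sketch:

    #include <stdio.h>
    #include <rte_timer.h>

    static void
    report(struct rte_timer *tim)
    {
            /* Hint only: may race with expiry or a concurrent stop. */
            if (rte_timer_pending(tim))
                    printf("timer still queued\n");
    }
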
@@ -784,8 +756,12 @@ __rte_timer_manage(struct rte_timer_data *timer_data)
                        /* remove from done list and mark timer as stopped */
                        status.state = RTE_TIMER_STOP;
                        status.owner = RTE_TIMER_NO_OWNER;
-                       rte_wmb();
-                       tim->status.u32 = status.u32;
+                       /* RELEASE ordering guarantees that memory
+                        * operations preceding the status update are
+                        * observed before it by all threads.
+                        */
+                       __atomic_store_n(&tim->status.u32, status.u32,
+                               __ATOMIC_RELEASE);
                }
                else {
                        /* keep it in list and mark timer as pending */
@@ -793,8 +769,12 @@ __rte_timer_manage(struct rte_timer_data *timer_data)
                        status.state = RTE_TIMER_PENDING;
                        __TIMER_STAT_ADD(priv_timer, pending, 1);
                        status.owner = (int16_t)lcore_id;
-                       rte_wmb();
-                       tim->status.u32 = status.u32;
+                       /* RELEASE ordering guarantees that memory
+                        * operations preceding the status update are
+                        * observed before it by all threads.
+                        */
+                       __atomic_store_n(&tim->status.u32, status.u32,
+                               __ATOMIC_RELEASE);
                        __rte_timer_reset(tim, tim->expire + tim->period,
                                tim->period, lcore_id, tim->f, tim->arg, 1,
                                timer_data);
@@ -804,15 +784,8 @@ __rte_timer_manage(struct rte_timer_data *timer_data)
        priv_timer[lcore_id].running_tim = NULL;
 }
 
-void
-rte_timer_manage_v20(void)
-{
-       __rte_timer_manage(&default_timer_data);
-}
-VERSION_SYMBOL(rte_timer_manage, _v20, 2.0);
-
 int
-rte_timer_manage_v1905(void)
+rte_timer_manage(void)
 {
        struct rte_timer_data *timer_data;
 
@@ -822,19 +795,17 @@ rte_timer_manage_v1905(void)
 
        return 0;
 }
-MAP_STATIC_SYMBOL(int rte_timer_manage(void), rte_timer_manage_v1905);
-BIND_DEFAULT_SYMBOL(rte_timer_manage, _v1905, 19.05);
 
-int __rte_experimental
+int
 rte_timer_alt_manage(uint32_t timer_data_id,
                     unsigned int *poll_lcores,
                     int nb_poll_lcores,
                     rte_timer_alt_manage_cb_t f)
 {
+       unsigned int default_poll_lcores[] = {rte_lcore_id()};
        union rte_timer_status status;
        struct rte_timer *tim, *next_tim, **pprev;
        struct rte_timer *run_first_tims[RTE_MAX_LCORE];
-       unsigned int runlist_lcore_ids[RTE_MAX_LCORE];
        unsigned int this_lcore = rte_lcore_id();
        struct rte_timer *prev[MAX_SKIPLIST_DEPTH + 1];
        uint64_t cur_time;
@@ -852,8 +823,8 @@ rte_timer_alt_manage(uint32_t timer_data_id,
        __TIMER_STAT_ADD(data->priv_timer, manage, 1);
 
        if (poll_lcores == NULL) {
-               poll_lcores = (unsigned int []){rte_lcore_id()};
-               nb_poll_lcores = 1;
+               poll_lcores = default_poll_lcores;
+               nb_poll_lcores = RTE_DIM(default_poll_lcores);
        }
 
        for (i = 0; i < nb_poll_lcores; i++) {
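
This hunk fixes a lifetime bug: a compound literal has automatic storage
tied to its enclosing block, so the pointer previously assigned inside the
if-block dangled once the block closed.  Hoisting a named array
(default_poll_lcores) to function scope avoids that.  An illustrative
sketch of the hazard, with hypothetical names:

    static unsigned int
    pick_first(unsigned int *arr)
    {
            unsigned int fallback[] = { 0 };      /* function scope: safe */

            if (arr == NULL)
                    arr = (unsigned int []){ 1 }; /* WRONG: dies at '}' */

            return arr[0];  /* may read an object whose lifetime ended */
    }
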
@@ -903,7 +874,6 @@ rte_timer_alt_manage(uint32_t timer_data_id,
 
                /* transition run-list from PENDING to RUNNING */
                run_first_tims[nb_runlists] = tim;
-               runlist_lcore_ids[nb_runlists] = poll_lcore;
                pprev = &run_first_tims[nb_runlists];
                nb_runlists++;
 
@@ -950,33 +920,36 @@ rte_timer_alt_manage(uint32_t timer_data_id,
                        break;
 
                tim = run_first_tims[min_idx];
-               privp = &data->priv_timer[runlist_lcore_ids[min_idx]];
 
                /* Move down the runlist from which we picked a timer to
                 * execute
                 */
                run_first_tims[min_idx] = run_first_tims[min_idx]->sl_next[0];
 
-               privp->updated = 0;
-               privp->running_tim = tim;
+               data->priv_timer[this_lcore].updated = 0;
+               data->priv_timer[this_lcore].running_tim = tim;
 
                /* Call the provided callback function */
                f(tim);
 
-               __TIMER_STAT_ADD(privp, pending, -1);
+               __TIMER_STAT_ADD(data->priv_timer, pending, -1);
 
                /* the timer was stopped or reloaded by the callback
                 * function, we have nothing to do here
                 */
-               if (privp->updated == 1)
+               if (data->priv_timer[this_lcore].updated == 1)
                        continue;
 
                if (tim->period == 0) {
                        /* remove from done list and mark timer as stopped */
                        status.state = RTE_TIMER_STOP;
                        status.owner = RTE_TIMER_NO_OWNER;
-                       rte_wmb();
-                       tim->status.u32 = status.u32;
+                       /* RELEASE ordering guarantees that memory
+                        * operations preceding the status update are
+                        * observed before it by all threads.
+                        */
+                       __atomic_store_n(&tim->status.u32, status.u32,
+                               __ATOMIC_RELEASE);
                } else {
                        /* keep it in list and mark timer as pending */
                        rte_spinlock_lock(
@@ -984,8 +957,12 @@ rte_timer_alt_manage(uint32_t timer_data_id,
                        status.state = RTE_TIMER_PENDING;
                        __TIMER_STAT_ADD(data->priv_timer, pending, 1);
                        status.owner = (int16_t)this_lcore;
-                       rte_wmb();
-                       tim->status.u32 = status.u32;
+                       /* RELEASE ordering guarantees that memory
+                        * operations preceding the status update are
+                        * observed before it by all threads.
+                        */
+                       __atomic_store_n(&tim->status.u32, status.u32,
+                               __ATOMIC_RELEASE);
                        __rte_timer_reset(tim, tim->expire + tim->period,
                                tim->period, this_lcore, tim->f, tim->arg, 1,
                                data);
@@ -993,14 +970,14 @@ rte_timer_alt_manage(uint32_t timer_data_id,
                                &data->priv_timer[this_lcore].list_lock);
                }
 
-               privp->running_tim = NULL;
+               data->priv_timer[this_lcore].running_tim = NULL;
        }
 
        return 0;
 }
 
 /* Walk pending lists, stopping timers and calling user-specified function */
-int __rte_experimental
+int
 rte_timer_stop_all(uint32_t timer_data_id, unsigned int *walk_lcores,
                   int nb_walk_lcores,
                   rte_timer_stop_all_cb_t f, void *f_arg)
@@ -1037,6 +1014,33 @@ rte_timer_stop_all(uint32_t timer_data_id, unsigned int *walk_lcores,
        return 0;
 }
 
+int64_t
+rte_timer_next_ticks(void)
+{
+       unsigned int lcore_id = rte_lcore_id();
+       struct rte_timer_data *timer_data;
+       struct priv_timer *priv_timer;
+       const struct rte_timer *tm;
+       uint64_t cur_time;
+       int64_t left = -ENOENT;
+
+       TIMER_DATA_VALID_GET_OR_ERR_RET(default_data_id, timer_data, -EINVAL);
+
+       priv_timer = timer_data->priv_timer;
+       cur_time = rte_get_timer_cycles();
+
+       rte_spinlock_lock(&priv_timer[lcore_id].list_lock);
+       tm = priv_timer[lcore_id].pending_head.sl_next[0];
+       if (tm) {
+               left = tm->expire - cur_time;
+               if (left < 0)
+                       left = 0;
+       }
+       rte_spinlock_unlock(&priv_timer[lcore_id].list_lock);
+
+       return left;
+}
+
 /* dump statistics about timers */
 static void
 __rte_timer_dump_stats(struct rte_timer_data *timer_data __rte_unused, FILE *f)
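
The new rte_timer_next_ticks() reports, in TSC cycles, how long until the
earliest pending timer on the calling lcore: 0 if one is already due,
-ENOENT if none is pending.  That lets a main loop nap instead of
busy-polling rte_timer_manage().  A hedged usage sketch:

    #include <rte_cycles.h>
    #include <rte_timer.h>

    static void
    idle_step(void)
    {
            int64_t left = rte_timer_next_ticks();

            if (left == 0)
                    rte_timer_manage();  /* a timer is due right now */
            else if (left > 0)
                    rte_delay_us(1);     /* coarse nap; tune as needed */
            /* left < 0: nothing pending or invalid timer data ID */
    }
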
@@ -1063,23 +1067,13 @@ __rte_timer_dump_stats(struct rte_timer_data *timer_data __rte_unused, FILE *f)
 #endif
 }
 
-void
-rte_timer_dump_stats_v20(FILE *f)
-{
-       __rte_timer_dump_stats(&default_timer_data, f);
-}
-VERSION_SYMBOL(rte_timer_dump_stats, _v20, 2.0);
-
 int
-rte_timer_dump_stats_v1905(FILE *f)
+rte_timer_dump_stats(FILE *f)
 {
        return rte_timer_alt_dump_stats(default_data_id, f);
 }
-MAP_STATIC_SYMBOL(int rte_timer_dump_stats(FILE *f),
-                 rte_timer_dump_stats_v1905);
-BIND_DEFAULT_SYMBOL(rte_timer_dump_stats, _v1905, 19.05);
 
-int __rte_experimental
+int
 rte_timer_alt_dump_stats(uint32_t timer_data_id __rte_unused, FILE *f)
 {
        struct rte_timer_data *timer_data;