#include <rte_atomic.h>
#include <rte_memory.h>
#include <rte_malloc.h>
+#include <rte_spinlock.h>
#include "eal_private.h"
/* public part of the struct */
struct rte_service_spec spec;
- /* atomic lock that when set indicates a service core is currently
+ /* spin lock that when set indicates a service core is currently
* running this service callback. When not set, a core may take the
* lock and then run the service callback.
*/
- rte_atomic32_t execute_lock;
+ rte_spinlock_t execute_lock;
/* API set/get-able variables */
int8_t app_runstate;
* It does not indicate the number of cores the service is running
* on currently.
*/
- rte_atomic32_t num_mapped_cores;
+ uint32_t num_mapped_cores;
uint64_t calls;
uint64_t cycles_spent;
} __rte_cache_aligned;
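
/* Illustrative sketch, not part of the patch: hypothetical struct and
 * helper showing how the reworked fields are intended to be used.
 * rte_spinlock_init() leaves execute_lock unlocked (no core is inside the
 * callback), and num_mapped_cores is a plain uint32_t touched only through
 * relaxed C11-style builtins.
 */
struct toy_service {
	rte_spinlock_t execute_lock;  /* serializes MT-unsafe callbacks */
	uint32_t num_mapped_cores;    /* advisory count of mapped lcores */
};

static void
toy_service_init(struct toy_service *s)
{
	rte_spinlock_init(&s->execute_lock);
	__atomic_store_n(&s->num_mapped_cores, 0, __ATOMIC_RELAXED);
}
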
rte_smp_rmb();
int check_disabled = !(s->internal_flags & SERVICE_F_START_CHECK);
- int lcore_mapped = (rte_atomic32_read(&s->num_mapped_cores) > 0);
+ int lcore_mapped = (__atomic_load_n(&s->num_mapped_cores,
+ __ATOMIC_RELAXED) > 0);
return (s->app_runstate == RUNSTATE_RUNNING) &&
(s->comp_runstate == RUNSTATE_RUNNING) &&
cs->service_active_on_lcore[i] = 1;
if ((service_mt_safe(s) == 0) && (serialize_mt_unsafe == 1)) {
- if (!rte_atomic32_cmpset((uint32_t *)&s->execute_lock, 0, 1))
+ if (!rte_spinlock_trylock(&s->execute_lock))
return -EBUSY;
service_runner_do_callback(s, cs, i);
- rte_atomic32_clear(&s->execute_lock);
+ rte_spinlock_unlock(&s->execute_lock);
} else
service_runner_do_callback(s, cs, i);
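
/* Illustrative sketch with hypothetical names (requires <rte_spinlock.h>
 * and <errno.h>): the try-lock pattern used above lets a second lcore back
 * off with -EBUSY instead of spinning while another lcore runs an
 * MT-unsafe service callback.
 */
static int
run_mt_unsafe(rte_spinlock_t *lock, void (*cb)(void *), void *arg)
{
	if (!rte_spinlock_trylock(lock))
		return -EBUSY; /* another lcore holds the callback lock */
	cb(arg);
	rte_spinlock_unlock(lock);
	return 0;
}
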
/* Increment num_mapped_cores to reflect that this core is
 * now mapped and capable of running the service.
 */
- rte_atomic32_inc(&s->num_mapped_cores);
+ __atomic_add_fetch(&s->num_mapped_cores, 1, __ATOMIC_RELAXED);
int ret = service_run(id, cs, UINT64_MAX, s, serialize_mt_unsafe);
- rte_atomic32_dec(&s->num_mapped_cores);
+ __atomic_sub_fetch(&s->num_mapped_cores, 1, __ATOMIC_RELAXED);
return ret;
}
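
/* Illustrative sketch, hypothetical helper: bracket a one-shot run with a
 * relaxed increment/decrement, mirroring the add/sub pair above. The
 * counter is only read as a hint (see the lcore_mapped and only_core
 * checks), so no ordering stronger than __ATOMIC_RELAXED is assumed here.
 */
static int
run_once_counted(uint32_t *num_mapped_cores, int (*run_one)(void))
{
	__atomic_add_fetch(num_mapped_cores, 1, __ATOMIC_RELAXED);
	int ret = run_one();
	__atomic_sub_fetch(num_mapped_cores, 1, __ATOMIC_RELAXED);
	return ret;
}
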
if (*set && !lcore_mapped) {
lcore_states[lcore].service_mask |= sid_mask;
- rte_atomic32_inc(&rte_services[sid].num_mapped_cores);
+ __atomic_add_fetch(&rte_services[sid].num_mapped_cores,
+ 1, __ATOMIC_RELAXED);
}
if (!*set && lcore_mapped) {
lcore_states[lcore].service_mask &= ~(sid_mask);
- rte_atomic32_dec(&rte_services[sid].num_mapped_cores);
+ __atomic_sub_fetch(&rte_services[sid].num_mapped_cores,
+ 1, __ATOMIC_RELAXED);
}
}
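
/* Illustrative sketch, hypothetical reader-side helper: map/unmap only
 * bump num_mapped_cores with relaxed atomics, so readers pair them with a
 * relaxed load, as the lcore_mapped and only_core checks in this diff do.
 */
static int
service_is_mapped_somewhere(const uint32_t *num_mapped_cores)
{
	return __atomic_load_n(num_mapped_cores, __ATOMIC_RELAXED) > 0;
}
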
if (enabled)
*enabled = !!(lcore_states[lcore].service_mask & (sid_mask));
- rte_smp_wmb();
-
return 0;
}
}
}
for (i = 0; i < RTE_SERVICE_NUM_MAX; i++)
- rte_atomic32_set(&rte_services[i].num_mapped_cores, 0);
+ __atomic_store_n(&rte_services[i].num_mapped_cores, 0,
+ __ATOMIC_RELAXED);
rte_smp_wmb();
int32_t enabled = service_mask & (UINT64_C(1) << i);
int32_t service_running = rte_service_runstate_get(i);
int32_t only_core = (1 ==
- rte_atomic32_read(&rte_services[i].num_mapped_cores));
+ __atomic_load_n(&rte_services[i].num_mapped_cores,
+ __ATOMIC_RELAXED));
/* if the core is mapped, and the service is running, and this
* is the only core that is mapped, the service would cease to