s->spec = *spec;
s->internal_flags |= SERVICE_F_REGISTERED | SERVICE_F_START_CHECK;
- rte_smp_wmb();
rte_service_count++;
if (id_ptr)
SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
rte_service_count--;
- rte_smp_wmb();
s->internal_flags &= ~(SERVICE_F_REGISTERED);
struct rte_service_spec_impl *s;
SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
+ /* comp_runstate acts as the guard variable. Use store-release
+ * memory order. This synchronizes with load-acquire in
+ * service_run and service_runstate_get functions.
+ */
if (runstate)
- s->comp_runstate = RUNSTATE_RUNNING;
+ __atomic_store_n(&s->comp_runstate, RUNSTATE_RUNNING,
+ __ATOMIC_RELEASE);
else
- s->comp_runstate = RUNSTATE_STOPPED;
+ __atomic_store_n(&s->comp_runstate, RUNSTATE_STOPPED,
+ __ATOMIC_RELEASE);
- rte_smp_wmb();
return 0;
}
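
For reference, the guard pattern named in the comment above reduces to a few
lines of plain C with the same GCC builtins. This is an illustrative sketch,
not DPDK code; the names guard, payload, publish and consume are hypothetical:

	#include <stdint.h>

	static uint32_t payload;	/* data protected by the guard */
	static uint32_t guard;		/* 0 = not published, 1 = published */

	static void
	publish(uint32_t value)
	{
		payload = value;	/* plain store, ordered by the release below */
		/* store-release: earlier stores cannot be reordered past it */
		__atomic_store_n(&guard, 1, __ATOMIC_RELEASE);
	}

	static int
	consume(uint32_t *out)
	{
		/* load-acquire: later loads cannot be reordered before it */
		if (__atomic_load_n(&guard, __ATOMIC_ACQUIRE) != 1)
			return -1;	/* not published yet */
		*out = payload;		/* sees the value stored before the release */
		return 0;
	}

A reader that observes guard == 1 through the acquire load is guaranteed to
see every store made before the release, which is the property comp_runstate
relies on here.
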
struct rte_service_spec_impl *s;
SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
+ /* app_runstate acts as the guard variable. Use store-release
+ * memory order. This synchronizes with load-acquire in
+ * service_run and service_runstate_get functions.
+ */
if (runstate)
- s->app_runstate = RUNSTATE_RUNNING;
+ __atomic_store_n(&s->app_runstate, RUNSTATE_RUNNING,
+ __ATOMIC_RELEASE);
else
- s->app_runstate = RUNSTATE_STOPPED;
+ __atomic_store_n(&s->app_runstate, RUNSTATE_STOPPED,
+ __ATOMIC_RELEASE);
- rte_smp_wmb();
return 0;
}
{
struct rte_service_spec_impl *s;
SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
- rte_smp_rmb();
- int check_disabled = !(s->internal_flags & SERVICE_F_START_CHECK);
- int lcore_mapped = (__atomic_load_n(&s->num_mapped_cores,
- __ATOMIC_RELAXED) > 0);
- return (s->app_runstate == RUNSTATE_RUNNING) &&
- (s->comp_runstate == RUNSTATE_RUNNING) &&
- (check_disabled | lcore_mapped);
+ /* comp_runstate and app_runstate act as the guard variables.
+ * Use load-acquire memory order. This synchronizes with
+ * store-release in service state set functions.
+ */
+ if (__atomic_load_n(&s->comp_runstate, __ATOMIC_ACQUIRE) ==
+ RUNSTATE_RUNNING &&
+ __atomic_load_n(&s->app_runstate, __ATOMIC_ACQUIRE) ==
+ RUNSTATE_RUNNING) {
+ int check_disabled = !(s->internal_flags &
+ SERVICE_F_START_CHECK);
+ int lcore_mapped = (__atomic_load_n(&s->num_mapped_cores,
+ __ATOMIC_RELAXED) > 0);
+
+ return (check_disabled | lcore_mapped);
+ } else
+ return 0;
}
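
The read side above gates on two independently published guards. A minimal
sketch of why both loads need acquire ordering (all names hypothetical):

	#include <stdint.h>

	static uint32_t comp_ready, app_ready;	/* two independent guards */
	static uint32_t flags;		/* written before either guard is set */

	static int
	ready(void)
	{
		/* Either writer may be the last one to flip its guard, so
		 * both loads take acquire ordering; a relaxed load on either
		 * would let the flags read float above it.
		 */
		if (__atomic_load_n(&comp_ready, __ATOMIC_ACQUIRE) &&
				__atomic_load_n(&app_ready, __ATOMIC_ACQUIRE))
			return flags != 0;
		return 0;
	}
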
static inline void
if (!s)
return -EINVAL;
- if (s->comp_runstate != RUNSTATE_RUNNING ||
- s->app_runstate != RUNSTATE_RUNNING ||
- !(service_mask & (UINT64_C(1) << i))) {
+ /* comp_runstate and app_runstate act as the guard variables.
+ * Use load-acquire memory order. This synchronizes with
+ * store-release in service state set functions.
+ */
+ if (__atomic_load_n(&s->comp_runstate, __ATOMIC_ACQUIRE) !=
+ RUNSTATE_RUNNING ||
+ __atomic_load_n(&s->app_runstate, __ATOMIC_ACQUIRE) !=
+ RUNSTATE_RUNNING ||
+ !(service_mask & (UINT64_C(1) << i))) {
cs->service_active_on_lcore[i] = 0;
return -ENOEXEC;
}
const int lcore = rte_lcore_id();
struct core_state *cs = &lcore_states[lcore];
- while (cs->runstate == RUNSTATE_RUNNING) {
+ /* runstate acts as the guard variable. Use load-acquire
+ * memory order here to synchronize with store-release
+ * in runstate update functions.
+ */
+ while (__atomic_load_n(&cs->runstate, __ATOMIC_ACQUIRE) ==
+ RUNSTATE_RUNNING) {
const uint64_t service_mask = cs->service_mask;
for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
}
cs->loops++;
-
- rte_smp_rmb();
}
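
The loop condition above is the read side of the lcore runstate handshake. A
standalone sketch of the same shape, using pthreads in place of EAL lcores
(all names hypothetical):

	#include <pthread.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <unistd.h>

	static uint32_t runstate = 1;	/* 1 = running, 0 = stopped */

	static void *
	runner(void *arg)
	{
		uint64_t loops = 0;
		(void)arg;
		/* acquire load in the loop condition, as in the hunk above */
		while (__atomic_load_n(&runstate, __ATOMIC_ACQUIRE) == 1)
			loops++;
		printf("stopped after %llu loops\n", (unsigned long long)loops);
		return NULL;
	}

	int
	main(void)
	{
		pthread_t t;
		pthread_create(&t, NULL, runner, NULL);
		usleep(1000);
		/* stop the loop with a store-release, pairing the acquire */
		__atomic_store_n(&runstate, 0, __ATOMIC_RELEASE);
		pthread_join(t, NULL);
		return 0;
	}
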
lcore_config[lcore].state = WAIT;
if (lcore_states[i].is_service_core) {
lcore_states[i].service_mask = 0;
set_lcore_state(i, ROLE_RTE);
- lcore_states[i].runstate = RUNSTATE_STOPPED;
+ /* runstate acts as the guard variable. Use
+ * store-release memory order here to synchronize
+ * with load-acquire in runstate read functions.
+ */
+ __atomic_store_n(&lcore_states[i].runstate,
+ RUNSTATE_STOPPED, __ATOMIC_RELEASE);
}
}
for (i = 0; i < RTE_SERVICE_NUM_MAX; i++)
__atomic_store_n(&rte_services[i].num_mapped_cores, 0,
__ATOMIC_RELAXED);
- rte_smp_wmb();
-
return 0;
}
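
Note the contrast with the relaxed store kept just above: num_mapped_cores is
a plain statistic, with no data published under it, so it carries no ordering
obligations. A short sketch of the distinction (names hypothetical):

	#include <stdint.h>

	static uint32_t mapped_cores;	/* counter: nothing synchronizes on it */
	static uint32_t runstate;	/* guard: data is published under it */

	static void
	reset(void)
	{
		/* counter reset: relaxed ordering is sufficient */
		__atomic_store_n(&mapped_cores, 0, __ATOMIC_RELAXED);
		/* guard reset: release, so an acquire reader that sees the
		 * new value also sees every store made before this one
		 */
		__atomic_store_n(&runstate, 0, __ATOMIC_RELEASE);
	}
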
/* ensure that after adding a core the mask and state are defaults */
lcore_states[lcore].service_mask = 0;
- lcore_states[lcore].runstate = RUNSTATE_STOPPED;
-
- rte_smp_wmb();
+ /* Use store-release memory order here to synchronize with
+ * load-acquire in runstate read functions.
+ */
+ __atomic_store_n(&lcore_states[lcore].runstate, RUNSTATE_STOPPED,
+ __ATOMIC_RELEASE);
return rte_eal_wait_lcore(lcore);
}
if (!cs->is_service_core)
return -EINVAL;
- if (cs->runstate != RUNSTATE_STOPPED)
+ /* runstate acts as the guard variable. Use load-acquire
+ * memory order here to synchronize with store-release
+ * in runstate update functions.
+ */
+ if (__atomic_load_n(&cs->runstate, __ATOMIC_ACQUIRE) !=
+ RUNSTATE_STOPPED)
return -EBUSY;
set_lcore_state(lcore, ROLE_RTE);
if (!cs->is_service_core)
return -EINVAL;
- if (cs->runstate == RUNSTATE_RUNNING)
+ /* runstate acts as the guard variable. Use load-acquire
+ * memory order here to synchronize with store-release
+ * in runstate update functions.
+ */
+ if (__atomic_load_n(&cs->runstate, __ATOMIC_ACQUIRE) ==
+ RUNSTATE_RUNNING)
return -EALREADY;
/* set core to run state first, and then launch otherwise it will
* return immediately as runstate keeps it in the service poll loop
*/
- cs->runstate = RUNSTATE_RUNNING;
+ /* Use store-release memory order here to synchronize with
+ * load-acquire in runstate read functions.
+ */
+ __atomic_store_n(&cs->runstate, RUNSTATE_RUNNING, __ATOMIC_RELEASE);
int ret = rte_eal_remote_launch(service_runner_func, 0, lcore);
/* returns -EBUSY if the core is already launched, 0 on success */
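
The pre-existing comment above already gives the ordering requirement: RUNNING
must be stored before the launch, or the launched core's first acquire load of
runstate could still observe STOPPED and leave the poll loop immediately. In
sketch form, with launch_worker() as a hypothetical stand-in for
rte_eal_remote_launch():

	#include <stdint.h>

	enum { STOPPED, RUNNING };

	static uint32_t runstate;

	extern int launch_worker(void);	/* hypothetical launch primitive */

	static int
	start(void)
	{
		/* Publish RUNNING before launching; if the two steps were
		 * swapped, the worker could read STOPPED and exit at once.
		 */
		__atomic_store_n(&runstate, RUNNING, __ATOMIC_RELEASE);
		return launch_worker();
	}
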
if (lcore >= RTE_MAX_LCORE)
return -EINVAL;
- if (lcore_states[lcore].runstate == RUNSTATE_STOPPED)
+ /* runstate act as the guard variable. Use load-acquire
+ * memory order here to synchronize with store-release
+ * in runstate update functions.
+ */
+ if (__atomic_load_n(&lcore_states[lcore].runstate, __ATOMIC_ACQUIRE) ==
+ RUNSTATE_STOPPED)
return -EALREADY;
uint32_t i;
return -EBUSY;
}
- lcore_states[lcore].runstate = RUNSTATE_STOPPED;
+ /* Use store-release memory order here to synchronize with
+ * load-acquire in runstate read functions.
+ */
+ __atomic_store_n(&lcore_states[lcore].runstate, RUNSTATE_STOPPED,
+ __ATOMIC_RELEASE);
return 0;
}