+service_run(uint32_t i, struct core_state *cs, uint64_t service_mask,
+ struct rte_service_spec_impl *s, uint32_t serialize_mt_unsafe)
+{
+ if (!s)
+ return -EINVAL;
+
+ /* comp_runstate and app_runstate act as the guard variables.
+ * Use load-acquire memory order. This synchronizes with
+ * store-release in service state set functions.
+ */
+ if (__atomic_load_n(&s->comp_runstate, __ATOMIC_ACQUIRE) !=
+ RUNSTATE_RUNNING ||
+ __atomic_load_n(&s->app_runstate, __ATOMIC_ACQUIRE) !=
+ RUNSTATE_RUNNING ||
+ !(service_mask & (UINT64_C(1) << i))) {
+ cs->service_active_on_lcore[i] = 0;
+ return -ENOEXEC;
+ }
+
+ cs->service_active_on_lcore[i] = 1;
+
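+ /* An MT-unsafe service must not run on two lcores at once: when the
+ * caller requests serialization, take the per-service execute lock
+ * and return -EBUSY if another lcore already holds it.
+ */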
+ if ((service_mt_safe(s) == 0) && (serialize_mt_unsafe == 1)) {
+ if (!rte_spinlock_trylock(&s->execute_lock))
+ return -EBUSY;
+
+ service_runner_do_callback(s, cs, i);
+ rte_spinlock_unlock(&s->execute_lock);
+ } else
+ service_runner_do_callback(s, cs, i);
+
+ return 0;
+}
+
+int32_t
+rte_service_may_be_active(uint32_t id)
+{
+ uint32_t ids[RTE_MAX_LCORE] = {0};
+ int32_t lcore_count = rte_service_lcore_list(ids, RTE_MAX_LCORE);
+ int i;
+
+ if (id >= RTE_SERVICE_NUM_MAX || !service_valid(id))
+ return -EINVAL;
+
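+ /* Check the per-service active flag on each service lcore. */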
+ for (i = 0; i < lcore_count; i++) {
+ if (lcore_states[ids[i]].service_active_on_lcore[id])
+ return 1;
+ }
+
+ return 0;
+}
+
+int32_t
+rte_service_run_iter_on_app_lcore(uint32_t id, uint32_t serialize_mt_unsafe)
+{
+ struct core_state *cs = &lcore_states[rte_lcore_id()];
+ struct rte_service_spec_impl *s;
+
+ SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
+
+ /* Increment num_mapped_cores to reflect that this core is
+ * now mapped and capable of running the service.
+ */
+ __atomic_add_fetch(&s->num_mapped_cores, 1, __ATOMIC_RELAXED);
+
+ int ret = service_run(id, cs, UINT64_MAX, s, serialize_mt_unsafe);
+
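+ /* The ad-hoc run is complete; drop this core from the mapped count. */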
+ __atomic_sub_fetch(&s->num_mapped_cores, 1, __ATOMIC_RELAXED);
+
+ return ret;
+}
+
+static int32_t
+service_runner_func(void *arg)