+int32_t
+rte_service_runstate_get(uint32_t id)
+{
+ struct rte_service_spec_impl *s;
+ SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
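+ /* Read barrier so the runstate loads below observe the most recent
+ * stores; assumed to pair with the rte_smp_wmb() in the runstate
+ * set functions.
+ */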
+ rte_smp_rmb();
+
+ int check_disabled = !(s->internal_flags & SERVICE_F_START_CHECK);
+ int lcore_mapped = (rte_atomic32_read(&s->num_mapped_cores) > 0);
+
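+ /* A service with the start check disabled reports as running even with
+ * no lcores mapped; otherwise at least one mapped lcore is required.
+ */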
+ return (s->app_runstate == RUNSTATE_RUNNING) &&
+ (s->comp_runstate == RUNSTATE_RUNNING) &&
+ (check_disabled | lcore_mapped);
+}
+
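+ /* Execute the service callback, accounting cycles and call counts only
+ * when per-service statistics are enabled.
+ */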
+static inline void
+rte_service_runner_do_callback(struct rte_service_spec_impl *s,
+ struct core_state *cs, uint32_t service_idx)
+{
+ void *userdata = s->spec.callback_userdata;
+
+ if (service_stats_enabled(s)) {
+ uint64_t start = rte_rdtsc();
+ s->spec.callback(userdata);
+ uint64_t end = rte_rdtsc();
+ s->cycles_spent += end - start;
+ cs->calls_per_service[service_idx]++;
+ s->calls++;
+ } else
+ s->spec.callback(userdata);
+}
+
+ /* Expects that the service 's' is valid. */
+static int32_t
+service_run(uint32_t i, struct core_state *cs, uint64_t service_mask,
+ struct rte_service_spec_impl *s)
+{
+ if (!s)
+ return -EINVAL;
+
+ if (s->comp_runstate != RUNSTATE_RUNNING ||
+ s->app_runstate != RUNSTATE_RUNNING ||
+ !(service_mask & (UINT64_C(1) << i))) {
+ cs->service_active_on_lcore[i] = 0;
+ return -ENOEXEC;
+ }
+
+ cs->service_active_on_lcore[i] = 1;
+
+ /* Check whether we need the cmpset: if the service is MT safe, or at
+ * most one core is mapped, atomic ops are not required.
+ */
+ const int use_atomics = (service_mt_safe(s) == 0) &&
+ (rte_atomic32_read(&s->num_mapped_cores) > 1);
+ if (use_atomics) {
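+ /* Non-blocking: if another lcore holds the execute lock,
+ * return -EBUSY rather than spin.
+ */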
+ if (!rte_atomic32_cmpset((uint32_t *)&s->execute_lock, 0, 1))
+ return -EBUSY;
+
+ rte_service_runner_do_callback(s, cs, i);
+ rte_atomic32_clear(&s->execute_lock);
+ } else
+ rte_service_runner_do_callback(s, cs, i);
+
+ return 0;
+}
+
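+ /* Report whether a service may still be executing on any service lcore.
+ * The result is inherently racy, hence the "may be" semantics: a service
+ * can start or stop on another lcore between the check and the return.
+ */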
+int32_t
+rte_service_may_be_active(uint32_t id)
+{
+ uint32_t ids[RTE_MAX_LCORE] = {0};
+ int32_t lcore_count = rte_service_lcore_list(ids, RTE_MAX_LCORE);
+ int i;
+
+ if (id >= RTE_SERVICE_NUM_MAX || !service_valid(id))
+ return -EINVAL;
+
+ for (i = 0; i < lcore_count; i++) {
+ /* index lcore_states by the service lcore id, not the loop index */
+ if (lcore_states[ids[i]].service_active_on_lcore[id])
+ return 1;
+ }
+
+ return 0;
+}
+
+int32_t
+rte_service_run_iter_on_app_lcore(uint32_t id, uint32_t serialize_mt_unsafe)
+{
+ struct core_state *cs = &lcore_states[rte_lcore_id()];
+ struct rte_service_spec_impl *s;
+
+ SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
+
+ /* Atomically add this core to the mapped cores first, then examine if
+ * we can run the service. This avoids a race condition between
+ * checking the value and atomically adding to the mapped count.
+ */
+ if (serialize_mt_unsafe)
+ rte_atomic32_inc(&s->num_mapped_cores);
+
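+ /* Refuse to run an MT-unsafe service that other cores are mapped to:
+ * running it here could race with a service lcore executing it.
+ */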
+ if (service_mt_safe(s) == 0 &&
+ rte_atomic32_read(&s->num_mapped_cores) > 1) {
+ if (serialize_mt_unsafe)
+ rte_atomic32_dec(&s->num_mapped_cores);
+ return -EBUSY;
+ }
+
+ int ret = service_run(id, cs, UINT64_MAX, s);
+
+ if (serialize_mt_unsafe)
+ rte_atomic32_dec(&s->num_mapped_cores);
+
+ return ret;
+}
+
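+ /* Illustrative usage (editor's sketch, not part of the original patch):
+ * run service 0 once on the current application lcore, serializing
+ * against other lcores in case the service is MT-unsafe. -EBUSY here
+ * means another lcore is currently executing the service.
+ *
+ *   int ret = rte_service_run_iter_on_app_lcore(0, 1);
+ */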