return !!(rte_services[id].internal_flags & SERVICE_F_REGISTERED);
}
+/* validate ID and retrieve service pointer, or return error value */
+#define SERVICE_VALID_GET_OR_ERR_RET(id, service, retval) do { \
+	if ((id) >= RTE_SERVICE_NUM_MAX || !service_valid(id)) \
+ return retval; \
+ service = &rte_services[id]; \
+} while (0)
+
/* returns 1 if statistics should be colleced for service
* Returns 0 if statistics should not be collected for service
*/
}
const char *
-rte_service_get_name(const struct rte_service_spec *service)
-{
- return service->name;
-}
-
-int32_t
-rte_service_probe_capability(const struct rte_service_spec *service,
- uint32_t capability)
+rte_service_get_name(uint32_t id)
{
- return service->capabilities & capability;
+ struct rte_service_spec_impl *s;
+ SERVICE_VALID_GET_OR_ERR_RET(id, s, 0);
+ return s->spec.name;
}
int32_t
-rte_service_is_running(const struct rte_service_spec *spec)
+rte_service_probe_capability(uint32_t id, uint32_t capability)
{
- const struct rte_service_spec_impl *impl =
- (const struct rte_service_spec_impl *)spec;
- if (!impl)
- return -EINVAL;
-
- return (impl->runstate == RUNSTATE_RUNNING) &&
- (impl->num_mapped_cores > 0);
+ struct rte_service_spec_impl *s;
+ SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
+ return s->spec.capabilities & capability;
}
int32_t
-rte_service_register(const struct rte_service_spec *spec)
+rte_service_component_register(const struct rte_service_spec *spec,
+ uint32_t *id_ptr)
{
uint32_t i;
int32_t free_slot = -1;
rte_smp_wmb();
rte_service_count++;
+ if (id_ptr)
+ *id_ptr = free_slot;
+
return 0;
}
s->internal_flags &= ~(SERVICE_F_REGISTERED);
for (i = 0; i < RTE_MAX_LCORE; i++)
- lcore_states[i].service_mask &= ~(1 << service_id);
+ lcore_states[i].service_mask &= ~(UINT64_C(1) << service_id);
memset(&rte_services[service_id], 0,
sizeof(struct rte_service_spec_impl));
}
int32_t
-rte_service_start(struct rte_service_spec *service)
+rte_service_runstate_set(uint32_t id, uint32_t runstate)
{
- struct rte_service_spec_impl *s =
- (struct rte_service_spec_impl *)service;
- s->runstate = RUNSTATE_RUNNING;
+ struct rte_service_spec_impl *s;
+ SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
+
+ if (runstate)
+ s->runstate = RUNSTATE_RUNNING;
+ else
+ s->runstate = RUNSTATE_STOPPED;
+
rte_smp_wmb();
return 0;
}
int32_t
-rte_service_stop(struct rte_service_spec *service)
+rte_service_runstate_get(uint32_t id)
{
- struct rte_service_spec_impl *s =
- (struct rte_service_spec_impl *)service;
- s->runstate = RUNSTATE_STOPPED;
- rte_smp_wmb();
- return 0;
+ struct rte_service_spec_impl *s;
+ SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
+
+ return (s->runstate == RUNSTATE_RUNNING) && (s->num_mapped_cores > 0);
}
static int32_t
for (i = 0; i < rte_service_count; i++) {
struct rte_service_spec_impl *s = &rte_services[i];
if (s->runstate != RUNSTATE_RUNNING ||
- !(service_mask & (1 << i)))
+ !(service_mask & (UINT64_C(1) << i)))
continue;
/* check do we need cmpset, if MT safe or <= 1 core
return count;
}
+int32_t
+rte_service_lcore_count_services(uint32_t lcore)
+{
+ if (lcore >= RTE_MAX_LCORE)
+ return -EINVAL;
+
+ struct core_state *cs = &lcore_states[lcore];
+ if (!cs->is_service_core)
+ return -ENOTSUP;
+
+ return __builtin_popcountll(cs->service_mask);
+}
+
int32_t
rte_service_start_with_defaults(void)
{
* should multiplex to a single core, or 1:1 if there are the
* same amount of services as service-cores
*/
- ret = rte_service_enable_on_lcore(s, ids[lcore_iter]);
+ ret = rte_service_map_lcore_set(i, ids[lcore_iter], 1);
if (ret)
return -ENODEV;
if (lcore_iter >= lcore_count)
lcore_iter = 0;
- ret = rte_service_start(s);
+ ret = rte_service_runstate_set(i, 1);
if (ret)
return -ENOEXEC;
}
if (!lcore_states[lcore].is_service_core)
return -EINVAL;
+ uint64_t sid_mask = UINT64_C(1) << sid;
if (set) {
if (*set) {
- lcore_states[lcore].service_mask |= (1 << sid);
+ lcore_states[lcore].service_mask |= sid_mask;
rte_services[sid].num_mapped_cores++;
} else {
- lcore_states[lcore].service_mask &= ~(1 << sid);
+ lcore_states[lcore].service_mask &= ~(sid_mask);
rte_services[sid].num_mapped_cores--;
}
}
if (enabled)
- *enabled = (lcore_states[lcore].service_mask & (1 << sid));
+		*enabled = !!(lcore_states[lcore].service_mask & (sid_mask));
rte_smp_wmb();
return 0;
}
-int32_t rte_service_get_enabled_on_lcore(struct rte_service_spec *service,
- uint32_t lcore)
-{
- uint32_t enabled;
- int ret = service_update(service, lcore, 0, &enabled);
- if (ret == 0)
- return enabled;
- return -EINVAL;
-}
-
int32_t
-rte_service_enable_on_lcore(struct rte_service_spec *service, uint32_t lcore)
+rte_service_map_lcore_set(uint32_t id, uint32_t lcore, uint32_t enabled)
{
- uint32_t on = 1;
- return service_update(service, lcore, &on, 0);
+ struct rte_service_spec_impl *s;
+ SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
+ uint32_t on = enabled > 0;
+ return service_update(&s->spec, lcore, &on, 0);
}
int32_t
-rte_service_disable_on_lcore(struct rte_service_spec *service, uint32_t lcore)
+rte_service_map_lcore_get(uint32_t id, uint32_t lcore)
{
- uint32_t off = 0;
- return service_update(service, lcore, &off, 0);
+ struct rte_service_spec_impl *s;
+ SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
+ uint32_t enabled;
+ int ret = service_update(&s->spec, lcore, 0, &enabled);
+ if (ret == 0)
+ return enabled;
+ return ret;
}
int32_t rte_service_lcore_reset_all(void)
uint32_t i;
for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
- int32_t enabled = lcore_states[i].service_mask & (1 << i);
+ int32_t enabled =
+ lcore_states[i].service_mask & (UINT64_C(1) << i);
int32_t service_running = rte_services[i].runstate !=
RUNSTATE_STOPPED;
int32_t only_core = rte_services[i].num_mapped_cores == 1;