X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=lib%2Flibrte_eal%2Fcommon%2Frte_service.c;h=ae97e6b7fbfd84d9062f6e843bfa91077620e2bc;hb=e32cb57973fc311b4b5f60ae5dac37d99e48c94d;hp=a14b23bc66c89353a14fca43b4e0b35c2109b45c;hpb=a894d4815f79b0d76527d9c42b23327de1501711;p=dpdk.git diff --git a/lib/librte_eal/common/rte_service.c b/lib/librte_eal/common/rte_service.c index a14b23bc66..ae97e6b7fb 100644 --- a/lib/librte_eal/common/rte_service.c +++ b/lib/librte_eal/common/rte_service.c @@ -54,6 +54,7 @@ #define SERVICE_F_REGISTERED (1 << 0) #define SERVICE_F_STATS_ENABLED (1 << 1) +#define SERVICE_F_START_CHECK (1 << 2) /* runstates for services and lcores, denoting if they are active or not */ #define RUNSTATE_STOPPED 0 @@ -71,11 +72,12 @@ struct rte_service_spec_impl { rte_atomic32_t execute_lock; /* API set/get-able variables */ - int32_t runstate; + int8_t app_runstate; + int8_t comp_runstate; uint8_t internal_flags; /* per service statistics */ - uint32_t num_mapped_cores; + rte_atomic32_t num_mapped_cores; uint64_t calls; uint64_t cycles_spent; } __rte_cache_aligned; @@ -151,7 +153,7 @@ service_valid(uint32_t id) service = &rte_services[id]; \ } while (0) -/* returns 1 if statistics should be colleced for service +/* returns 1 if statistics should be collected for service * Returns 0 if statistics should not be collected for service */ static inline int @@ -163,21 +165,31 @@ service_stats_enabled(struct rte_service_spec_impl *impl) static inline int service_mt_safe(struct rte_service_spec_impl *s) { - return s->spec.capabilities & RTE_SERVICE_CAP_MT_SAFE; + return !!(s->spec.capabilities & RTE_SERVICE_CAP_MT_SAFE); } -int32_t rte_service_set_stats_enable(struct rte_service_spec *service, - int32_t enabled) +int32_t rte_service_set_stats_enable(uint32_t id, int32_t enabled) { - struct rte_service_spec_impl *impl = - (struct rte_service_spec_impl *)service; - if (!impl) - return -EINVAL; + struct rte_service_spec_impl *s; + SERVICE_VALID_GET_OR_ERR_RET(id, s, 0); if (enabled) - impl->internal_flags |= SERVICE_F_STATS_ENABLED; + s->internal_flags |= SERVICE_F_STATS_ENABLED; else - impl->internal_flags &= ~(SERVICE_F_STATS_ENABLED); + s->internal_flags &= ~(SERVICE_F_STATS_ENABLED); + + return 0; +} + +int32_t rte_service_set_runstate_mapped_check(uint32_t id, int32_t enabled) +{ + struct rte_service_spec_impl *s; + SERVICE_VALID_GET_OR_ERR_RET(id, s, 0); + + if (enabled) + s->internal_flags |= SERVICE_F_START_CHECK; + else + s->internal_flags &= ~(SERVICE_F_START_CHECK); return 0; } @@ -188,29 +200,21 @@ rte_service_get_count(void) return rte_service_count; } -struct rte_service_spec * -rte_service_get_by_id(uint32_t id) +int32_t rte_service_get_by_name(const char *name, uint32_t *service_id) { - struct rte_service_spec *service = NULL; - if (id < rte_service_count) - service = (struct rte_service_spec *)&rte_services[id]; - - return service; -} + if (!service_id) + return -EINVAL; -struct rte_service_spec *rte_service_get_by_name(const char *name) -{ - struct rte_service_spec *service = NULL; int i; for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) { if (service_valid(i) && strcmp(name, rte_services[i].spec.name) == 0) { - service = (struct rte_service_spec *)&rte_services[i]; - break; + *service_id = i; + return 0; } } - return service; + return -ENODEV; } const char * @@ -226,19 +230,7 @@ rte_service_probe_capability(uint32_t id, uint32_t capability) { struct rte_service_spec_impl *s; SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL); - return s->spec.capabilities & capability; -} - -int32_t 
-rte_service_is_running(const struct rte_service_spec *spec)
-{
-	const struct rte_service_spec_impl *impl =
-		(const struct rte_service_spec_impl *)spec;
-	if (!impl)
-		return -EINVAL;
-
-	return (impl->runstate == RUNSTATE_RUNNING) &&
-		(impl->num_mapped_cores > 0);
+	return !!(s->spec.capabilities & capability);
 }
 
 int32_t
@@ -263,7 +255,7 @@ rte_service_component_register(const struct rte_service_spec *spec,
 
 	struct rte_service_spec_impl *s = &rte_services[free_slot];
 	s->spec = *spec;
-	s->internal_flags |= SERVICE_F_REGISTERED;
+	s->internal_flags |= SERVICE_F_REGISTERED | SERVICE_F_START_CHECK;
 
 	rte_smp_wmb();
 	rte_service_count++;
@@ -275,59 +267,149 @@ rte_service_component_register(const struct rte_service_spec *spec,
 }
 
 int32_t
-rte_service_unregister(struct rte_service_spec *spec)
+rte_service_component_unregister(uint32_t id)
 {
-	struct rte_service_spec_impl *s = NULL;
-	struct rte_service_spec_impl *spec_impl =
-		(struct rte_service_spec_impl *)spec;
 	uint32_t i;
-	uint32_t service_id;
-	for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
-		if (&rte_services[i] == spec_impl) {
-			s = spec_impl;
-			service_id = i;
-			break;
-		}
-	}
-
-	if (!s)
-		return -EINVAL;
+	struct rte_service_spec_impl *s;
+	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
 
 	rte_service_count--;
 	rte_smp_wmb();
 
 	s->internal_flags &= ~(SERVICE_F_REGISTERED);
 
+	/* clear the run-bit in all cores */
 	for (i = 0; i < RTE_MAX_LCORE; i++)
-		lcore_states[i].service_mask &= ~(UINT64_C(1) << service_id);
+		lcore_states[i].service_mask &= ~(UINT64_C(1) << id);
 
-	memset(&rte_services[service_id], 0,
-			sizeof(struct rte_service_spec_impl));
+	memset(&rte_services[id], 0, sizeof(struct rte_service_spec_impl));
 
 	return 0;
 }
 
 int32_t
-rte_service_start(struct rte_service_spec *service)
+rte_service_component_runstate_set(uint32_t id, uint32_t runstate)
 {
-	struct rte_service_spec_impl *s =
-		(struct rte_service_spec_impl *)service;
-	s->runstate = RUNSTATE_RUNNING;
+	struct rte_service_spec_impl *s;
+	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
+
+	if (runstate)
+		s->comp_runstate = RUNSTATE_RUNNING;
+	else
+		s->comp_runstate = RUNSTATE_STOPPED;
+
 	rte_smp_wmb();
 	return 0;
 }
 
 int32_t
-rte_service_stop(struct rte_service_spec *service)
+rte_service_runstate_set(uint32_t id, uint32_t runstate)
 {
-	struct rte_service_spec_impl *s =
-		(struct rte_service_spec_impl *)service;
-	s->runstate = RUNSTATE_STOPPED;
+	struct rte_service_spec_impl *s;
+	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
+
+	if (runstate)
+		s->app_runstate = RUNSTATE_RUNNING;
+	else
+		s->app_runstate = RUNSTATE_STOPPED;
+
 	rte_smp_wmb();
 	return 0;
 }
 
+int32_t
+rte_service_runstate_get(uint32_t id)
+{
+	struct rte_service_spec_impl *s;
+	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
+	rte_smp_rmb();
+
+	int check_disabled = !(s->internal_flags & SERVICE_F_START_CHECK);
+	int lcore_mapped = (rte_atomic32_read(&s->num_mapped_cores) > 0);
+
+	return (s->app_runstate == RUNSTATE_RUNNING) &&
+		(s->comp_runstate == RUNSTATE_RUNNING) &&
+		(check_disabled | lcore_mapped);
+}
+
+static inline void
+rte_service_runner_do_callback(struct rte_service_spec_impl *s,
+		struct core_state *cs, uint32_t service_idx)
+{
+	void *userdata = s->spec.callback_userdata;
+
+	if (service_stats_enabled(s)) {
+		uint64_t start = rte_rdtsc();
+		s->spec.callback(userdata);
+		uint64_t end = rte_rdtsc();
+		s->cycles_spent += end - start;
+		cs->calls_per_service[service_idx]++;
+		s->calls++;
+	} else
+		s->spec.callback(userdata);
+}
+
+
+static inline int32_t
+service_run(uint32_t i, struct core_state *cs, uint64_t service_mask)
+{
+	if (!service_valid(i))
+		return -EINVAL;
+	struct rte_service_spec_impl *s = &rte_services[i];
+	if (s->comp_runstate != RUNSTATE_RUNNING ||
+			s->app_runstate != RUNSTATE_RUNNING ||
+			!(service_mask & (UINT64_C(1) << i)))
+		return -ENOEXEC;
+
+	/* check do we need cmpset, if MT safe or <= 1 core
+	 * mapped, atomic ops are not required.
+	 */
+	const int use_atomics = (service_mt_safe(s) == 0) &&
+				(rte_atomic32_read(&s->num_mapped_cores) > 1);
+	if (use_atomics) {
+		if (!rte_atomic32_cmpset((uint32_t *)&s->execute_lock, 0, 1))
+			return -EBUSY;
+
+		rte_service_runner_do_callback(s, cs, i);
+		rte_atomic32_clear(&s->execute_lock);
+	} else
+		rte_service_runner_do_callback(s, cs, i);
+
+	return 0;
+}
+
+int32_t rte_service_run_iter_on_app_lcore(uint32_t id,
+		uint32_t serialize_mt_unsafe)
+{
+	/* run service on calling core, using all-ones as the service mask */
+	if (!service_valid(id))
+		return -EINVAL;
+
+	struct core_state *cs = &lcore_states[rte_lcore_id()];
+	struct rte_service_spec_impl *s = &rte_services[id];
+
+	/* Atomically add this core to the mapped cores first, then examine if
+	 * we can run the service. This avoids a race condition between
+	 * checking the value, and atomically adding to the mapped count.
+	 */
+	if (serialize_mt_unsafe)
+		rte_atomic32_inc(&s->num_mapped_cores);
+
+	if (service_mt_safe(s) == 0 &&
+			rte_atomic32_read(&s->num_mapped_cores) > 1) {
+		if (serialize_mt_unsafe)
+			rte_atomic32_dec(&s->num_mapped_cores);
+		return -EBUSY;
+	}
+
+	int ret = service_run(id, cs, UINT64_MAX);
+
+	if (serialize_mt_unsafe)
+		rte_atomic32_dec(&s->num_mapped_cores);
+
+	return ret;
+}
+
 static int32_t
 rte_service_runner_func(void *arg)
 {
@@ -338,35 +420,10 @@ rte_service_runner_func(void *arg)
 
 	while (lcore_states[lcore].runstate == RUNSTATE_RUNNING) {
 		const uint64_t service_mask = cs->service_mask;
-		for (i = 0; i < rte_service_count; i++) {
-			struct rte_service_spec_impl *s = &rte_services[i];
-			if (s->runstate != RUNSTATE_RUNNING ||
-					!(service_mask & (UINT64_C(1) << i)))
-				continue;
-			/* check do we need cmpset, if MT safe or <= 1 core
-			 * mapped, atomic ops are not required.
-			 */
-			const int need_cmpset = !((service_mt_safe(s) == 0) &&
-						(s->num_mapped_cores > 1));
-			uint32_t *lock = (uint32_t *)&s->execute_lock;
-
-			if (need_cmpset || rte_atomic32_cmpset(lock, 0, 1)) {
-				void *userdata = s->spec.callback_userdata;
-
-				if (service_stats_enabled(s)) {
-					uint64_t start = rte_rdtsc();
-					s->spec.callback(userdata);
-					uint64_t end = rte_rdtsc();
-					s->cycles_spent += end - start;
-					cs->calls_per_service[i]++;
-					s->calls++;
-				} else
-					s->spec.callback(userdata);
-
-				if (need_cmpset)
-					rte_atomic32_clear(&s->execute_lock);
-			}
+		for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
+			/* return value ignored as no change to code flow */
+			service_run(i, cs, service_mask);
 		}
 
 		rte_smp_rmb();
@@ -434,7 +491,7 @@ rte_service_start_with_defaults(void)
 	uint32_t count = rte_service_get_count();
 
 	int32_t lcore_iter = 0;
-	uint32_t ids[RTE_MAX_LCORE];
+	uint32_t ids[RTE_MAX_LCORE] = {0};
 	int32_t lcore_count = rte_service_lcore_list(ids, RTE_MAX_LCORE);
 
 	if (lcore_count == 0)
@@ -444,10 +501,6 @@ rte_service_start_with_defaults(void)
 		rte_service_lcore_start(ids[i]);
 
 	for (i = 0; i < count; i++) {
-		struct rte_service_spec *s = rte_service_get_by_id(i);
-		if (!s)
-			return -EINVAL;
-
 		/* do 1:1 core mapping here, with each service getting
 		 * assigned a single core by default. Adding multiple services
 		 * should multiplex to a single core, or 1:1 if there are the
@@ -461,7 +514,7 @@ rte_service_start_with_defaults(void)
 		if (lcore_iter >= lcore_count)
 			lcore_iter = 0;
 
-		ret = rte_service_start(s);
+		ret = rte_service_runstate_set(i, 1);
 		if (ret)
 			return -ENOEXEC;
 	}
@@ -494,15 +547,15 @@ service_update(struct rte_service_spec *service, uint32_t lcore,
 
 	if (set) {
 		if (*set) {
 			lcore_states[lcore].service_mask |= sid_mask;
-			rte_services[sid].num_mapped_cores++;
+			rte_atomic32_inc(&rte_services[sid].num_mapped_cores);
 		} else {
 			lcore_states[lcore].service_mask &= ~(sid_mask);
-			rte_services[sid].num_mapped_cores--;
+			rte_atomic32_dec(&rte_services[sid].num_mapped_cores);
 		}
 	}
 
 	if (enabled)
-		*enabled = (lcore_states[lcore].service_mask & (sid_mask));
+		*enabled = !!(lcore_states[lcore].service_mask & (sid_mask));
 
 	rte_smp_wmb();
 
@@ -540,7 +593,7 @@ int32_t rte_service_lcore_reset_all(void)
 			lcore_states[i].runstate = RUNSTATE_STOPPED;
 	}
 	for (i = 0; i < RTE_SERVICE_NUM_MAX; i++)
-		rte_services[i].num_mapped_cores = 0;
+		rte_atomic32_set(&rte_services[i].num_mapped_cores, 0);
 
 	rte_smp_wmb();
 
@@ -576,7 +629,8 @@ rte_service_lcore_add(uint32_t lcore)
 	lcore_states[lcore].runstate = RUNSTATE_STOPPED;
 
 	rte_smp_wmb();
-	return 0;
+
+	return rte_eal_wait_lcore(lcore);
 }
 
 int32_t
@@ -631,12 +685,12 @@ rte_service_lcore_stop(uint32_t lcore)
 		return -EALREADY;
 
 	uint32_t i;
+	uint64_t service_mask = lcore_states[lcore].service_mask;
 	for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
-		int32_t enabled =
-			lcore_states[i].service_mask & (UINT64_C(1) << i);
-		int32_t service_running = rte_services[i].runstate !=
-						RUNSTATE_STOPPED;
-		int32_t only_core = rte_services[i].num_mapped_cores == 1;
+		int32_t enabled = service_mask & (UINT64_C(1) << i);
+		int32_t service_running = rte_service_runstate_get(i);
+		int32_t only_core = (1 ==
+			rte_atomic32_read(&rte_services[i].num_mapped_cores));
 
 		/* if the core is mapped, and the service is running, and this
 		 * is the only core that is mapped, the service would cease to
@@ -691,28 +745,34 @@ service_dump_calls_per_lcore(FILE *f, uint32_t lcore, uint32_t reset)
 
 	fprintf(f, "\n");
 }
 
-int32_t rte_service_dump(FILE *f, struct rte_service_spec *service)
+int32_t rte_service_dump(FILE *f, uint32_t id)
 {
 	uint32_t i;
+	int print_one = (id != UINT32_MAX);
 	uint64_t total_cycles = 0;
-	for (i = 0; i < rte_service_count; i++) {
+
+	for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
 		if (!service_valid(i))
 			continue;
 		total_cycles += rte_services[i].cycles_spent;
 	}
 
-	if (service) {
-		struct rte_service_spec_impl *s =
-			(struct rte_service_spec_impl *)service;
+	/* print only the specified service */
+	if (print_one) {
+		struct rte_service_spec_impl *s;
+		SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
 		fprintf(f, "Service %s Summary\n", s->spec.name);
 		uint32_t reset = 0;
 		rte_service_dump_one(f, s, total_cycles, reset);
 		return 0;
 	}
 
+	/* print all services, as UINT32_MAX was passed as id */
 	fprintf(f, "Services Summary\n");
-	for (i = 0; i < rte_service_count; i++) {
+	for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
+		if (!service_valid(i))
+			continue;
 		uint32_t reset = 1;
 		rte_service_dump_one(f, &rte_services[i], total_cycles, reset);
 	}
@@ -722,7 +782,7 @@ int32_t rte_service_dump(FILE *f, struct rte_service_spec *service)
 		if (lcore_config[i].core_role != ROLE_SERVICE)
 			continue;
 
-		uint32_t reset = 1;
+		uint32_t reset = 1;
 		service_dump_calls_per_lcore(f, i, reset);
 	}
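
For reference, a minimal sketch of how an application might drive the id-based
calls this patch introduces: register a component, set both runstates, relax
the mapped-core check, and run one iteration on the application lcore. This is
an illustrative sketch, not code from the patch. It assumes the DPDK
17.11-era headers rte_service.h and rte_service_component.h; the service name
"dummy", the callback and the main() scaffolding are hypothetical, while the
rte_service_*() functions are the ones added or reworked above.

#include <stdint.h>
#include <stdio.h>

#include <rte_eal.h>
#include <rte_service.h>
#include <rte_service_component.h>

/* hypothetical callback: performs one iteration of the service's work */
static int32_t
dummy_cb(void *userdata)
{
	(void)userdata;
	return 0;
}

int
main(int argc, char **argv)
{
	if (rte_eal_init(argc, argv) < 0)
		return -1;

	const struct rte_service_spec spec = {
		.name = "dummy",
		.callback = dummy_cb,
		.callback_userdata = NULL,
		.capabilities = 0,	/* not RTE_SERVICE_CAP_MT_SAFE */
		.socket_id = 0,
	};

	/* registration hands back an integer id; every later call takes
	 * the id instead of a struct rte_service_spec pointer */
	uint32_t id;
	if (rte_service_component_register(&spec, &id))
		return -1;

	/* component and application runstates must both be set before
	 * rte_service_runstate_get() reports the service as runnable */
	rte_service_component_runstate_set(id, 1);
	rte_service_runstate_set(id, 1);

	/* no service core is mapped here, so drop the mapped-core check
	 * (SERVICE_F_START_CHECK) to allow running on an app lcore */
	rte_service_set_runstate_mapped_check(id, 0);
	printf("runstate: %d\n", rte_service_runstate_get(id));

	/* run one iteration on the calling lcore; serialize_mt_unsafe=1
	 * makes a concurrent caller of this MT-unsafe service get -EBUSY
	 * instead of racing the callback */
	int ret = rte_service_run_iter_on_app_lcore(id, 1);

	rte_service_dump(stdout, id);	/* stats for this one service */

	rte_service_component_unregister(id);
	return ret;
}

With SERVICE_F_START_CHECK left at its default (set during registration),
rte_service_runstate_get() would keep returning 0 until at least one lcore is
mapped to the service, which is exactly the check_disabled | lcore_mapped
condition in the hunk above.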