diff --git a/lib/librte_eal/common/rte_service.c b/lib/librte_eal/common/rte_service.c
index e598e16396..ae97e6b7fb 100644
--- a/lib/librte_eal/common/rte_service.c
+++ b/lib/librte_eal/common/rte_service.c
@@ -54,6 +54,7 @@
 
 #define SERVICE_F_REGISTERED (1 << 0)
 #define SERVICE_F_STATS_ENABLED (1 << 1)
+#define SERVICE_F_START_CHECK (1 << 2)
 
 /* runstates for services and lcores, denoting if they are active or not */
 #define RUNSTATE_STOPPED 0
@@ -76,7 +77,7 @@ struct rte_service_spec_impl {
 	uint8_t internal_flags;
 
 	/* per service statistics */
-	uint32_t num_mapped_cores;
+	rte_atomic32_t num_mapped_cores;
 	uint64_t calls;
 	uint64_t cycles_spent;
 } __rte_cache_aligned;
@@ -152,7 +153,7 @@ service_valid(uint32_t id)
 		service = &rte_services[id]; \
 } while (0)
 
-/* returns 1 if statistics should be colleced for service
+/* returns 1 if statistics should be collected for service
  * Returns 0 if statistics should not be collected for service
  */
 static inline int
@@ -180,6 +181,19 @@ int32_t rte_service_set_stats_enable(uint32_t id, int32_t enabled)
 	return 0;
 }
 
+int32_t rte_service_set_runstate_mapped_check(uint32_t id, int32_t enabled)
+{
+	struct rte_service_spec_impl *s;
+	SERVICE_VALID_GET_OR_ERR_RET(id, s, 0);
+
+	if (enabled)
+		s->internal_flags |= SERVICE_F_START_CHECK;
+	else
+		s->internal_flags &= ~(SERVICE_F_START_CHECK);
+
+	return 0;
+}
+
 uint32_t
 rte_service_get_count(void)
 {
@@ -241,7 +255,7 @@ rte_service_component_register(const struct rte_service_spec *spec,
 	struct rte_service_spec_impl *s = &rte_services[free_slot];
 	s->spec = *spec;
-	s->internal_flags |= SERVICE_F_REGISTERED;
+	s->internal_flags |= SERVICE_F_REGISTERED | SERVICE_F_START_CHECK;
 
 	rte_smp_wmb();
 	rte_service_count++;
 
@@ -309,9 +323,13 @@ rte_service_runstate_get(uint32_t id)
 	struct rte_service_spec_impl *s;
 	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
 	rte_smp_rmb();
+
+	int check_disabled = !(s->internal_flags & SERVICE_F_START_CHECK);
+	int lcore_mapped = (rte_atomic32_read(&s->num_mapped_cores) > 0);
+
 	return (s->app_runstate == RUNSTATE_RUNNING) &&
 		(s->comp_runstate == RUNSTATE_RUNNING) &&
-		(s->num_mapped_cores > 0);
+		(check_disabled | lcore_mapped);
 }
 
 static inline void
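
The hunks above change when rte_service_runstate_get() reports a service
as runnable: services are registered with SERVICE_F_START_CHECK set, so
by default an lcore must still be mapped, but clearing the flag through
the new rte_service_set_runstate_mapped_check() drops that requirement.
A minimal usage sketch, not part of the patch: the helper name
enable_app_polled_service is hypothetical, and the service component must
separately set its own runstate to running for runstate_get() to return 1.

#include <errno.h>
#include <rte_service.h>

static int
enable_app_polled_service(uint32_t sid)
{
	/* clear SERVICE_F_START_CHECK: runstate_get() no longer requires
	 * num_mapped_cores > 0 for this service
	 */
	int ret = rte_service_set_runstate_mapped_check(sid, 0);
	if (ret)
		return ret;

	/* set the application runstate to running */
	ret = rte_service_runstate_set(sid, 1);
	if (ret)
		return ret;

	/* 1 even with zero mapped service cores, provided the component
	 * runstate is also running
	 */
	return (rte_service_runstate_get(sid) == 1) ? 0 : -EAGAIN;
}
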
@@ -331,6 +349,67 @@ rte_service_runner_do_callback(struct rte_service_spec_impl *s,
 		s->spec.callback(userdata);
 }
 
+
+static inline int32_t
+service_run(uint32_t i, struct core_state *cs, uint64_t service_mask)
+{
+	if (!service_valid(i))
+		return -EINVAL;
+	struct rte_service_spec_impl *s = &rte_services[i];
+	if (s->comp_runstate != RUNSTATE_RUNNING ||
+			s->app_runstate != RUNSTATE_RUNNING ||
+			!(service_mask & (UINT64_C(1) << i)))
+		return -ENOEXEC;
+
+	/* Check whether we need the cmpset: if the service is MT safe or
+	 * <= 1 core is mapped, atomic ops are not required.
+	 */
+	const int use_atomics = (service_mt_safe(s) == 0) &&
+				(rte_atomic32_read(&s->num_mapped_cores) > 1);
+	if (use_atomics) {
+		if (!rte_atomic32_cmpset((uint32_t *)&s->execute_lock, 0, 1))
+			return -EBUSY;
+
+		rte_service_runner_do_callback(s, cs, i);
+		rte_atomic32_clear(&s->execute_lock);
+	} else
+		rte_service_runner_do_callback(s, cs, i);
+
+	return 0;
+}
+
+int32_t rte_service_run_iter_on_app_lcore(uint32_t id,
+		uint32_t serialize_mt_unsafe)
+{
+	/* run service on calling core, using all-ones as the service mask */
+	if (!service_valid(id))
+		return -EINVAL;
+
+	struct core_state *cs = &lcore_states[rte_lcore_id()];
+	struct rte_service_spec_impl *s = &rte_services[id];
+
+	/* Atomically add this core to the mapped cores first, then examine
+	 * if we can run the service. This avoids a race condition between
+	 * checking the value and atomically adding to the mapped count.
+	 */
+	if (serialize_mt_unsafe)
+		rte_atomic32_inc(&s->num_mapped_cores);
+
+	if (service_mt_safe(s) == 0 &&
+			rte_atomic32_read(&s->num_mapped_cores) > 1) {
+		if (serialize_mt_unsafe)
+			rte_atomic32_dec(&s->num_mapped_cores);
+		return -EBUSY;
+	}
+
+	int ret = service_run(id, cs, UINT64_MAX);
+
+	if (serialize_mt_unsafe)
+		rte_atomic32_dec(&s->num_mapped_cores);
+
+	return ret;
+}
+
 static int32_t
 rte_service_runner_func(void *arg)
 {
@@ -343,27 +422,8 @@ rte_service_runner_func(void *arg)
 		const uint64_t service_mask = cs->service_mask;
 
 		for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
-			if (!service_valid(i))
-				continue;
-			struct rte_service_spec_impl *s = &rte_services[i];
-			if (s->comp_runstate != RUNSTATE_RUNNING ||
-					s->app_runstate != RUNSTATE_RUNNING ||
-					!(service_mask & (UINT64_C(1) << i)))
-				continue;
-
-			/* check do we need cmpset, if MT safe or <= 1 core
-			 * mapped, atomic ops are not required.
-			 */
-			const int use_atomics = (service_mt_safe(s) == 0) &&
-					(s->num_mapped_cores > 1);
-			if (use_atomics) {
-				uint32_t *lock = (uint32_t *)&s->execute_lock;
-				if (rte_atomic32_cmpset(lock, 0, 1)) {
-					rte_service_runner_do_callback(s, cs, i);
-					rte_atomic32_clear(&s->execute_lock);
-				}
-			} else
-				rte_service_runner_do_callback(s, cs, i);
+			/* return value ignored as no change to code flow */
+			service_run(i, cs, service_mask);
 		}
 
 		rte_smp_rmb();
@@ -487,10 +547,10 @@ service_update(struct rte_service_spec *service, uint32_t lcore,
 	if (set) {
 		if (*set) {
 			lcore_states[lcore].service_mask |= sid_mask;
-			rte_services[sid].num_mapped_cores++;
+			rte_atomic32_inc(&rte_services[sid].num_mapped_cores);
 		} else {
 			lcore_states[lcore].service_mask &= ~(sid_mask);
-			rte_services[sid].num_mapped_cores--;
+			rte_atomic32_dec(&rte_services[sid].num_mapped_cores);
 		}
 	}
 
@@ -533,7 +593,7 @@ int32_t rte_service_lcore_reset_all(void)
 		lcore_states[i].runstate = RUNSTATE_STOPPED;
 	}
 	for (i = 0; i < RTE_SERVICE_NUM_MAX; i++)
-		rte_services[i].num_mapped_cores = 0;
+		rte_atomic32_set(&rte_services[i].num_mapped_cores, 0);
 
 	rte_smp_wmb();
 
@@ -629,7 +689,8 @@ rte_service_lcore_stop(uint32_t lcore)
 	for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
 		int32_t enabled = service_mask & (UINT64_C(1) << i);
 		int32_t service_running = rte_service_runstate_get(i);
-		int32_t only_core = rte_services[i].num_mapped_cores == 1;
+		int32_t only_core = (1 ==
+			rte_atomic32_read(&rte_services[i].num_mapped_cores));
 
 		/* if the core is mapped, and the service is running, and this
 		 * is the only core that is mapped, the service would cease to
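
The new rte_service_run_iter_on_app_lcore() added above is intended to be
called from an application lcore's own loop. A minimal polling sketch, not
from the patch: the function name app_poll_service and the quit flag are
hypothetical, and the service id is assumed to be valid and runnable.

#include <errno.h>
#include <stdint.h>
#include <rte_service.h>

static void
app_poll_service(uint32_t sid, volatile int *quit)
{
	while (!*quit) {
		/* serialize_mt_unsafe == 1: the call briefly bumps
		 * num_mapped_cores and backs off with -EBUSY rather than
		 * racing another core on an MT-unsafe service
		 */
		int32_t ret = rte_service_run_iter_on_app_lcore(sid, 1);
		if (ret == -EBUSY)
			continue; /* another core is executing the service */
		if (ret == -EINVAL || ret == -ENOEXEC)
			break; /* invalid id, or service not runnable */
	}
}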