-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2017 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
*/
#include <stdio.h>
#define SERVICE_F_REGISTERED (1 << 0)
#define SERVICE_F_STATS_ENABLED (1 << 1)
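+/* set if the service must have an lcore mapped to be reported as
+ * runnable; set by default at registration, toggled with
+ * rte_service_set_runstate_mapped_check()
+ */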
+#define SERVICE_F_START_CHECK (1 << 2)
/* runstates for services and lcores, denoting if they are active or not */
#define RUNSTATE_STOPPED 0
rte_atomic32_t execute_lock;
/* API set/get-able variables */
- int32_t runstate;
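+ /* both the app and component runstates must be RUNSTATE_RUNNING
+ * for the service to be run; see service_run()
+ */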
+ int8_t app_runstate;
+ int8_t comp_runstate;
uint8_t internal_flags;
/* per service statistics */
- uint32_t num_mapped_cores;
+ rte_atomic32_t num_mapped_cores;
uint64_t calls;
uint64_t cycles_spent;
} __rte_cache_aligned;
service = &rte_services[id]; \
} while (0)
-/* returns 1 if statistics should be colleced for service
+/* Returns 1 if statistics should be collected for service
* Returns 0 if statistics should not be collected for service
*/
static inline int
return 0;
}
+int32_t
+rte_service_set_runstate_mapped_check(uint32_t id, int32_t enabled)
+{
+ struct rte_service_spec_impl *s;
+ SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
+
+ if (enabled)
+ s->internal_flags |= SERVICE_F_START_CHECK;
+ else
+ s->internal_flags &= ~(SERVICE_F_START_CHECK);
+
+ return 0;
+}
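+
+/* Usage sketch (illustrative, with `id` the id of a registered service): an
+ * application that drives a service itself via
+ * rte_service_run_iter_on_app_lcore() can disable the mapped-lcore check,
+ * so rte_service_runstate_get() reports the service as runnable even with
+ * no service core mapped:
+ *
+ *   rte_service_set_runstate_mapped_check(id, 0);
+ *   rte_service_runstate_set(id, 1);
+ */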
+
uint32_t
rte_service_get_count(void)
{
struct rte_service_spec_impl *s = &rte_services[free_slot];
s->spec = *spec;
- s->internal_flags |= SERVICE_F_REGISTERED;
+ s->internal_flags |= SERVICE_F_REGISTERED | SERVICE_F_START_CHECK;
rte_smp_wmb();
rte_service_count++;
return 0;
}
+int32_t
+rte_service_component_runstate_set(uint32_t id, uint32_t runstate)
+{
+ struct rte_service_spec_impl *s;
+ SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
+
+ if (runstate)
+ s->comp_runstate = RUNSTATE_RUNNING;
+ else
+ s->comp_runstate = RUNSTATE_STOPPED;
+
+ rte_smp_wmb();
+ return 0;
+}
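+
+/* Usage sketch (illustrative, with `id` the id of a registered service): a
+ * component such as a PMD flags its service as ready, while the application
+ * keeps final control; the service runs only when both runstates are set:
+ *
+ *   rte_service_component_runstate_set(id, 1);
+ *   rte_service_runstate_set(id, 1);
+ */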
+
int32_t
rte_service_runstate_set(uint32_t id, uint32_t runstate)
{
SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
if (runstate)
- s->runstate = RUNSTATE_RUNNING;
+ s->app_runstate = RUNSTATE_RUNNING;
else
- s->runstate = RUNSTATE_STOPPED;
+ s->app_runstate = RUNSTATE_STOPPED;
rte_smp_wmb();
return 0;
{
struct rte_service_spec_impl *s;
SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
+ rte_smp_rmb();
+
+ int check_disabled = !(s->internal_flags & SERVICE_F_START_CHECK);
+ int lcore_mapped = (rte_atomic32_read(&s->num_mapped_cores) > 0);
- return (s->runstate == RUNSTATE_RUNNING) && (s->num_mapped_cores > 0);
+ return (s->app_runstate == RUNSTATE_RUNNING) &&
+ (s->comp_runstate == RUNSTATE_RUNNING) &&
+ (check_disabled | lcore_mapped);
}
static inline void
s->spec.callback(userdata);
}
+
+static inline int32_t
+service_run(uint32_t i, struct core_state *cs, uint64_t service_mask)
+{
+ if (!service_valid(i))
+ return -EINVAL;
+ struct rte_service_spec_impl *s = &rte_services[i];
+ if (s->comp_runstate != RUNSTATE_RUNNING ||
+ s->app_runstate != RUNSTATE_RUNNING ||
+ !(service_mask & (UINT64_C(1) << i)))
+ return -ENOEXEC;
+
+ /* check whether cmpset is needed: if the service is MT safe, or
+ * has at most one core mapped, atomic ops are not required.
+ */
+ const int use_atomics = (service_mt_safe(s) == 0) &&
+ (rte_atomic32_read(&s->num_mapped_cores) > 1);
+ if (use_atomics) {
+ if (!rte_atomic32_cmpset((uint32_t *)&s->execute_lock, 0, 1))
+ return -EBUSY;
+
+ rte_service_runner_do_callback(s, cs, i);
+ rte_atomic32_clear(&s->execute_lock);
+ } else
+ rte_service_runner_do_callback(s, cs, i);
+
+ return 0;
+}
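+
+/* service_run() is the single execution path for a service callback: it is
+ * called by the service lcore runner below and by
+ * rte_service_run_iter_on_app_lcore(), so the runstate, mask and MT-safety
+ * checks behave identically on both paths.
+ */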
+
+int32_t
+rte_service_run_iter_on_app_lcore(uint32_t id, uint32_t serialize_mt_unsafe)
+{
+ /* run service on calling core, using all-ones as the service mask */
+ if (!service_valid(id))
+ return -EINVAL;
+
+ struct core_state *cs = &lcore_states[rte_lcore_id()];
+ struct rte_service_spec_impl *s = &rte_services[id];
+
+ /* Atomically add this core to the mapped cores first, then examine if
+ * we can run the service. This avoids a race condition between
+ * checking the value, and atomically adding to the mapped count.
+ */
+ if (serialize_mt_unsafe)
+ rte_atomic32_inc(&s->num_mapped_cores);
+
+ if (service_mt_safe(s) == 0 &&
+ rte_atomic32_read(&s->num_mapped_cores) > 1) {
+ if (serialize_mt_unsafe)
+ rte_atomic32_dec(&s->num_mapped_cores);
+ return -EBUSY;
+ }
+
+ int ret = service_run(id, cs, UINT64_MAX);
+
+ if (serialize_mt_unsafe)
+ rte_atomic32_dec(&s->num_mapped_cores);
+
+ return ret;
+}
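+
+/* Usage sketch (illustrative, with `id` the id of a registered service):
+ * run one iteration from an application lcore, serializing against service
+ * cores when the service is not MT safe:
+ *
+ *   int ret = rte_service_run_iter_on_app_lcore(id, 1);
+ *   if (ret == -EBUSY)
+ *       rte_pause();
+ *
+ * -EBUSY indicates another lcore currently holds the service; retry later.
+ */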
+
static int32_t
rte_service_runner_func(void *arg)
{
const uint64_t service_mask = cs->service_mask;
for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
- if (!service_valid(i))
- continue;
- struct rte_service_spec_impl *s = &rte_services[i];
- if (s->runstate != RUNSTATE_RUNNING ||
- !(service_mask & (UINT64_C(1) << i)))
- continue;
-
- /* check do we need cmpset, if MT safe or <= 1 core
- * mapped, atomic ops are not required.
- */
- const int use_atomics = (service_mt_safe(s) == 0) &&
- (s->num_mapped_cores > 1);
- if (use_atomics) {
- uint32_t *lock = (uint32_t *)&s->execute_lock;
- if (rte_atomic32_cmpset(lock, 0, 1)) {
- rte_service_runner_do_callback(s, cs, i);
- rte_atomic32_clear(&s->execute_lock);
- }
- } else
- rte_service_runner_do_callback(s, cs, i);
+ /* the return value is ignored, as it does not change control flow here */
+ service_run(i, cs, service_mask);
}
rte_smp_rmb();
uint32_t count = rte_service_get_count();
int32_t lcore_iter = 0;
- uint32_t ids[RTE_MAX_LCORE];
+ uint32_t ids[RTE_MAX_LCORE] = {0};
int32_t lcore_count = rte_service_lcore_list(ids, RTE_MAX_LCORE);
if (lcore_count == 0)
uint64_t sid_mask = UINT64_C(1) << sid;
if (set) {
- if (*set) {
+ uint64_t lcore_mapped = lcore_states[lcore].service_mask &
+ sid_mask;
+
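+ /* only modify the refcount on a genuine state change, so mapping
+ * the same (lcore, service) pair twice cannot skew num_mapped_cores
+ */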
+ if (*set && !lcore_mapped) {
lcore_states[lcore].service_mask |= sid_mask;
- rte_services[sid].num_mapped_cores++;
- } else {
+ rte_atomic32_inc(&rte_services[sid].num_mapped_cores);
+ }
+ if (!*set && lcore_mapped) {
lcore_states[lcore].service_mask &= ~(sid_mask);
- rte_services[sid].num_mapped_cores--;
+ rte_atomic32_dec(&rte_services[sid].num_mapped_cores);
}
}
return ret;
}
-int32_t rte_service_lcore_reset_all(void)
-{
- /* loop over cores, reset all to mask 0 */
- uint32_t i;
- for (i = 0; i < RTE_MAX_LCORE; i++) {
- lcore_states[i].service_mask = 0;
- lcore_states[i].is_service_core = 0;
- lcore_states[i].runstate = RUNSTATE_STOPPED;
- }
- for (i = 0; i < RTE_SERVICE_NUM_MAX; i++)
- rte_services[i].num_mapped_cores = 0;
-
- rte_smp_wmb();
-
- return 0;
-}
-
static void
set_lcore_state(uint32_t lcore, int32_t state)
{
lcore_states[lcore].is_service_core = (state == ROLE_SERVICE);
}
+int32_t
+rte_service_lcore_reset_all(void)
+{
+ /* loop over lcores, resetting service cores back to mask 0 and ROLE_RTE */
+ uint32_t i;
+ for (i = 0; i < RTE_MAX_LCORE; i++) {
+ if (lcore_states[i].is_service_core) {
+ lcore_states[i].service_mask = 0;
+ set_lcore_state(i, ROLE_RTE);
+ lcore_states[i].runstate = RUNSTATE_STOPPED;
+ }
+ }
+ for (i = 0; i < RTE_SERVICE_NUM_MAX; i++)
+ rte_atomic32_set(&rte_services[i].num_mapped_cores, 0);
+
+ rte_smp_wmb();
+
+ return 0;
+}
+
int32_t
rte_service_lcore_add(uint32_t lcore)
{
lcore_states[lcore].runstate = RUNSTATE_STOPPED;
rte_smp_wmb();
- return 0;
+
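+ /* wait for the lcore to return to WAIT state; rte_eal_wait_lcore()
+ * returns the last launched job's status, or 0 if already waiting,
+ * so the core can later be launched to run services
+ */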
+ return rte_eal_wait_lcore(lcore);
}
int32_t
return -EALREADY;
uint32_t i;
+ uint64_t service_mask = lcore_states[lcore].service_mask;
for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
- int32_t enabled =
- lcore_states[i].service_mask & (UINT64_C(1) << i);
- int32_t service_running = rte_services[i].runstate !=
- RUNSTATE_STOPPED;
- int32_t only_core = rte_services[i].num_mapped_cores == 1;
+ int32_t enabled = service_mask & (UINT64_C(1) << i);
+ int32_t service_running = rte_service_runstate_get(i);
+ int32_t only_core = (1 ==
+ rte_atomic32_read(&rte_services[i].num_mapped_cores));
/* if the core is mapped, and the service is running, and this
* is the only core that is mapped, the service would cease to
if (lcore_config[i].core_role != ROLE_SERVICE)
continue;
- uint32_t reset = 0;
+ uint32_t reset = 1;
service_dump_calls_per_lcore(f, i, reset);
}