/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <stdio.h>
#include <unistd.h>
#include <inttypes.h>
#include <string.h>

#include <rte_service.h>
#include <rte_service_component.h>

#include <rte_lcore.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_atomic.h>
#include <rte_malloc.h>
#include <rte_spinlock.h>

#include "eal_private.h"

#define RTE_SERVICE_NUM_MAX 64

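/* Internal per-service flags kept in internal_flags: REGISTERED marks the
 * slot as in use, STATS_ENABLED gates the call/cycle accounting in the
 * runner, and START_CHECK requires at least one mapped lcore before the
 * service reports as runnable.
 */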
#define SERVICE_F_REGISTERED    (1 << 0)
#define SERVICE_F_STATS_ENABLED (1 << 1)
#define SERVICE_F_START_CHECK   (1 << 2)

/* runstates for services and lcores, denoting if they are active or not */
#define RUNSTATE_STOPPED 0
#define RUNSTATE_RUNNING 1

/* internal representation of a service */
struct rte_service_spec_impl {
	/* public part of the struct */
	struct rte_service_spec spec;

	/* spin lock that when set indicates a service core is currently
	 * running this service callback. When not set, a core may take the
	 * lock and then run the service callback.
	 */
	rte_spinlock_t execute_lock;

	/* API set/get-able variables */
	int8_t app_runstate;
	int8_t comp_runstate;
	uint8_t internal_flags;

	/* per service statistics */
	/* Indicates how many cores the service is mapped to run on.
	 * It does not indicate the number of cores the service is running
	 * on currently.
	 */
	uint32_t num_mapped_cores;
	uint64_t calls;
	uint64_t cycles_spent;
} __rte_cache_aligned;

/* the internal values of a service core */
struct core_state {
	/* bitmask of service IDs mapped to run on this core */
	uint64_t service_mask;
	uint8_t runstate; /* running or stopped */
	uint8_t thread_active; /* indicates when thread is in service_run() */
	uint8_t is_service_core; /* set if core is currently a service core */
	uint8_t service_active_on_lcore[RTE_SERVICE_NUM_MAX];
	uint64_t loops;
	uint64_t calls_per_service[RTE_SERVICE_NUM_MAX];
} __rte_cache_aligned;

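/* library-wide state: the service array and the per-lcore state array are
 * allocated once in rte_service_init() and freed in rte_service_finalize().
 */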
static uint32_t rte_service_count;
static struct rte_service_spec_impl *rte_services;
static struct core_state *lcore_states;
static uint32_t rte_service_library_initialized;

int32_t
rte_service_init(void)
{
	if (rte_service_library_initialized) {
		RTE_LOG(NOTICE, EAL,
			"service library init() called, init flag %d\n",
			rte_service_library_initialized);
		return -EALREADY;
	}

	rte_services = rte_calloc("rte_services", RTE_SERVICE_NUM_MAX,
			sizeof(struct rte_service_spec_impl),
			RTE_CACHE_LINE_SIZE);
	if (!rte_services) {
		RTE_LOG(ERR, EAL, "error allocating rte services array\n");
		goto fail_mem;
	}

	lcore_states = rte_calloc("rte_service_core_states", RTE_MAX_LCORE,
			sizeof(struct core_state), RTE_CACHE_LINE_SIZE);
	if (!lcore_states) {
		RTE_LOG(ERR, EAL, "error allocating core states array\n");
		goto fail_mem;
	}

	int i;
	struct rte_config *cfg = rte_eal_get_configuration();
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		if (lcore_config[i].core_role == ROLE_SERVICE) {
			if ((unsigned int)i == cfg->main_lcore)
				continue;
			rte_service_lcore_add(i);
		}
	}

	rte_service_library_initialized = 1;
	return 0;
fail_mem:
	rte_free(rte_services);
	rte_free(lcore_states);
	return -ENOMEM;
}

void
rte_service_finalize(void)
{
	if (!rte_service_library_initialized)
		return;

	rte_service_lcore_reset_all();
	rte_eal_mp_wait_lcore();

	rte_free(rte_services);
	rte_free(lcore_states);

	rte_service_library_initialized = 0;
}

/* returns 1 if service is registered and has not been unregistered
 * Returns 0 if service never registered, or has been unregistered
 */
static inline int
service_valid(uint32_t id)
{
	return !!(rte_services[id].internal_flags & SERVICE_F_REGISTERED);
}

static struct rte_service_spec_impl *
service_get(uint32_t id)
{
	return &rte_services[id];
}

/* validate ID and retrieve service pointer, or return error value */
#define SERVICE_VALID_GET_OR_ERR_RET(id, service, retval) do {	\
	if (id >= RTE_SERVICE_NUM_MAX || !service_valid(id))	\
		return retval;					\
	service = &rte_services[id];				\
} while (0)

/* returns 1 if statistics should be collected for service
 * Returns 0 if statistics should not be collected for service
 */
static inline int
service_stats_enabled(struct rte_service_spec_impl *impl)
{
	return !!(impl->internal_flags & SERVICE_F_STATS_ENABLED);
}

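/* returns 1 if the service is multi-thread safe and may run on several
 * lcores in parallel without taking the execute_lock.
 */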
static inline int
service_mt_safe(struct rte_service_spec_impl *s)
{
	return !!(s->spec.capabilities & RTE_SERVICE_CAP_MT_SAFE);
}

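/* Note: an invalid ID here returns 0 rather than -EINVAL, as the
 * SERVICE_VALID_GET_OR_ERR_RET macro is invoked with retval 0.
 */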
int32_t
rte_service_set_stats_enable(uint32_t id, int32_t enabled)
{
	struct rte_service_spec_impl *s;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, 0);

	if (enabled)
		s->internal_flags |= SERVICE_F_STATS_ENABLED;
	else
		s->internal_flags &= ~(SERVICE_F_STATS_ENABLED);

	return 0;
}

int32_t
rte_service_set_runstate_mapped_check(uint32_t id, int32_t enabled)
{
	struct rte_service_spec_impl *s;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, 0);

	if (enabled)
		s->internal_flags |= SERVICE_F_START_CHECK;
	else
		s->internal_flags &= ~(SERVICE_F_START_CHECK);

	return 0;
}

uint32_t
rte_service_get_count(void)
{
	return rte_service_count;
}

int32_t
rte_service_get_by_name(const char *name, uint32_t *service_id)
{
	if (!service_id)
		return -EINVAL;

	int i;
	for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
		if (service_valid(i) &&
				strcmp(name, rte_services[i].spec.name) == 0) {
			*service_id = i;
			return 0;
		}
	}

	return -ENODEV;
}

const char *
rte_service_get_name(uint32_t id)
{
	struct rte_service_spec_impl *s;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, 0);
	return s->spec.name;
}

int32_t
rte_service_probe_capability(uint32_t id, uint32_t capability)
{
	struct rte_service_spec_impl *s;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
	return !!(s->spec.capabilities & capability);
}

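/* register a new service: find a free slot in the services array, copy
 * the spec into it, and mark it registered with start-check enabled.
 */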
int32_t
rte_service_component_register(const struct rte_service_spec *spec,
		uint32_t *id_ptr)
{
	uint32_t i;
	int32_t free_slot = -1;

	if (spec->callback == NULL || strlen(spec->name) == 0)
		return -EINVAL;

	for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
		if (!service_valid(i)) {
			free_slot = i;
			break;
		}
	}

	if ((free_slot < 0) || (i == RTE_SERVICE_NUM_MAX))
		return -ENOSPC;

	struct rte_service_spec_impl *s = &rte_services[free_slot];
	s->spec = *spec;
	s->internal_flags |= SERVICE_F_REGISTERED | SERVICE_F_START_CHECK;

	rte_service_count++;

	if (id_ptr)
		*id_ptr = free_slot;

	return 0;
}

int32_t
rte_service_component_unregister(uint32_t id)
{
	uint32_t i;
	struct rte_service_spec_impl *s;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);

	rte_service_count--;

	s->internal_flags &= ~(SERVICE_F_REGISTERED);

	/* clear the run-bit in all cores */
	for (i = 0; i < RTE_MAX_LCORE; i++)
		lcore_states[i].service_mask &= ~(UINT64_C(1) << id);

	memset(&rte_services[id], 0, sizeof(struct rte_service_spec_impl));

	return 0;
}

int32_t
rte_service_component_runstate_set(uint32_t id, uint32_t runstate)
{
	struct rte_service_spec_impl *s;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);

	/* comp_runstate acts as the guard variable. Use store-release
	 * memory order. This synchronizes with load-acquire in
	 * service_run and service_runstate_get functions.
	 */
	if (runstate)
		__atomic_store_n(&s->comp_runstate, RUNSTATE_RUNNING,
			__ATOMIC_RELEASE);
	else
		__atomic_store_n(&s->comp_runstate, RUNSTATE_STOPPED,
			__ATOMIC_RELEASE);

	return 0;
}

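/* The application run-state below is AND'ed with the component run-state
 * above: a service executes only when both are set to RUNSTATE_RUNNING.
 */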
int32_t
rte_service_runstate_set(uint32_t id, uint32_t runstate)
{
	struct rte_service_spec_impl *s;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);

	/* app_runstate acts as the guard variable. Use store-release
	 * memory order. This synchronizes with load-acquire in
	 * service_run and rte_service_runstate_get.
	 */
	if (runstate)
		__atomic_store_n(&s->app_runstate, RUNSTATE_RUNNING,
			__ATOMIC_RELEASE);
	else
		__atomic_store_n(&s->app_runstate, RUNSTATE_STOPPED,
			__ATOMIC_RELEASE);

	return 0;
}

int32_t
rte_service_runstate_get(uint32_t id)
{
	struct rte_service_spec_impl *s;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);

	/* comp_runstate and app_runstate act as the guard variables.
	 * Use load-acquire memory order. This synchronizes with
	 * store-release in service state set functions.
	 */
	if (__atomic_load_n(&s->comp_runstate, __ATOMIC_ACQUIRE) ==
			RUNSTATE_RUNNING &&
	    __atomic_load_n(&s->app_runstate, __ATOMIC_ACQUIRE) ==
			RUNSTATE_RUNNING) {
		int check_disabled = !(s->internal_flags &
			SERVICE_F_START_CHECK);
		int lcore_mapped = (__atomic_load_n(&s->num_mapped_cores,
			__ATOMIC_RELAXED) > 0);

		return (check_disabled | lcore_mapped);
	}

	return 0;
}

static inline void
service_runner_do_callback(struct rte_service_spec_impl *s,
		struct core_state *cs, uint32_t service_idx)
{
	void *userdata = s->spec.callback_userdata;

	if (service_stats_enabled(s)) {
		uint64_t start = rte_rdtsc();
		s->spec.callback(userdata);
		uint64_t end = rte_rdtsc();
		s->cycles_spent += end - start;
		cs->calls_per_service[service_idx]++;
		s->calls++;
	} else
		s->spec.callback(userdata);
}

/* Expects the service 's' is valid. */
static int32_t
service_run(uint32_t i, struct core_state *cs, uint64_t service_mask,
		struct rte_service_spec_impl *s, uint32_t serialize_mt_unsafe)
{
	if (!s)
		return -EINVAL;

	/* comp_runstate and app_runstate act as the guard variables.
	 * Use load-acquire memory order. This synchronizes with
	 * store-release in service state set functions.
	 */
	if (__atomic_load_n(&s->comp_runstate, __ATOMIC_ACQUIRE) !=
			RUNSTATE_RUNNING ||
	    __atomic_load_n(&s->app_runstate, __ATOMIC_ACQUIRE) !=
			RUNSTATE_RUNNING ||
	    !(service_mask & (UINT64_C(1) << i))) {
		cs->service_active_on_lcore[i] = 0;
		return -ENOEXEC;
	}

	cs->service_active_on_lcore[i] = 1;

	if ((service_mt_safe(s) == 0) && (serialize_mt_unsafe == 1)) {
		if (!rte_spinlock_trylock(&s->execute_lock))
			return -EBUSY;

		service_runner_do_callback(s, cs, i);
		rte_spinlock_unlock(&s->execute_lock);
	} else
		service_runner_do_callback(s, cs, i);

	return 0;
}

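/* check all service lcores for the given service's active flag, which is
 * updated by each lcore's most recent service_run() pass.
 */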
int32_t
rte_service_may_be_active(uint32_t id)
{
	uint32_t ids[RTE_MAX_LCORE] = {0};
	int32_t lcore_count = rte_service_lcore_list(ids, RTE_MAX_LCORE);
	int i;

	if (id >= RTE_SERVICE_NUM_MAX || !service_valid(id))
		return -EINVAL;

	for (i = 0; i < lcore_count; i++) {
		if (lcore_states[ids[i]].service_active_on_lcore[id])
			return 1;
	}

	return 0;
}

int32_t
rte_service_run_iter_on_app_lcore(uint32_t id, uint32_t serialize_mt_unsafe)
{
	struct core_state *cs = &lcore_states[rte_lcore_id()];
	struct rte_service_spec_impl *s;

	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);

	/* Increment num_mapped_cores to reflect that this core is
	 * now mapped and capable of running the service.
	 */
	__atomic_add_fetch(&s->num_mapped_cores, 1, __ATOMIC_RELAXED);

	int ret = service_run(id, cs, UINT64_MAX, s, serialize_mt_unsafe);

	__atomic_sub_fetch(&s->num_mapped_cores, 1, __ATOMIC_RELAXED);

	return ret;
}

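/* main loop of a service core: repeatedly run every registered service
 * mapped to this lcore until the core's runstate is set to stopped, then
 * flag the thread as no longer active.
 */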
static int32_t
service_runner_func(void *arg)
{
	RTE_SET_USED(arg);
	uint32_t i;
	const int lcore = rte_lcore_id();
	struct core_state *cs = &lcore_states[lcore];

	__atomic_store_n(&cs->thread_active, 1, __ATOMIC_SEQ_CST);

	/* runstate acts as the guard variable. Use load-acquire
	 * memory order here to synchronize with store-release
	 * in runstate update functions.
	 */
	while (__atomic_load_n(&cs->runstate, __ATOMIC_ACQUIRE) ==
			RUNSTATE_RUNNING) {
		const uint64_t service_mask = cs->service_mask;

		for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
			if (!service_valid(i))
				continue;
			/* return value ignored as no change to code flow */
			service_run(i, cs, service_mask, service_get(i), 1);
		}

		cs->loops++;
	}

	/* Use SEQ CST memory ordering to avoid any re-ordering around
	 * this store, ensuring that once this store is visible, the service
	 * lcore thread really is done in service cores code.
	 */
	__atomic_store_n(&cs->thread_active, 0, __ATOMIC_SEQ_CST);
	return 0;
}

int32_t
rte_service_lcore_may_be_active(uint32_t lcore)
{
	if (lcore >= RTE_MAX_LCORE || !lcore_states[lcore].is_service_core)
		return -EINVAL;

	/* Load thread_active using ACQUIRE to avoid instructions dependent on
	 * the result being re-ordered before this load completes.
	 */
	return __atomic_load_n(&lcore_states[lcore].thread_active,
			__ATOMIC_ACQUIRE);
}

int32_t
rte_service_lcore_count(void)
{
	int32_t count = 0;

	uint32_t i;
	for (i = 0; i < RTE_MAX_LCORE; i++)
		count += lcore_states[i].is_service_core;

	return count;
}

int32_t
rte_service_lcore_list(uint32_t array[], uint32_t n)
{
	uint32_t count = rte_service_lcore_count();
	if (count > n)
		return -ENOMEM;

	if (!array)
		return -EINVAL;

	uint32_t i;
	uint32_t idx = 0;
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		struct core_state *cs = &lcore_states[i];
		if (cs->is_service_core) {
			array[idx] = i;
			idx++;
		}
	}

	return count;
}

int32_t
rte_service_lcore_count_services(uint32_t lcore)
{
	if (lcore >= RTE_MAX_LCORE)
		return -EINVAL;

	struct core_state *cs = &lcore_states[lcore];
	if (!cs->is_service_core)
		return -ENOTSUP;

	return __builtin_popcountll(cs->service_mask);
}

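/* Illustrative call sequence for manual (non-default) setup; the service
 * name "example" and lcore 7 are placeholders, the functions are the
 * public API implemented in this file:
 *
 *	uint32_t sid;
 *	rte_service_get_by_name("example", &sid);  // look up the service
 *	rte_service_lcore_add(7);                  // make lcore 7 a service core
 *	rte_service_map_lcore_set(sid, 7, 1);      // map the service to it
 *	rte_service_runstate_set(sid, 1);          // allow the service to run
 *	rte_service_lcore_start(7);                // begin polling
 *
 * The helper below automates this by mapping services onto the available
 * service cores 1:1, wrapping around when services outnumber cores.
 */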
int32_t
rte_service_start_with_defaults(void)
{
	/* create a default mapping from cores to services, then start the
	 * services to make them transparent to unaware applications.
	 */
	uint32_t i;
	int ret;
	uint32_t count = rte_service_get_count();

	int32_t lcore_iter = 0;
	uint32_t ids[RTE_MAX_LCORE] = {0};
	int32_t lcore_count = rte_service_lcore_list(ids, RTE_MAX_LCORE);

	if (lcore_count == 0)
		return -ENOTSUP;

	for (i = 0; (int)i < lcore_count; i++)
		rte_service_lcore_start(ids[i]);

	for (i = 0; i < count; i++) {
		/* do 1:1 core mapping here, with each service getting
		 * assigned a single core by default. Adding multiple services
		 * should multiplex to a single core, or 1:1 if there are the
		 * same amount of services as service-cores
		 */
		ret = rte_service_map_lcore_set(i, ids[lcore_iter], 1);
		if (ret)
			return -ENODEV;

		lcore_iter++;
		if (lcore_iter >= lcore_count)
			lcore_iter = 0;

		ret = rte_service_runstate_set(i, 1);
		if (ret)
			return -ENOEXEC;
	}

	return 0;
}

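/* update and/or query the lcore <-> service mapping: 'set' (if non-NULL)
 * requests a mapping change, 'enabled' (if non-NULL) receives the
 * resulting mapping state.
 */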
static int32_t
service_update(uint32_t sid, uint32_t lcore, uint32_t *set, uint32_t *enabled)
{
	/* validate ID, or return error value */
	if (sid >= RTE_SERVICE_NUM_MAX || !service_valid(sid) ||
	    lcore >= RTE_MAX_LCORE || !lcore_states[lcore].is_service_core)
		return -EINVAL;

	uint64_t sid_mask = UINT64_C(1) << sid;
	if (set) {
		uint64_t lcore_mapped = lcore_states[lcore].service_mask &
			sid_mask;

		if (*set && !lcore_mapped) {
			lcore_states[lcore].service_mask |= sid_mask;
			__atomic_add_fetch(&rte_services[sid].num_mapped_cores,
				1, __ATOMIC_RELAXED);
		}
		if (!*set && lcore_mapped) {
			lcore_states[lcore].service_mask &= ~(sid_mask);
			__atomic_sub_fetch(&rte_services[sid].num_mapped_cores,
				1, __ATOMIC_RELAXED);
		}
	}

	if (enabled)
		*enabled = !!(lcore_states[lcore].service_mask & (sid_mask));

	return 0;
}

int32_t
rte_service_map_lcore_set(uint32_t id, uint32_t lcore, uint32_t enabled)
{
	uint32_t on = enabled > 0;
	return service_update(id, lcore, &on, 0);
}

int32_t
rte_service_map_lcore_get(uint32_t id, uint32_t lcore)
{
	uint32_t enabled;
	int ret = service_update(id, lcore, 0, &enabled);
	if (ret == 0)
		return enabled;
	return ret;
}

static void
set_lcore_state(uint32_t lcore, int32_t state)
{
	/* mark core state in hugepage backed config */
	struct rte_config *cfg = rte_eal_get_configuration();
	cfg->lcore_role[lcore] = state;

	/* mark state in process local lcore_config */
	lcore_config[lcore].core_role = state;

	/* update per-lcore optimized state tracking */
	lcore_states[lcore].is_service_core = (state == ROLE_SERVICE);
}

int32_t
rte_service_lcore_reset_all(void)
{
	/* loop over cores, reset all to mask 0 */
	uint32_t i;
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		if (lcore_states[i].is_service_core) {
			lcore_states[i].service_mask = 0;
			set_lcore_state(i, ROLE_RTE);
			/* runstate acts as the guard variable. Use
			 * store-release memory order here to synchronize
			 * with load-acquire in runstate read functions.
			 */
			__atomic_store_n(&lcore_states[i].runstate,
				RUNSTATE_STOPPED, __ATOMIC_RELEASE);
		}
	}
	for (i = 0; i < RTE_SERVICE_NUM_MAX; i++)
		__atomic_store_n(&rte_services[i].num_mapped_cores, 0,
			__ATOMIC_RELAXED);

	return 0;
}

int32_t
rte_service_lcore_add(uint32_t lcore)
{
	if (lcore >= RTE_MAX_LCORE)
		return -EINVAL;
	if (lcore_states[lcore].is_service_core)
		return -EALREADY;

	set_lcore_state(lcore, ROLE_SERVICE);

	/* ensure that after adding a core the mask and state are defaults */
	lcore_states[lcore].service_mask = 0;
	/* Use store-release memory order here to synchronize with
	 * load-acquire in runstate read functions.
	 */
	__atomic_store_n(&lcore_states[lcore].runstate, RUNSTATE_STOPPED,
		__ATOMIC_RELEASE);

	return rte_eal_wait_lcore(lcore);
}

int32_t
rte_service_lcore_del(uint32_t lcore)
{
	if (lcore >= RTE_MAX_LCORE)
		return -EINVAL;

	struct core_state *cs = &lcore_states[lcore];
	if (!cs->is_service_core)
		return -EINVAL;

	/* runstate acts as the guard variable. Use load-acquire
	 * memory order here to synchronize with store-release
	 * in runstate update functions.
	 */
	if (__atomic_load_n(&cs->runstate, __ATOMIC_ACQUIRE) !=
			RUNSTATE_STOPPED)
		return -EBUSY;

	set_lcore_state(lcore, ROLE_RTE);

	return 0;
}

int32_t
rte_service_lcore_start(uint32_t lcore)
{
	if (lcore >= RTE_MAX_LCORE)
		return -EINVAL;

	struct core_state *cs = &lcore_states[lcore];
	if (!cs->is_service_core)
		return -EINVAL;

	/* runstate acts as the guard variable. Use load-acquire
	 * memory order here to synchronize with store-release
	 * in runstate update functions.
	 */
	if (__atomic_load_n(&cs->runstate, __ATOMIC_ACQUIRE) ==
			RUNSTATE_RUNNING)
		return -EALREADY;

	/* set core to run state first, and then launch otherwise it will
	 * return immediately as runstate keeps it in the service poll loop
	 */
	/* Use store-release memory order here to synchronize with
	 * load-acquire in runstate read functions.
	 */
	__atomic_store_n(&cs->runstate, RUNSTATE_RUNNING, __ATOMIC_RELEASE);

	int ret = rte_eal_remote_launch(service_runner_func, 0, lcore);
	/* returns -EBUSY if the core is already launched, 0 on success */
	return ret;
}

int32_t
rte_service_lcore_stop(uint32_t lcore)
{
	if (lcore >= RTE_MAX_LCORE)
		return -EINVAL;

	/* runstate acts as the guard variable. Use load-acquire
	 * memory order here to synchronize with store-release
	 * in runstate update functions.
	 */
	if (__atomic_load_n(&lcore_states[lcore].runstate, __ATOMIC_ACQUIRE) ==
			RUNSTATE_STOPPED)
		return -EALREADY;

	uint32_t i;
	uint64_t service_mask = lcore_states[lcore].service_mask;
	for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
		int32_t enabled = service_mask & (UINT64_C(1) << i);
		int32_t service_running = rte_service_runstate_get(i);
		int32_t only_core = (1 ==
			__atomic_load_n(&rte_services[i].num_mapped_cores,
				__ATOMIC_RELAXED));

		/* if the core is mapped, and the service is running, and this
		 * is the only core that is mapped, the service would cease to
		 * run if this core stopped, so fail instead.
		 */
		if (enabled && service_running && only_core)
			return -EBUSY;
	}

	/* Use store-release memory order here to synchronize with
	 * load-acquire in runstate read functions.
	 */
	__atomic_store_n(&lcore_states[lcore].runstate, RUNSTATE_STOPPED,
		__ATOMIC_RELEASE);

	return 0;
}

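/* retrieve one statistic for a service: cycles spent in the callback or
 * the number of times it was called (both only accumulate while stats
 * collection is enabled for the service).
 */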
int32_t
rte_service_attr_get(uint32_t id, uint32_t attr_id, uint64_t *attr_value)
{
	struct rte_service_spec_impl *s;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);

	if (!attr_value)
		return -EINVAL;

	switch (attr_id) {
	case RTE_SERVICE_ATTR_CYCLES:
		*attr_value = s->cycles_spent;
		return 0;
	case RTE_SERVICE_ATTR_CALL_COUNT:
		*attr_value = s->calls;
		return 0;
	default:
		return -EINVAL;
	}
}

int32_t
rte_service_lcore_attr_get(uint32_t lcore, uint32_t attr_id,
		uint64_t *attr_value)
{
	struct core_state *cs;

	if (lcore >= RTE_MAX_LCORE || !attr_value)
		return -EINVAL;

	cs = &lcore_states[lcore];
	if (!cs->is_service_core)
		return -ENOTSUP;

	switch (attr_id) {
	case RTE_SERVICE_LCORE_ATTR_LOOPS:
		*attr_value = cs->loops;
		return 0;
	default:
		return -EINVAL;
	}
}

int32_t
rte_service_attr_reset_all(uint32_t id)
{
	struct rte_service_spec_impl *s;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);

	s->cycles_spent = 0;
	s->calls = 0;
	return 0;
}

int32_t
rte_service_lcore_attr_reset_all(uint32_t lcore)
{
	struct core_state *cs;

	if (lcore >= RTE_MAX_LCORE)
		return -EINVAL;

	cs = &lcore_states[lcore];
	if (!cs->is_service_core)
		return -ENOTSUP;

	cs->loops = 0;

	return 0;
}

static void
service_dump_one(FILE *f, struct rte_service_spec_impl *s)
{
	/* avoid divide by zero when the service has never been called */
	uint64_t calls = 1;

	if (s->calls != 0)
		calls = s->calls;

	fprintf(f, "  %s: stats %d\tcalls %"PRIu64"\tcycles %"
			PRIu64"\tavg: %"PRIu64"\n",
			s->spec.name, service_stats_enabled(s), s->calls,
			s->cycles_spent, s->cycles_spent / calls);
}

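/* print one row per service core: the lcore ID followed by the call count
 * of every registered service on that core, tab separated.
 */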
static void
service_dump_calls_per_lcore(FILE *f, uint32_t lcore)
{
	uint32_t i;
	struct core_state *cs = &lcore_states[lcore];

	fprintf(f, "%02d\t", lcore);
	for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
		if (!service_valid(i))
			continue;
		fprintf(f, "%"PRIu64"\t", cs->calls_per_service[i]);
	}
	fprintf(f, "\n");
}

int32_t
rte_service_dump(FILE *f, uint32_t id)
{
	uint32_t i;
	int print_one = (id != UINT32_MAX);

	/* print only the specified service */
	if (print_one) {
		struct rte_service_spec_impl *s;
		SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
		fprintf(f, "Service %s Summary\n", s->spec.name);
		service_dump_one(f, s);
		return 0;
	}

	/* print all services, as UINT32_MAX was passed as id */
	fprintf(f, "Services Summary\n");
	for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
		if (!service_valid(i))
			continue;
		service_dump_one(f, &rte_services[i]);
	}

	fprintf(f, "Service Cores Summary\n");
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		if (lcore_config[i].core_role != ROLE_SERVICE)
			continue;

		service_dump_calls_per_lcore(f, i);
	}

	return 0;
}