/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <stdio.h>
#include <string.h>
#include <inttypes.h>

#include <rte_compat.h>
#include <rte_service.h>
#include <rte_service_component.h>

#include <rte_eal.h>
#include <rte_lcore.h>
#include <rte_common.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_atomic.h>
#include <rte_memory.h>
#include <rte_malloc.h>
#include <rte_spinlock.h>

#include "eal_private.h"
#define RTE_SERVICE_NUM_MAX 64

#define SERVICE_F_REGISTERED    (1 << 0)
#define SERVICE_F_STATS_ENABLED (1 << 1)
#define SERVICE_F_START_CHECK   (1 << 2)

/* runstates for services and lcores, denoting if they are active or not */
#define RUNSTATE_STOPPED 0
#define RUNSTATE_RUNNING 1
/* internal representation of a service */
struct rte_service_spec_impl {
	/* public part of the struct */
	struct rte_service_spec spec;

	/* spin lock that when set indicates a service core is currently
	 * running this service callback. When not set, a core may take the
	 * lock and then run the service callback.
	 */
	rte_spinlock_t execute_lock;

	/* API set/get-able variables */
	int8_t app_runstate;
	int8_t comp_runstate;
	uint8_t internal_flags;

	/* per service statistics */
	/* Indicates how many cores the service is mapped to run on.
	 * It does not indicate the number of cores the service is running
	 * on currently.
	 */
	uint32_t num_mapped_cores;
	uint64_t calls;
	uint64_t cycles_spent;
} __rte_cache_aligned;
/* the internal values of a service core */
struct core_state {
	/* bitmap of the service IDs run on this core */
	uint64_t service_mask;
	uint8_t runstate; /* running or stopped */
	uint8_t thread_active; /* indicates when thread is in service_run() */
	uint8_t is_service_core; /* set if core is currently a service core */
	uint8_t service_active_on_lcore[RTE_SERVICE_NUM_MAX];
	uint64_t loops;
	uint64_t calls_per_service[RTE_SERVICE_NUM_MAX];
} __rte_cache_aligned;
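
/* Note: service_mask above is a single 64-bit bitmap of service IDs,
 * which is why RTE_SERVICE_NUM_MAX is fixed at 64.
 */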
static uint32_t rte_service_count;
static struct rte_service_spec_impl *rte_services;
static struct core_state *lcore_states;
static uint32_t rte_service_library_initialized;
int32_t
rte_service_init(void)
{
	if (rte_service_library_initialized) {
		RTE_LOG(NOTICE, EAL,
			"service library init() called, init flag %d\n",
			rte_service_library_initialized);
		return -EALREADY;
	}

	rte_services = rte_calloc("rte_services", RTE_SERVICE_NUM_MAX,
			sizeof(struct rte_service_spec_impl),
			RTE_CACHE_LINE_SIZE);
	if (!rte_services) {
		RTE_LOG(ERR, EAL, "error allocating rte services array\n");
		goto fail_mem;
	}

	lcore_states = rte_calloc("rte_service_core_states", RTE_MAX_LCORE,
			sizeof(struct core_state), RTE_CACHE_LINE_SIZE);
	if (!lcore_states) {
		RTE_LOG(ERR, EAL, "error allocating core states array\n");
		goto fail_mem;
	}

	int i;
	struct rte_config *cfg = rte_eal_get_configuration();
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		if (lcore_config[i].core_role == ROLE_SERVICE) {
			if ((unsigned int)i == cfg->master_lcore)
				continue;
			rte_service_lcore_add(i);
		}
	}

	rte_service_library_initialized = 1;
	return 0;
fail_mem:
	rte_free(rte_services);
	rte_free(lcore_states);
	return -ENOMEM;
}
void
rte_service_finalize(void)
{
	if (!rte_service_library_initialized)
		return;

	rte_service_lcore_reset_all();
	rte_eal_mp_wait_lcore();

	rte_free(rte_services);
	rte_free(lcore_states);

	rte_service_library_initialized = 0;
}
/* Returns 1 if the service is registered and has not been unregistered.
 * Returns 0 if the service was never registered, or has been unregistered.
 */
static inline int
service_valid(uint32_t id)
{
	return !!(rte_services[id].internal_flags & SERVICE_F_REGISTERED);
}
static struct rte_service_spec_impl *
service_get(uint32_t id)
{
	return &rte_services[id];
}
/* validate ID and retrieve service pointer, or return error value */
#define SERVICE_VALID_GET_OR_ERR_RET(id, service, retval) do {          \
	if (id >= RTE_SERVICE_NUM_MAX || !service_valid(id))            \
		return retval;                                          \
	service = &rte_services[id];                                    \
} while (0)
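
/* Example use of the macro above, as seen throughout this file: on an
 * out-of-range or unregistered ID the enclosing function returns the
 * given error value before 's' is ever dereferenced.
 *
 *	struct rte_service_spec_impl *s;
 *	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
 */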
/* Returns 1 if statistics should be collected for the service.
 * Returns 0 if statistics should not be collected for the service.
 */
static inline int
service_stats_enabled(struct rte_service_spec_impl *impl)
{
	return !!(impl->internal_flags & SERVICE_F_STATS_ENABLED);
}

static inline int
service_mt_safe(struct rte_service_spec_impl *s)
{
	return !!(s->spec.capabilities & RTE_SERVICE_CAP_MT_SAFE);
}
int32_t
rte_service_set_stats_enable(uint32_t id, int32_t enabled)
{
	struct rte_service_spec_impl *s;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, 0);

	if (enabled)
		s->internal_flags |= SERVICE_F_STATS_ENABLED;
	else
		s->internal_flags &= ~(SERVICE_F_STATS_ENABLED);

	return 0;
}
int32_t
rte_service_set_runstate_mapped_check(uint32_t id, int32_t enabled)
{
	struct rte_service_spec_impl *s;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, 0);

	if (enabled)
		s->internal_flags |= SERVICE_F_START_CHECK;
	else
		s->internal_flags &= ~(SERVICE_F_START_CHECK);

	return 0;
}
uint32_t
rte_service_get_count(void)
{
	return rte_service_count;
}
int32_t
rte_service_get_by_name(const char *name, uint32_t *service_id)
{
	if (!service_id)
		return -EINVAL;

	int i;
	for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
		if (service_valid(i) &&
				strcmp(name, rte_services[i].spec.name) == 0) {
			*service_id = i;
			return 0;
		}
	}

	return -ENODEV;
}
const char *
rte_service_get_name(uint32_t id)
{
	struct rte_service_spec_impl *s;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, 0);
	return s->spec.name;
}
int32_t
rte_service_probe_capability(uint32_t id, uint32_t capability)
{
	struct rte_service_spec_impl *s;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
	return !!(s->spec.capabilities & capability);
}
int32_t
rte_service_component_register(const struct rte_service_spec *spec,
		uint32_t *id_ptr)
{
	uint32_t i;
	int32_t free_slot = -1;

	if (spec->callback == NULL || strlen(spec->name) == 0)
		return -EINVAL;

	for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
		if (!service_valid(i)) {
			free_slot = i;
			break;
		}
	}

	if ((free_slot < 0) || (i == RTE_SERVICE_NUM_MAX))
		return -ENOSPC;

	struct rte_service_spec_impl *s = &rte_services[free_slot];
	s->spec = *spec;
	s->internal_flags |= SERVICE_F_REGISTERED | SERVICE_F_START_CHECK;

	rte_service_count++;

	if (id_ptr)
		*id_ptr = free_slot;

	return 0;
}
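
/* Sketch of a component registering itself with the API above; the
 * callback, name and context pointer here are hypothetical:
 *
 *	static int32_t my_poll_fn(void *userdata);
 *
 *	struct rte_service_spec spec = {
 *		.name = "my_component",
 *		.callback = my_poll_fn,
 *		.callback_userdata = my_ctx,
 *		.capabilities = RTE_SERVICE_CAP_MT_SAFE,
 *	};
 *	uint32_t id;
 *	int ret = rte_service_component_register(&spec, &id);
 */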
int32_t
rte_service_component_unregister(uint32_t id)
{
	uint32_t i;
	struct rte_service_spec_impl *s;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);

	rte_service_count--;

	s->internal_flags &= ~(SERVICE_F_REGISTERED);

	/* clear the run-bit in all cores */
	for (i = 0; i < RTE_MAX_LCORE; i++)
		lcore_states[i].service_mask &= ~(UINT64_C(1) << id);

	memset(&rte_services[id], 0, sizeof(struct rte_service_spec_impl));

	return 0;
}
int32_t
rte_service_component_runstate_set(uint32_t id, uint32_t runstate)
{
	struct rte_service_spec_impl *s;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);

	/* comp_runstate acts as the guard variable. Use store-release
	 * memory order. This synchronizes with load-acquire in the
	 * service_run and rte_service_runstate_get functions.
	 */
	if (runstate)
		__atomic_store_n(&s->comp_runstate, RUNSTATE_RUNNING,
			__ATOMIC_RELEASE);
	else
		__atomic_store_n(&s->comp_runstate, RUNSTATE_STOPPED,
			__ATOMIC_RELEASE);

	return 0;
}
int32_t
rte_service_runstate_set(uint32_t id, uint32_t runstate)
{
	struct rte_service_spec_impl *s;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);

	/* app_runstate acts as the guard variable. Use store-release
	 * memory order. This synchronizes with load-acquire in the
	 * service_run and rte_service_runstate_get functions.
	 */
	if (runstate)
		__atomic_store_n(&s->app_runstate, RUNSTATE_RUNNING,
			__ATOMIC_RELEASE);
	else
		__atomic_store_n(&s->app_runstate, RUNSTATE_STOPPED,
			__ATOMIC_RELEASE);

	return 0;
}
int32_t
rte_service_runstate_get(uint32_t id)
{
	struct rte_service_spec_impl *s;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);

	/* comp_runstate and app_runstate act as the guard variables.
	 * Use load-acquire memory order. This synchronizes with
	 * store-release in service state set functions.
	 */
	if (__atomic_load_n(&s->comp_runstate, __ATOMIC_ACQUIRE) ==
			RUNSTATE_RUNNING &&
	    __atomic_load_n(&s->app_runstate, __ATOMIC_ACQUIRE) ==
			RUNSTATE_RUNNING) {
		int check_disabled = !(s->internal_flags &
			SERVICE_F_START_CHECK);
		int lcore_mapped = (__atomic_load_n(&s->num_mapped_cores,
			__ATOMIC_RELAXED) > 0);

		return (check_disabled | lcore_mapped);
	}

	return 0;
}
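
/* A service reports as running only when both the component runstate
 * and the application runstate are set, e.g. (id hypothetical):
 *
 *	rte_service_component_runstate_set(id, 1);
 *	rte_service_runstate_set(id, 1);
 *	int running = rte_service_runstate_get(id);
 */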
static inline void
service_runner_do_callback(struct rte_service_spec_impl *s,
		struct core_state *cs, uint32_t service_idx)
{
	void *userdata = s->spec.callback_userdata;

	if (service_stats_enabled(s)) {
		uint64_t start = rte_rdtsc();
		s->spec.callback(userdata);
		uint64_t end = rte_rdtsc();
		s->cycles_spent += end - start;
		cs->calls_per_service[service_idx]++;
		s->calls++;
	} else
		s->spec.callback(userdata);
}
/* Expects the service 's' is valid. */
static int32_t
service_run(uint32_t i, struct core_state *cs, uint64_t service_mask,
		struct rte_service_spec_impl *s, uint32_t serialize_mt_unsafe)
{
	if (!s)
		return -EINVAL;

	/* comp_runstate and app_runstate act as the guard variables.
	 * Use load-acquire memory order. This synchronizes with
	 * store-release in service state set functions.
	 */
	if (__atomic_load_n(&s->comp_runstate, __ATOMIC_ACQUIRE) !=
			RUNSTATE_RUNNING ||
	    __atomic_load_n(&s->app_runstate, __ATOMIC_ACQUIRE) !=
			RUNSTATE_RUNNING ||
	    !(service_mask & (UINT64_C(1) << i))) {
		cs->service_active_on_lcore[i] = 0;
		return -ENOEXEC;
	}

	cs->service_active_on_lcore[i] = 1;

	if ((service_mt_safe(s) == 0) && (serialize_mt_unsafe == 1)) {
		if (!rte_spinlock_trylock(&s->execute_lock))
			return -EBUSY;

		service_runner_do_callback(s, cs, i);
		rte_spinlock_unlock(&s->execute_lock);
	} else
		service_runner_do_callback(s, cs, i);

	return 0;
}
int32_t
rte_service_may_be_active(uint32_t id)
{
	uint32_t ids[RTE_MAX_LCORE] = {0};
	int32_t lcore_count = rte_service_lcore_list(ids, RTE_MAX_LCORE);
	int i;

	if (id >= RTE_SERVICE_NUM_MAX || !service_valid(id))
		return -EINVAL;

	for (i = 0; i < lcore_count; i++) {
		if (lcore_states[ids[i]].service_active_on_lcore[id])
			return 1;
	}

	return 0;
}
int32_t
rte_service_run_iter_on_app_lcore(uint32_t id, uint32_t serialize_mt_unsafe)
{
	struct core_state *cs = &lcore_states[rte_lcore_id()];
	struct rte_service_spec_impl *s;

	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);

	/* Increment num_mapped_cores to reflect that this core is
	 * now mapped and capable of running the service.
	 */
	__atomic_add_fetch(&s->num_mapped_cores, 1, __ATOMIC_RELAXED);

	int ret = service_run(id, cs, UINT64_MAX, s, serialize_mt_unsafe);

	__atomic_sub_fetch(&s->num_mapped_cores, 1, __ATOMIC_RELAXED);

	return ret;
}
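
/* Sketch: an application lcore may drive a service itself rather than
 * using a service core; serialize_mt_unsafe = 1 keeps MT-unsafe
 * services serialized via the execute_lock (id and done hypothetical):
 *
 *	while (!done)
 *		rte_service_run_iter_on_app_lcore(id, 1);
 */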
static int32_t
service_runner_func(void *arg)
{
	RTE_SET_USED(arg);
	uint32_t i;
	const int lcore = rte_lcore_id();
	struct core_state *cs = &lcore_states[lcore];

	__atomic_store_n(&cs->thread_active, 1, __ATOMIC_SEQ_CST);

	/* runstate acts as the guard variable. Use load-acquire
	 * memory order here to synchronize with store-release
	 * in runstate update functions.
	 */
	while (__atomic_load_n(&cs->runstate, __ATOMIC_ACQUIRE) ==
			RUNSTATE_RUNNING) {
		const uint64_t service_mask = cs->service_mask;

		for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
			if (!service_valid(i))
				continue;
			/* return value ignored as no change to code flow */
			service_run(i, cs, service_mask, service_get(i), 1);
		}

		cs->loops++;
	}

	/* Use SEQ CST memory ordering to avoid any re-ordering around
	 * this store, ensuring that once this store is visible, the service
	 * lcore thread really is done in service cores code.
	 */
	__atomic_store_n(&cs->thread_active, 0, __ATOMIC_SEQ_CST);
	return 0;
}
int32_t
rte_service_lcore_may_be_active(uint32_t lcore)
{
	if (lcore >= RTE_MAX_LCORE || !lcore_states[lcore].is_service_core)
		return -EINVAL;

	/* Load thread_active using ACQUIRE to avoid instructions dependent on
	 * the result being re-ordered before this load completes.
	 */
	return __atomic_load_n(&lcore_states[lcore].thread_active,
			__ATOMIC_ACQUIRE);
}
int32_t
rte_service_lcore_count(void)
{
	int32_t count = 0;
	uint32_t i;
	for (i = 0; i < RTE_MAX_LCORE; i++)
		count += lcore_states[i].is_service_core;
	return count;
}
int32_t
rte_service_lcore_list(uint32_t array[], uint32_t n)
{
	uint32_t count = rte_service_lcore_count();
	if (count > n)
		return -ENOMEM;

	if (!array)
		return -EINVAL;

	uint32_t i;
	uint32_t idx = 0;
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		struct core_state *cs = &lcore_states[i];
		if (cs->is_service_core) {
			array[idx] = i;
			idx++;
		}
	}

	return count;
}
int32_t
rte_service_lcore_count_services(uint32_t lcore)
{
	if (lcore >= RTE_MAX_LCORE)
		return -EINVAL;

	struct core_state *cs = &lcore_states[lcore];
	if (!cs->is_service_core)
		return -ENOTSUP;

	return __builtin_popcountll(cs->service_mask);
}
int32_t
rte_service_start_with_defaults(void)
{
	/* create a default mapping from cores to services, then start the
	 * services to make them transparent to unaware applications.
	 */
	uint32_t i;
	int ret;
	uint32_t count = rte_service_get_count();

	int32_t lcore_iter = 0;
	uint32_t ids[RTE_MAX_LCORE] = {0};
	int32_t lcore_count = rte_service_lcore_list(ids, RTE_MAX_LCORE);

	if (lcore_count == 0)
		return -ENOTSUP;

	for (i = 0; (int)i < lcore_count; i++)
		rte_service_lcore_start(ids[i]);

	for (i = 0; i < count; i++) {
		/* do 1:1 core mapping here, with each service getting
		 * assigned a single core by default. Adding multiple services
		 * should multiplex to a single core, or 1:1 if there are the
		 * same number of services as service-cores.
		 */
		ret = rte_service_map_lcore_set(i, ids[lcore_iter], 1);
		if (ret)
			return -ENODEV;

		lcore_iter++;
		if (lcore_iter >= lcore_count)
			lcore_iter = 0;

		ret = rte_service_runstate_set(i, 1);
		if (ret)
			return -ENOEXEC;
	}

	return 0;
}
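
/* Typical startup sketch, assuming service cores were supplied to EAL
 * (service coremask/corelist options):
 *
 *	if (rte_service_start_with_defaults() != 0)
 *		rte_exit(EXIT_FAILURE, "error starting services\n");
 */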
static int32_t
service_update(uint32_t sid, uint32_t lcore, uint32_t *set, uint32_t *enabled)
{
	/* validate ID, or return error value */
	if (sid >= RTE_SERVICE_NUM_MAX || !service_valid(sid) ||
	    lcore >= RTE_MAX_LCORE || !lcore_states[lcore].is_service_core)
		return -EINVAL;

	uint64_t sid_mask = UINT64_C(1) << sid;
	if (set) {
		uint64_t lcore_mapped = lcore_states[lcore].service_mask &
			sid_mask;

		if (*set && !lcore_mapped) {
			lcore_states[lcore].service_mask |= sid_mask;
			__atomic_add_fetch(&rte_services[sid].num_mapped_cores,
				1, __ATOMIC_RELAXED);
		}
		if (!*set && lcore_mapped) {
			lcore_states[lcore].service_mask &= ~(sid_mask);
			__atomic_sub_fetch(&rte_services[sid].num_mapped_cores,
				1, __ATOMIC_RELAXED);
		}
	}

	if (enabled)
		*enabled = !!(lcore_states[lcore].service_mask & (sid_mask));

	return 0;
}
int32_t
rte_service_map_lcore_set(uint32_t id, uint32_t lcore, uint32_t enabled)
{
	uint32_t on = enabled > 0;
	return service_update(id, lcore, &on, 0);
}
int32_t
rte_service_map_lcore_get(uint32_t id, uint32_t lcore)
{
	uint32_t enabled;
	int ret = service_update(id, lcore, 0, &enabled);
	if (ret == 0)
		return enabled;
	return ret;
}
static void
set_lcore_state(uint32_t lcore, int32_t state)
{
	/* mark core state in hugepage backed config */
	struct rte_config *cfg = rte_eal_get_configuration();
	cfg->lcore_role[lcore] = state;

	/* mark state in process local lcore_config */
	lcore_config[lcore].core_role = state;

	/* update per-lcore optimized state tracking */
	lcore_states[lcore].is_service_core = (state == ROLE_SERVICE);
}
int32_t
rte_service_lcore_reset_all(void)
{
	/* loop over cores, reset all to mask 0 */
	uint32_t i;
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		if (lcore_states[i].is_service_core) {
			lcore_states[i].service_mask = 0;
			set_lcore_state(i, ROLE_RTE);
			/* runstate acts as the guard variable. Use
			 * store-release memory order here to synchronize
			 * with load-acquire in runstate read functions.
			 */
			__atomic_store_n(&lcore_states[i].runstate,
				RUNSTATE_STOPPED, __ATOMIC_RELEASE);
		}
	}
	for (i = 0; i < RTE_SERVICE_NUM_MAX; i++)
		__atomic_store_n(&rte_services[i].num_mapped_cores, 0,
			__ATOMIC_RELAXED);

	return 0;
}
int32_t
rte_service_lcore_add(uint32_t lcore)
{
	if (lcore >= RTE_MAX_LCORE)
		return -EINVAL;
	if (lcore_states[lcore].is_service_core)
		return -EALREADY;

	set_lcore_state(lcore, ROLE_SERVICE);

	/* ensure that after adding a core the mask and state are defaults */
	lcore_states[lcore].service_mask = 0;
	/* Use store-release memory order here to synchronize with
	 * load-acquire in runstate read functions.
	 */
	__atomic_store_n(&lcore_states[lcore].runstate, RUNSTATE_STOPPED,
		__ATOMIC_RELEASE);

	return rte_eal_wait_lcore(lcore);
}
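
/* Sketch of the service-core lifecycle built from the calls in this
 * file (lcore 7 and id are hypothetical):
 *
 *	rte_service_lcore_add(7);
 *	rte_service_map_lcore_set(id, 7, 1);
 *	rte_service_lcore_start(7);
 *	...
 *	rte_service_lcore_stop(7);
 *	rte_service_lcore_del(7);
 */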
int32_t
rte_service_lcore_del(uint32_t lcore)
{
	if (lcore >= RTE_MAX_LCORE)
		return -EINVAL;

	struct core_state *cs = &lcore_states[lcore];
	if (!cs->is_service_core)
		return -EINVAL;

	/* runstate acts as the guard variable. Use load-acquire
	 * memory order here to synchronize with store-release
	 * in runstate update functions.
	 */
	if (__atomic_load_n(&cs->runstate, __ATOMIC_ACQUIRE) !=
			RUNSTATE_STOPPED)
		return -EBUSY;

	set_lcore_state(lcore, ROLE_RTE);

	return 0;
}
int32_t
rte_service_lcore_start(uint32_t lcore)
{
	if (lcore >= RTE_MAX_LCORE)
		return -EINVAL;

	struct core_state *cs = &lcore_states[lcore];
	if (!cs->is_service_core)
		return -EINVAL;

	/* runstate acts as the guard variable. Use load-acquire
	 * memory order here to synchronize with store-release
	 * in runstate update functions.
	 */
	if (__atomic_load_n(&cs->runstate, __ATOMIC_ACQUIRE) ==
			RUNSTATE_RUNNING)
		return -EALREADY;

	/* set core to run state first, and then launch otherwise it will
	 * return immediately as runstate keeps it in the service poll loop.
	 * Use store-release memory order here to synchronize with
	 * load-acquire in runstate read functions.
	 */
	__atomic_store_n(&cs->runstate, RUNSTATE_RUNNING, __ATOMIC_RELEASE);

	int ret = rte_eal_remote_launch(service_runner_func, 0, lcore);
	/* returns -EBUSY if the core is already launched, 0 on success */
	return ret;
}
int32_t
rte_service_lcore_stop(uint32_t lcore)
{
	if (lcore >= RTE_MAX_LCORE)
		return -EINVAL;

	/* runstate acts as the guard variable. Use load-acquire
	 * memory order here to synchronize with store-release
	 * in runstate update functions.
	 */
	if (__atomic_load_n(&lcore_states[lcore].runstate, __ATOMIC_ACQUIRE) ==
			RUNSTATE_STOPPED)
		return -EALREADY;

	uint32_t i;
	uint64_t service_mask = lcore_states[lcore].service_mask;
	for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
		int32_t enabled = service_mask & (UINT64_C(1) << i);
		int32_t service_running = rte_service_runstate_get(i);
		int32_t only_core = (1 ==
			__atomic_load_n(&rte_services[i].num_mapped_cores,
				__ATOMIC_RELAXED));

		/* if the core is mapped, and the service is running, and this
		 * is the only core that is mapped, the service would cease to
		 * run if this core stopped, so fail instead.
		 */
		if (enabled && service_running && only_core)
			return -EBUSY;
	}

	/* Use store-release memory order here to synchronize with
	 * load-acquire in runstate read functions.
	 */
	__atomic_store_n(&lcore_states[lcore].runstate, RUNSTATE_STOPPED,
		__ATOMIC_RELEASE);

	return 0;
}
int32_t
rte_service_attr_get(uint32_t id, uint32_t attr_id, uint64_t *attr_value)
{
	struct rte_service_spec_impl *s;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);

	if (!attr_value)
		return -EINVAL;

	switch (attr_id) {
	case RTE_SERVICE_ATTR_CYCLES:
		*attr_value = s->cycles_spent;
		return 0;
	case RTE_SERVICE_ATTR_CALL_COUNT:
		*attr_value = s->calls;
		return 0;
	default:
		return -EINVAL;
	}
}
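
/* Sketch: average cycles per call can be derived from the two
 * attributes above, provided stats were enabled with
 * rte_service_set_stats_enable (id hypothetical):
 *
 *	uint64_t cycles, calls;
 *	rte_service_attr_get(id, RTE_SERVICE_ATTR_CYCLES, &cycles);
 *	rte_service_attr_get(id, RTE_SERVICE_ATTR_CALL_COUNT, &calls);
 *	uint64_t avg = calls ? cycles / calls : 0;
 */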
int32_t
rte_service_lcore_attr_get(uint32_t lcore, uint32_t attr_id,
		uint64_t *attr_value)
{
	struct core_state *cs;

	if (lcore >= RTE_MAX_LCORE || !attr_value)
		return -EINVAL;

	cs = &lcore_states[lcore];
	if (!cs->is_service_core)
		return -ENOTSUP;

	switch (attr_id) {
	case RTE_SERVICE_LCORE_ATTR_LOOPS:
		*attr_value = cs->loops;
		return 0;
	default:
		return -EINVAL;
	}
}
static void
service_dump_one(FILE *f, struct rte_service_spec_impl *s, uint32_t reset)
{
	/* avoid divide by zero */
	uint64_t calls = 1;
	if (s->calls != 0)
		calls = s->calls;

	if (reset) {
		s->cycles_spent = 0;
		s->calls = 0;
		return;
	}

	if (f == NULL)
		return;

	fprintf(f, "  %s: stats %d\tcalls %"PRIu64"\tcycles %"
			PRIu64"\tavg: %"PRIu64"\n",
			s->spec.name, service_stats_enabled(s), s->calls,
			s->cycles_spent, s->cycles_spent / calls);
}
int32_t
rte_service_attr_reset_all(uint32_t id)
{
	struct rte_service_spec_impl *s;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);

	int reset = 1;
	service_dump_one(NULL, s, reset);
	return 0;
}
int32_t
rte_service_lcore_attr_reset_all(uint32_t lcore)
{
	struct core_state *cs;

	if (lcore >= RTE_MAX_LCORE)
		return -EINVAL;

	cs = &lcore_states[lcore];
	if (!cs->is_service_core)
		return -ENOTSUP;

	cs->loops = 0;

	return 0;
}
static void
service_dump_calls_per_lcore(FILE *f, uint32_t lcore, uint32_t reset)
{
	uint32_t i;
	struct core_state *cs = &lcore_states[lcore];

	fprintf(f, "%02d\t", lcore);
	for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
		if (!service_valid(i))
			continue;
		fprintf(f, "%"PRIu64"\t", cs->calls_per_service[i]);
		if (reset)
			cs->calls_per_service[i] = 0;
	}
	fprintf(f, "\n");
}
int32_t
rte_service_dump(FILE *f, uint32_t id)
{
	uint32_t i;
	int print_one = (id != UINT32_MAX);

	/* print only the specified service */
	if (print_one) {
		struct rte_service_spec_impl *s;
		SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
		fprintf(f, "Service %s Summary\n", s->spec.name);
		uint32_t reset = 0;
		service_dump_one(f, s, reset);
		return 0;
	}

	/* print all services, as UINT32_MAX was passed as id */
	fprintf(f, "Services Summary\n");
	for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
		if (!service_valid(i))
			continue;
		uint32_t reset = 0;
		service_dump_one(f, &rte_services[i], reset);
	}

	fprintf(f, "Service Cores Summary\n");
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		if (lcore_config[i].core_role != ROLE_SERVICE)
			continue;

		uint32_t reset = 0;
		service_dump_calls_per_lcore(f, i, reset);
	}

	return 0;
}