/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <stdio.h>
#include <string.h>
#include <inttypes.h>
#include <errno.h>

#include <rte_compat.h>
#include <rte_service.h>
#include "include/rte_service_component.h"

#include <rte_eal.h>
#include <rte_launch.h>
#include <rte_lcore.h>
#include <rte_common.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_atomic.h>
#include <rte_memory.h>
#include <rte_malloc.h>

#define RTE_SERVICE_NUM_MAX 64

#define SERVICE_F_REGISTERED    (1 << 0)
#define SERVICE_F_STATS_ENABLED (1 << 1)
#define SERVICE_F_START_CHECK   (1 << 2)

/* runstates for services and lcores, denoting if they are active or not */
#define RUNSTATE_STOPPED 0
#define RUNSTATE_RUNNING 1
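
/* Each service tracks two independent runstates: one set by the component
 * that registered it (comp_runstate) and one set by the application
 * (app_runstate). Both must be RUNSTATE_RUNNING for the service to execute.
 */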

/* internal representation of a service */
struct rte_service_spec_impl {
	/* public part of the struct */
	struct rte_service_spec spec;

	/* atomic lock that when set indicates a service core is currently
	 * running this service callback. When not set, a core may take the
	 * lock and then run the service callback.
	 */
	rte_atomic32_t execute_lock;

	/* API set/get-able variables */
	int8_t app_runstate;
	int8_t comp_runstate;
	uint8_t internal_flags;

	/* per service statistics */
	rte_atomic32_t num_mapped_cores;
	uint64_t calls;
	uint64_t cycles_spent;
	uint8_t active_on_lcore[RTE_MAX_LCORE];
} __rte_cache_aligned;

/* the internal values of a service core */
struct core_state {
	/* map of services IDs are run on this core */
	uint64_t service_mask;
	uint8_t runstate; /* running or stopped */
	uint8_t is_service_core; /* set if core is currently a service core */

	uint64_t loops;
	uint64_t calls_per_service[RTE_SERVICE_NUM_MAX];
} __rte_cache_aligned;
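
/* Bit i of service_mask corresponds to the service with id i; the service
 * runner iterates the mask every loop and executes each mapped service.
 */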

static uint32_t rte_service_count;
static struct rte_service_spec_impl *rte_services;
static struct core_state *lcore_states;
static uint32_t rte_service_library_initialized;
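
/* rte_service_init() is expected to run during EAL initialization: lcores
 * flagged with ROLE_SERVICE (e.g. via the EAL service-core options) are
 * added as service cores automatically, and the master lcore is skipped.
 */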

int32_t rte_service_init(void)
{
	if (rte_service_library_initialized) {
		printf("service library init() called, init flag %d\n",
			rte_service_library_initialized);
		return -EALREADY;
	}

	rte_services = rte_calloc("rte_services", RTE_SERVICE_NUM_MAX,
			sizeof(struct rte_service_spec_impl),
			RTE_CACHE_LINE_SIZE);
	if (!rte_services) {
		printf("error allocating rte services array\n");
		goto fail_mem;
	}

	lcore_states = rte_calloc("rte_service_core_states", RTE_MAX_LCORE,
			sizeof(struct core_state), RTE_CACHE_LINE_SIZE);
	if (!lcore_states) {
		printf("error allocating core states array\n");
		goto fail_mem;
	}

	int i;
	struct rte_config *cfg = rte_eal_get_configuration();
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		if (lcore_config[i].core_role == ROLE_SERVICE) {
			if ((unsigned int)i == cfg->master_lcore)
				continue;
			rte_service_lcore_add(i);
		}
	}

	rte_service_library_initialized = 1;
	return 0;
fail_mem:
	if (rte_services)
		rte_free(rte_services);
	if (lcore_states)
		rte_free(lcore_states);
	return -ENOMEM;
}

void
rte_service_finalize(void)
{
	if (!rte_service_library_initialized)
		return;

	if (rte_services)
		rte_free(rte_services);

	if (lcore_states)
		rte_free(lcore_states);

	rte_service_library_initialized = 0;
}

/* returns 1 if service is registered and has not been unregistered
 * Returns 0 if service never registered, or has been unregistered
 */
static inline int
service_valid(uint32_t id)
{
	return !!(rte_services[id].internal_flags & SERVICE_F_REGISTERED);
}

/* validate ID and retrieve service pointer, or return error value */
#define SERVICE_VALID_GET_OR_ERR_RET(id, service, retval) do {          \
	if (id >= RTE_SERVICE_NUM_MAX || !service_valid(id))            \
		return retval;                                          \
	service = &rte_services[id];                                    \
} while (0)
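
/* Note: SERVICE_VALID_GET_OR_ERR_RET is a macro (not a function) so that the
 * early return exits the calling API function with the given error value.
 */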

/* returns 1 if statistics should be collected for the service
 * Returns 0 if statistics should not be collected for the service
 */
static inline int
service_stats_enabled(struct rte_service_spec_impl *impl)
{
	return !!(impl->internal_flags & SERVICE_F_STATS_ENABLED);
}

static inline int
service_mt_safe(struct rte_service_spec_impl *s)
{
	return !!(s->spec.capabilities & RTE_SERVICE_CAP_MT_SAFE);
}
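
/* A service advertising RTE_SERVICE_CAP_MT_SAFE may be run on multiple
 * lcores concurrently; for such services service_run() skips the
 * execute_lock entirely.
 */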

int32_t
rte_service_set_stats_enable(uint32_t id, int32_t enabled)
{
	struct rte_service_spec_impl *s;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, 0);

	if (enabled)
		s->internal_flags |= SERVICE_F_STATS_ENABLED;
	else
		s->internal_flags &= ~(SERVICE_F_STATS_ENABLED);

	return 0;
}

int32_t
rte_service_set_runstate_mapped_check(uint32_t id, int32_t enabled)
{
	struct rte_service_spec_impl *s;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, 0);

	if (enabled)
		s->internal_flags |= SERVICE_F_START_CHECK;
	else
		s->internal_flags &= ~(SERVICE_F_START_CHECK);

	return 0;
}

uint32_t
rte_service_get_count(void)
{
	return rte_service_count;
}

int32_t
rte_service_get_by_name(const char *name, uint32_t *service_id)
{
	if (!service_id)
		return -EINVAL;

	uint32_t i;
	for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
		if (service_valid(i) &&
				strcmp(name, rte_services[i].spec.name) == 0) {
			*service_id = i;
			return 0;
		}
	}

	return -ENODEV;
}

const char *
rte_service_get_name(uint32_t id)
{
	struct rte_service_spec_impl *s;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, 0);
	return s->spec.name;
}

int32_t
rte_service_probe_capability(uint32_t id, uint32_t capability)
{
	struct rte_service_spec_impl *s;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
	return !!(s->spec.capabilities & capability);
}

int32_t
rte_service_component_register(const struct rte_service_spec *spec,
		uint32_t *id_ptr)
{
	uint32_t i;
	int32_t free_slot = -1;

	if (spec->callback == NULL || strlen(spec->name) == 0)
		return -EINVAL;

	for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
		if (!service_valid(i)) {
			free_slot = i;
			break;
		}
	}

	if ((free_slot < 0) || (i == RTE_SERVICE_NUM_MAX))
		return -ENOSPC;

	struct rte_service_spec_impl *s = &rte_services[free_slot];
	s->spec = *spec;
	s->internal_flags |= SERVICE_F_REGISTERED | SERVICE_F_START_CHECK;

	rte_service_count++;

	if (id_ptr)
		*id_ptr = free_slot;

	return 0;
}

int32_t
rte_service_component_unregister(uint32_t id)
{
	uint32_t i;
	struct rte_service_spec_impl *s;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);

	rte_service_count--;

	s->internal_flags &= ~(SERVICE_F_REGISTERED);

	/* clear the run-bit in all cores */
	for (i = 0; i < RTE_MAX_LCORE; i++)
		lcore_states[i].service_mask &= ~(UINT64_C(1) << id);

	memset(&rte_services[id], 0, sizeof(struct rte_service_spec_impl));

	return 0;
}

int32_t
rte_service_component_runstate_set(uint32_t id, uint32_t runstate)
{
	struct rte_service_spec_impl *s;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);

	if (runstate)
		s->comp_runstate = RUNSTATE_RUNNING;
	else
		s->comp_runstate = RUNSTATE_STOPPED;

	return 0;
}

int32_t
rte_service_runstate_set(uint32_t id, uint32_t runstate)
{
	struct rte_service_spec_impl *s;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);

	if (runstate)
		s->app_runstate = RUNSTATE_RUNNING;
	else
		s->app_runstate = RUNSTATE_STOPPED;

	return 0;
}

int32_t
rte_service_runstate_get(uint32_t id)
{
	struct rte_service_spec_impl *s;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);

	int check_disabled = !(s->internal_flags & SERVICE_F_START_CHECK);
	int lcore_mapped = (rte_atomic32_read(&s->num_mapped_cores) > 0);

	return (s->app_runstate == RUNSTATE_RUNNING) &&
		(s->comp_runstate == RUNSTATE_RUNNING) &&
		(check_disabled | lcore_mapped);
}

static inline void
rte_service_runner_do_callback(struct rte_service_spec_impl *s,
		struct core_state *cs, uint32_t service_idx)
{
	void *userdata = s->spec.callback_userdata;

	if (service_stats_enabled(s)) {
		uint64_t start = rte_rdtsc();
		s->spec.callback(userdata);
		uint64_t end = rte_rdtsc();
		s->cycles_spent += end - start;
		cs->calls_per_service[service_idx]++;
		s->calls++;
	} else
		s->spec.callback(userdata);
}
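
/* For a service that is not MT safe but is mapped to more than one lcore,
 * service_run() serializes the callback with execute_lock: the core that
 * wins the cmpset runs the callback, the others back off and retry on a
 * later iteration.
 */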

static inline int32_t
service_run(uint32_t i, int lcore, struct core_state *cs, uint64_t service_mask)
{
	if (!service_valid(i))
		return -EINVAL;
	struct rte_service_spec_impl *s = &rte_services[i];
	if (s->comp_runstate != RUNSTATE_RUNNING ||
			s->app_runstate != RUNSTATE_RUNNING ||
			!(service_mask & (UINT64_C(1) << i))) {
		s->active_on_lcore[lcore] = 0;
		return -ENOEXEC;
	}

	s->active_on_lcore[lcore] = 1;

	/* check if cmpset is needed: if MT safe or <= 1 core mapped,
	 * atomic ops are not required.
	 */
	const int use_atomics = (service_mt_safe(s) == 0) &&
		(rte_atomic32_read(&s->num_mapped_cores) > 1);
	if (use_atomics) {
		if (!rte_atomic32_cmpset((uint32_t *)&s->execute_lock, 0, 1))
			return -EBUSY;

		rte_service_runner_do_callback(s, cs, i);
		rte_atomic32_clear(&s->execute_lock);
	} else
		rte_service_runner_do_callback(s, cs, i);

	return 0;
}

int32_t
rte_service_may_be_active(uint32_t id)
{
	uint32_t ids[RTE_MAX_LCORE] = {0};
	struct rte_service_spec_impl *s = &rte_services[id];
	int32_t lcore_count = rte_service_lcore_list(ids, RTE_MAX_LCORE);
	int i;

	if (!service_valid(id))
		return -EINVAL;

	for (i = 0; i < lcore_count; i++) {
		if (s->active_on_lcore[ids[i]])
			return 1;
	}

	return 0;
}
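
/* Illustrative usage sketch (not part of this file): an application lcore
 * can poll a service directly in its own loop, e.g.
 *
 *	uint32_t id;
 *	if (rte_service_get_by_name("my_service", &id) == 0)
 *		while (keep_running)
 *			rte_service_run_iter_on_app_lcore(id, 1);
 *
 * "my_service" and keep_running are placeholders. Passing
 * serialize_mt_unsafe=1 keeps the call safe even if service cores may be
 * running the same non-MT-safe service concurrently.
 */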

int32_t rte_service_run_iter_on_app_lcore(uint32_t id,
		uint32_t serialize_mt_unsafe)
{
	/* run service on calling core, using all-ones as the service mask */
	if (!service_valid(id))
		return -EINVAL;

	struct core_state *cs = &lcore_states[rte_lcore_id()];
	struct rte_service_spec_impl *s = &rte_services[id];

	/* Atomically add this core to the mapped cores first, then examine if
	 * we can run the service. This avoids a race condition between
	 * checking the value, and atomically adding to the mapped count.
	 */
	if (serialize_mt_unsafe)
		rte_atomic32_inc(&s->num_mapped_cores);

	if (service_mt_safe(s) == 0 &&
			rte_atomic32_read(&s->num_mapped_cores) > 1) {
		if (serialize_mt_unsafe)
			rte_atomic32_dec(&s->num_mapped_cores);
		return -EBUSY;
	}

	int ret = service_run(id, rte_lcore_id(), cs, UINT64_MAX);

	if (serialize_mt_unsafe)
		rte_atomic32_dec(&s->num_mapped_cores);

	return ret;
}

static int32_t
rte_service_runner_func(void *arg)
{
	RTE_SET_USED(arg);
	uint32_t i;
	const int lcore = rte_lcore_id();
	struct core_state *cs = &lcore_states[lcore];

	while (lcore_states[lcore].runstate == RUNSTATE_RUNNING) {
		const uint64_t service_mask = cs->service_mask;

		for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
			/* return value ignored as no change to code flow */
			service_run(i, lcore, cs, service_mask);
		}

		cs->loops++;
	}

	lcore_config[lcore].state = WAIT;

	return 0;
}

int32_t
rte_service_lcore_count(void)
{
	int32_t count = 0;
	uint32_t i;
	for (i = 0; i < RTE_MAX_LCORE; i++)
		count += lcore_states[i].is_service_core;
	return count;
}

int32_t
rte_service_lcore_list(uint32_t array[], uint32_t n)
{
	uint32_t count = rte_service_lcore_count();
	if (count > n)
		return -ENOMEM;

	if (!array)
		return -EINVAL;

	uint32_t i;
	uint32_t idx = 0;
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		struct core_state *cs = &lcore_states[i];
		if (cs->is_service_core) {
			array[idx] = i;
			idx++;
		}
	}

	return count;
}

int32_t
rte_service_lcore_count_services(uint32_t lcore)
{
	if (lcore >= RTE_MAX_LCORE)
		return -EINVAL;

	struct core_state *cs = &lcore_states[lcore];
	if (!cs->is_service_core)
		return -ENOTSUP;

	return __builtin_popcountll(cs->service_mask);
}

int32_t
rte_service_start_with_defaults(void)
{
	/* create a default mapping from cores to services, then start the
	 * services to make them transparent to unaware applications.
	 */
	uint32_t i;
	int ret;
	uint32_t count = rte_service_get_count();

	int32_t lcore_iter = 0;
	uint32_t ids[RTE_MAX_LCORE] = {0};
	int32_t lcore_count = rte_service_lcore_list(ids, RTE_MAX_LCORE);

	if (lcore_count == 0)
		return -ENOTSUP;

	for (i = 0; (int)i < lcore_count; i++)
		rte_service_lcore_start(ids[i]);

	for (i = 0; i < count; i++) {
		/* do 1:1 core mapping here, with each service getting
		 * assigned a single core by default. Adding multiple services
		 * should multiplex to a single core, or 1:1 if there are the
		 * same amount of services as service-cores
		 */
		ret = rte_service_map_lcore_set(i, ids[lcore_iter], 1);
		if (ret)
			return -ENODEV;

		lcore_iter++;
		if (lcore_iter >= lcore_count)
			lcore_iter = 0;

		ret = rte_service_runstate_set(i, 1);
		if (ret)
			return -ENOEXEC;
	}

	return 0;
}
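
/* service_update() implements both the set and the get path of the lcore
 * mapping API: callers pass a non-NULL 'set' to change the mapping and a
 * non-NULL 'enabled' to query it.
 */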

static int32_t
service_update(struct rte_service_spec *service, uint32_t lcore,
		uint32_t *set, uint32_t *enabled)
{
	uint32_t i;
	int32_t sid = -1;

	for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
		if ((struct rte_service_spec *)&rte_services[i] == service &&
				service_valid(i)) {
			sid = i;
			break;
		}
	}

	if (sid == -1 || lcore >= RTE_MAX_LCORE)
		return -EINVAL;

	if (!lcore_states[lcore].is_service_core)
		return -EINVAL;

	uint64_t sid_mask = UINT64_C(1) << sid;
	if (set) {
		uint64_t lcore_mapped = lcore_states[lcore].service_mask &
			sid_mask;

		if (*set && !lcore_mapped) {
			lcore_states[lcore].service_mask |= sid_mask;
			rte_atomic32_inc(&rte_services[sid].num_mapped_cores);
		}
		if (!*set && lcore_mapped) {
			lcore_states[lcore].service_mask &= ~(sid_mask);
			rte_atomic32_dec(&rte_services[sid].num_mapped_cores);
		}
	}

	if (enabled)
		*enabled = !!(lcore_states[lcore].service_mask & (sid_mask));

	return 0;
}

int32_t
rte_service_map_lcore_set(uint32_t id, uint32_t lcore, uint32_t enabled)
{
	struct rte_service_spec_impl *s;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
	uint32_t on = enabled > 0;
	return service_update(&s->spec, lcore, &on, 0);
}

int32_t
rte_service_map_lcore_get(uint32_t id, uint32_t lcore)
{
	struct rte_service_spec_impl *s;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
	uint32_t enabled;
	int ret = service_update(&s->spec, lcore, 0, &enabled);
	if (ret == 0)
		return enabled;
	return ret;
}

static void
set_lcore_state(uint32_t lcore, int32_t state)
{
	/* mark core state in hugepage backed config */
	struct rte_config *cfg = rte_eal_get_configuration();
	cfg->lcore_role[lcore] = state;

	/* mark state in process local lcore_config */
	lcore_config[lcore].core_role = state;

	/* update per-lcore optimized state tracking */
	lcore_states[lcore].is_service_core = (state == ROLE_SERVICE);
}

int32_t
rte_service_lcore_reset_all(void)
{
	/* loop over cores, reset all to mask 0 */
	uint32_t i;
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		if (lcore_states[i].is_service_core) {
			lcore_states[i].service_mask = 0;
			set_lcore_state(i, ROLE_RTE);
			lcore_states[i].runstate = RUNSTATE_STOPPED;
		}
	}
	for (i = 0; i < RTE_SERVICE_NUM_MAX; i++)
		rte_atomic32_set(&rte_services[i].num_mapped_cores, 0);

	return 0;
}

int32_t
rte_service_lcore_add(uint32_t lcore)
{
	if (lcore >= RTE_MAX_LCORE)
		return -EINVAL;
	if (lcore_states[lcore].is_service_core)
		return -EALREADY;

	set_lcore_state(lcore, ROLE_SERVICE);

	/* ensure that after adding a core the mask and state are defaults */
	lcore_states[lcore].service_mask = 0;
	lcore_states[lcore].runstate = RUNSTATE_STOPPED;

	return rte_eal_wait_lcore(lcore);
}

int32_t
rte_service_lcore_del(uint32_t lcore)
{
	if (lcore >= RTE_MAX_LCORE)
		return -EINVAL;

	struct core_state *cs = &lcore_states[lcore];
	if (!cs->is_service_core)
		return -EINVAL;

	if (cs->runstate != RUNSTATE_STOPPED)
		return -EBUSY;

	set_lcore_state(lcore, ROLE_RTE);

	return 0;
}
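
/* rte_service_lcore_start() launches rte_service_runner_func() on the given
 * lcore; the runner polls the mapped services until rte_service_lcore_stop()
 * moves the core back to RUNSTATE_STOPPED.
 */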

int32_t
rte_service_lcore_start(uint32_t lcore)
{
	if (lcore >= RTE_MAX_LCORE)
		return -EINVAL;

	struct core_state *cs = &lcore_states[lcore];
	if (!cs->is_service_core)
		return -EINVAL;

	if (cs->runstate == RUNSTATE_RUNNING)
		return -EALREADY;

	/* set core to run state first, and then launch otherwise it will
	 * return immediately as runstate keeps it in the service poll loop
	 */
	lcore_states[lcore].runstate = RUNSTATE_RUNNING;

	int ret = rte_eal_remote_launch(rte_service_runner_func, 0, lcore);
	/* returns -EBUSY if the core is already launched, 0 on success */
	return ret;
}

int32_t
rte_service_lcore_stop(uint32_t lcore)
{
	if (lcore >= RTE_MAX_LCORE)
		return -EINVAL;

	if (lcore_states[lcore].runstate == RUNSTATE_STOPPED)
		return -EALREADY;

	uint32_t i;
	uint64_t service_mask = lcore_states[lcore].service_mask;
	for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
		int32_t enabled = service_mask & (UINT64_C(1) << i);
		int32_t service_running = rte_service_runstate_get(i);
		int32_t only_core = (1 ==
			rte_atomic32_read(&rte_services[i].num_mapped_cores));

		/* if the core is mapped, and the service is running, and this
		 * is the only core that is mapped, the service would cease to
		 * run if this core stopped, so fail instead.
		 */
		if (enabled && service_running && only_core)
			return -EBUSY;
	}

	lcore_states[lcore].runstate = RUNSTATE_STOPPED;

	return 0;
}
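
/* Illustrative example (not part of this file): reading the call count of a
 * service with id 'id':
 *
 *	uint64_t calls;
 *	if (rte_service_attr_get(id, RTE_SERVICE_ATTR_CALL_COUNT, &calls) == 0)
 *		printf("calls: %" PRIu64 "\n", calls);
 *
 * Statistics are only accumulated while collection is enabled via
 * rte_service_set_stats_enable().
 */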

int32_t
rte_service_attr_get(uint32_t id, uint32_t attr_id, uint64_t *attr_value)
{
	struct rte_service_spec_impl *s;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);

	if (!attr_value)
		return -EINVAL;

	switch (attr_id) {
	case RTE_SERVICE_ATTR_CYCLES:
		*attr_value = s->cycles_spent;
		return 0;
	case RTE_SERVICE_ATTR_CALL_COUNT:
		*attr_value = s->calls;
		return 0;
	default:
		return -EINVAL;
	}
}

int32_t
rte_service_lcore_attr_get(uint32_t lcore, uint32_t attr_id,
		uint64_t *attr_value)
{
	struct core_state *cs;

	if (lcore >= RTE_MAX_LCORE || !attr_value)
		return -EINVAL;

	cs = &lcore_states[lcore];
	if (!cs->is_service_core)
		return -ENOTSUP;

	switch (attr_id) {
	case RTE_SERVICE_LCORE_ATTR_LOOPS:
		*attr_value = cs->loops;
		return 0;
	default:
		return -EINVAL;
	}
}

static void
rte_service_dump_one(FILE *f, struct rte_service_spec_impl *s,
		uint64_t all_cycles, uint32_t reset)
{
	RTE_SET_USED(all_cycles);

	/* avoid divide by zero */
	uint64_t calls = 1;
	if (s->calls != 0)
		calls = s->calls;

	if (reset) {
		s->cycles_spent = 0;
		s->calls = 0;
		return;
	}

	if (f == NULL)
		return;

	fprintf(f, " %s: stats %d\tcalls %"PRIu64"\tcycles %"
			PRIu64"\tavg: %"PRIu64"\n",
			s->spec.name, service_stats_enabled(s), s->calls,
			s->cycles_spent, s->cycles_spent / calls);
}

int32_t
rte_service_attr_reset_all(uint32_t id)
{
	struct rte_service_spec_impl *s;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);

	int reset = 1;
	rte_service_dump_one(NULL, s, 0, reset);
	return 0;
}

int32_t
rte_service_lcore_attr_reset_all(uint32_t lcore)
{
	struct core_state *cs;

	if (lcore >= RTE_MAX_LCORE)
		return -EINVAL;

	cs = &lcore_states[lcore];
	if (!cs->is_service_core)
		return -ENOTSUP;

	cs->loops = 0;

	return 0;
}

static void
service_dump_calls_per_lcore(FILE *f, uint32_t lcore, uint32_t reset)
{
	uint32_t i;
	struct core_state *cs = &lcore_states[lcore];

	fprintf(f, "%02d\t", lcore);
	for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
		if (!service_valid(i))
			continue;
		fprintf(f, "%"PRIu64"\t", cs->calls_per_service[i]);
		if (reset)
			cs->calls_per_service[i] = 0;
	}
	fprintf(f, "\n");
}

int32_t
rte_service_dump(FILE *f, uint32_t id)
{
	uint32_t i;
	int print_one = (id != UINT32_MAX);

	uint64_t total_cycles = 0;

	for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
		if (!service_valid(i))
			continue;
		total_cycles += rte_services[i].cycles_spent;
	}

	/* print only the specified service */
	if (print_one) {
		struct rte_service_spec_impl *s;
		SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
		fprintf(f, "Service %s Summary\n", s->spec.name);
		uint32_t reset = 0;
		rte_service_dump_one(f, s, total_cycles, reset);
		return 0;
	}

	/* print all services, as UINT32_MAX was passed as id */
	fprintf(f, "Services Summary\n");
	for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
		if (!service_valid(i))
			continue;
		uint32_t reset = 0;
		rte_service_dump_one(f, &rte_services[i], total_cycles, reset);
	}

	fprintf(f, "Service Cores Summary\n");
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		if (lcore_config[i].core_role != ROLE_SERVICE)
			continue;

		uint32_t reset = 0;
		service_dump_calls_per_lcore(f, i, reset);
	}

	return 0;
}