/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <stdio.h>
#include <unistd.h>
#include <inttypes.h>
#include <limits.h>
#include <string.h>

#include <rte_compat.h>
#include <rte_service.h>
#include "include/rte_service_component.h"

#include <rte_eal.h>
#include <rte_lcore.h>
#include <rte_common.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_atomic.h>
#include <rte_memory.h>
#include <rte_malloc.h>

#include "eal_private.h"

#define RTE_SERVICE_NUM_MAX 64

#define SERVICE_F_REGISTERED (1 << 0)
#define SERVICE_F_STATS_ENABLED (1 << 1)
#define SERVICE_F_START_CHECK (1 << 2)
/* runstates for services and lcores, denoting if they are active or not */
#define RUNSTATE_STOPPED 0
#define RUNSTATE_RUNNING 1
/* internal representation of a service */
struct rte_service_spec_impl {
	/* public part of the struct */
	struct rte_service_spec spec;

	/* atomic lock that when set indicates a service core is currently
	 * running this service callback. When not set, a core may take the
	 * lock and then run the service callback.
	 */
	rte_atomic32_t execute_lock;

	/* API set/get-able variables */
	int8_t app_runstate;
	int8_t comp_runstate;
	uint8_t internal_flags;

	/* per service statistics */
	rte_atomic32_t num_mapped_cores;
	uint64_t calls;
	uint64_t cycles_spent;
} __rte_cache_aligned;
/* the internal values of a service core */
struct core_state {
	/* map of service IDs that are run on this core */
	uint64_t service_mask;
	uint8_t runstate; /* running or stopped */
	uint8_t is_service_core; /* set if core is currently a service core */
	uint8_t service_active_on_lcore[RTE_SERVICE_NUM_MAX];
	uint64_t loops;
	uint64_t calls_per_service[RTE_SERVICE_NUM_MAX];
} __rte_cache_aligned;
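/* Note: bit N of service_mask corresponds to the service with ID N, so a
 * single core can be mapped to at most RTE_SERVICE_NUM_MAX (64) services.
 */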
static uint32_t rte_service_count;
static struct rte_service_spec_impl *rte_services;
static struct core_state *lcore_states;
static uint32_t rte_service_library_initialized;
int32_t
rte_service_init(void)
{
	if (rte_service_library_initialized) {
		RTE_LOG(NOTICE, EAL,
			"service library init() called, init flag %d\n",
			rte_service_library_initialized);
		return -EALREADY;
	}

	rte_services = rte_calloc("rte_services", RTE_SERVICE_NUM_MAX,
			sizeof(struct rte_service_spec_impl),
			RTE_CACHE_LINE_SIZE);
	if (!rte_services) {
		RTE_LOG(ERR, EAL, "error allocating rte services array\n");
		goto fail_mem;
	}

	lcore_states = rte_calloc("rte_service_core_states", RTE_MAX_LCORE,
			sizeof(struct core_state), RTE_CACHE_LINE_SIZE);
	if (!lcore_states) {
		RTE_LOG(ERR, EAL, "error allocating core states array\n");
		goto fail_mem;
	}

	int i;
	struct rte_config *cfg = rte_eal_get_configuration();
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		if (lcore_config[i].core_role == ROLE_SERVICE) {
			if ((unsigned int)i == cfg->master_lcore)
				continue;
			rte_service_lcore_add(i);
		}
	}

	rte_service_library_initialized = 1;
	return 0;

fail_mem:
	rte_free(rte_services);
	rte_free(lcore_states);
	return -ENOMEM;
}
void
rte_service_finalize(void)
{
	if (!rte_service_library_initialized)
		return;

	rte_free(rte_services);
	rte_free(lcore_states);

	rte_service_library_initialized = 0;
}
/* Returns 1 if the service is registered and has not been unregistered.
 * Returns 0 if the service was never registered, or has been unregistered.
 */
static int
service_valid(uint32_t id)
{
	return !!(rte_services[id].internal_flags & SERVICE_F_REGISTERED);
}
/* validate ID and retrieve service pointer, or return error value */
#define SERVICE_VALID_GET_OR_ERR_RET(id, service, retval) do {	\
	if (id >= RTE_SERVICE_NUM_MAX || !service_valid(id))	\
		return retval;					\
	service = &rte_services[id];				\
} while (0)
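/* Note: the macro above returns from the *calling* function when the ID is
 * invalid, so retval must match the caller's return type: -EINVAL for the
 * int32_t APIs below, 0 for those returning a count or a pointer.
 */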
/* Returns 1 if statistics should be collected for the service.
 * Returns 0 if statistics should not be collected for the service.
 */
static int
service_stats_enabled(struct rte_service_spec_impl *impl)
{
	return !!(impl->internal_flags & SERVICE_F_STATS_ENABLED);
}

static int
service_mt_safe(struct rte_service_spec_impl *s)
{
	return !!(s->spec.capabilities & RTE_SERVICE_CAP_MT_SAFE);
}
int32_t
rte_service_set_stats_enable(uint32_t id, int32_t enabled)
{
	struct rte_service_spec_impl *s;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, 0);

	if (enabled)
		s->internal_flags |= SERVICE_F_STATS_ENABLED;
	else
		s->internal_flags &= ~(SERVICE_F_STATS_ENABLED);

	return 0;
}

int32_t
rte_service_set_runstate_mapped_check(uint32_t id, int32_t enabled)
{
	struct rte_service_spec_impl *s;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, 0);

	if (enabled)
		s->internal_flags |= SERVICE_F_START_CHECK;
	else
		s->internal_flags &= ~(SERVICE_F_START_CHECK);

	return 0;
}
uint32_t
rte_service_get_count(void)
{
	return rte_service_count;
}

int32_t
rte_service_get_by_name(const char *name, uint32_t *service_id)
{
	if (!service_id)
		return -EINVAL;

	int i;
	for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
		if (service_valid(i) &&
				strcmp(name, rte_services[i].spec.name) == 0) {
			*service_id = i;
			return 0;
		}
	}

	return -ENODEV;
}
const char *
rte_service_get_name(uint32_t id)
{
	struct rte_service_spec_impl *s;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, 0);
	return s->spec.name;
}

int32_t
rte_service_probe_capability(uint32_t id, uint32_t capability)
{
	struct rte_service_spec_impl *s;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
	return !!(s->spec.capabilities & capability);
}
int32_t
rte_service_component_register(const struct rte_service_spec *spec,
		uint32_t *id_ptr)
{
	uint32_t i;
	int32_t free_slot = -1;

	if (spec->callback == NULL || strlen(spec->name) == 0)
		return -EINVAL;

	for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
		if (!service_valid(i)) {
			free_slot = i;
			break;
		}
	}

	if ((free_slot < 0) || (i == RTE_SERVICE_NUM_MAX))
		return -ENOSPC;

	struct rte_service_spec_impl *s = &rte_services[free_slot];
	s->spec = *spec;
	s->internal_flags |= SERVICE_F_REGISTERED | SERVICE_F_START_CHECK;

	rte_smp_wmb();
	rte_service_count++;

	if (id_ptr)
		*id_ptr = free_slot;

	return 0;
}
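/* Example (illustrative only, component names are hypothetical): a driver
 * registers a background-work callback as a service, then marks the
 * component as ready to run:
 *
 *	static int32_t my_poll_cb(void *args);
 *
 *	struct rte_service_spec spec = {
 *		.name = "my_driver_poll",
 *		.callback = my_poll_cb,
 *		.callback_userdata = my_args,
 *		.capabilities = RTE_SERVICE_CAP_MT_SAFE,
 *	};
 *	uint32_t sid;
 *	int ret = rte_service_component_register(&spec, &sid);
 *	if (ret == 0)
 *		rte_service_component_runstate_set(sid, 1);
 */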
int32_t
rte_service_component_unregister(uint32_t id)
{
	uint32_t i;
	struct rte_service_spec_impl *s;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);

	rte_service_count--;
	rte_smp_wmb();

	s->internal_flags &= ~(SERVICE_F_REGISTERED);

	/* clear the run-bit in all cores */
	for (i = 0; i < RTE_MAX_LCORE; i++)
		lcore_states[i].service_mask &= ~(UINT64_C(1) << id);

	memset(&rte_services[id], 0, sizeof(struct rte_service_spec_impl));

	return 0;
}
int32_t
rte_service_component_runstate_set(uint32_t id, uint32_t runstate)
{
	struct rte_service_spec_impl *s;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);

	if (runstate)
		s->comp_runstate = RUNSTATE_RUNNING;
	else
		s->comp_runstate = RUNSTATE_STOPPED;

	rte_smp_wmb();
	return 0;
}

int32_t
rte_service_runstate_set(uint32_t id, uint32_t runstate)
{
	struct rte_service_spec_impl *s;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);

	if (runstate)
		s->app_runstate = RUNSTATE_RUNNING;
	else
		s->app_runstate = RUNSTATE_STOPPED;

	rte_smp_wmb();
	return 0;
}
int32_t
rte_service_runstate_get(uint32_t id)
{
	struct rte_service_spec_impl *s;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
	rte_smp_rmb();

	int check_disabled = !(s->internal_flags & SERVICE_F_START_CHECK);
	int lcore_mapped = (rte_atomic32_read(&s->num_mapped_cores) > 0);

	return (s->app_runstate == RUNSTATE_RUNNING) &&
		(s->comp_runstate == RUNSTATE_RUNNING) &&
		(check_disabled | lcore_mapped);
}
static inline void
rte_service_runner_do_callback(struct rte_service_spec_impl *s,
		struct core_state *cs, uint32_t service_idx)
{
	void *userdata = s->spec.callback_userdata;

	if (service_stats_enabled(s)) {
		uint64_t start = rte_rdtsc();
		s->spec.callback(userdata);
		uint64_t end = rte_rdtsc();
		s->cycles_spent += end - start;
		cs->calls_per_service[service_idx]++;
		s->calls++;
	} else
		s->spec.callback(userdata);
}
static inline int32_t
service_run(uint32_t i, struct core_state *cs, uint64_t service_mask)
{
	if (!service_valid(i))
		return -EINVAL;

	struct rte_service_spec_impl *s = &rte_services[i];
	if (s->comp_runstate != RUNSTATE_RUNNING ||
			s->app_runstate != RUNSTATE_RUNNING ||
			!(service_mask & (UINT64_C(1) << i))) {
		cs->service_active_on_lcore[i] = 0;
		return -ENOEXEC;
	}

	cs->service_active_on_lcore[i] = 1;

	/* Check if cmpset is required: if the service is MT safe, or at most
	 * one core is mapped to it, atomic ops are not required.
	 */
	const int use_atomics = (service_mt_safe(s) == 0) &&
		(rte_atomic32_read(&s->num_mapped_cores) > 1);
	if (use_atomics) {
		if (!rte_atomic32_cmpset((uint32_t *)&s->execute_lock, 0, 1))
			return -EBUSY;

		rte_service_runner_do_callback(s, cs, i);
		rte_atomic32_clear(&s->execute_lock);
	} else
		rte_service_runner_do_callback(s, cs, i);

	return 0;
}
int32_t
rte_service_may_be_active(uint32_t id)
{
	uint32_t ids[RTE_MAX_LCORE] = {0};
	int32_t lcore_count = rte_service_lcore_list(ids, RTE_MAX_LCORE);
	int i;

	if (!service_valid(id))
		return -EINVAL;

	for (i = 0; i < lcore_count; i++) {
		if (lcore_states[i].service_active_on_lcore[id])
			return 1;
	}

	return 0;
}
int32_t
rte_service_run_iter_on_app_lcore(uint32_t id, uint32_t serialize_mt_unsafe)
{
	/* run service on calling core, using all-ones as the service mask */
	if (!service_valid(id))
		return -EINVAL;

	struct core_state *cs = &lcore_states[rte_lcore_id()];
	struct rte_service_spec_impl *s = &rte_services[id];

	/* Atomically add this core to the mapped cores first, then examine if
	 * we can run the service. This avoids a race condition between
	 * checking the value, and atomically adding to the mapped count.
	 */
	if (serialize_mt_unsafe)
		rte_atomic32_inc(&s->num_mapped_cores);

	if (service_mt_safe(s) == 0 &&
			rte_atomic32_read(&s->num_mapped_cores) > 1) {
		if (serialize_mt_unsafe)
			rte_atomic32_dec(&s->num_mapped_cores);
		return -EBUSY;
	}

	int ret = service_run(id, cs, UINT64_MAX);

	if (serialize_mt_unsafe)
		rte_atomic32_dec(&s->num_mapped_cores);

	return ret;
}
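/* Illustrative use from an application lcore (assumes a valid service id
 * "sid", e.g. obtained via rte_service_get_by_name()):
 *
 *	// serialize_mt_unsafe = 1: serialize against service cores if the
 *	// service is not MT safe
 *	int ret = rte_service_run_iter_on_app_lcore(sid, 1);
 *	// ret is 0 on success, -EBUSY if another core currently runs it
 */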
static int32_t
rte_service_runner_func(void *arg)
{
	RTE_SET_USED(arg);
	uint32_t i;
	const int lcore = rte_lcore_id();
	struct core_state *cs = &lcore_states[lcore];

	while (lcore_states[lcore].runstate == RUNSTATE_RUNNING) {
		const uint64_t service_mask = cs->service_mask;

		for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
			/* return value ignored as no change to code flow */
			service_run(i, cs, service_mask);
		}

		cs->loops++;

		rte_smp_rmb();
	}

	lcore_config[lcore].state = WAIT;

	return 0;
}
int32_t
rte_service_lcore_count(void)
{
	int32_t count = 0;
	uint32_t i;
	for (i = 0; i < RTE_MAX_LCORE; i++)
		count += lcore_states[i].is_service_core;
	return count;
}

int32_t
rte_service_lcore_list(uint32_t array[], uint32_t n)
{
	uint32_t count = rte_service_lcore_count();
	if (count > n)
		return -ENOMEM;

	if (!array)
		return -EINVAL;

	uint32_t i;
	uint32_t idx = 0;
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		struct core_state *cs = &lcore_states[i];
		if (cs->is_service_core) {
			array[idx] = i;
			idx++;
		}
	}

	return count;
}

int32_t
rte_service_lcore_count_services(uint32_t lcore)
{
	if (lcore >= RTE_MAX_LCORE)
		return -EINVAL;

	struct core_state *cs = &lcore_states[lcore];
	if (!cs->is_service_core)
		return -ENOTSUP;

	return __builtin_popcountll(cs->service_mask);
}
int32_t
rte_service_start_with_defaults(void)
{
	/* create a default mapping from cores to services, then start the
	 * services to make them transparent to unaware applications.
	 */
	uint32_t i;
	int ret;
	uint32_t count = rte_service_get_count();

	int32_t lcore_iter = 0;
	uint32_t ids[RTE_MAX_LCORE] = {0};
	int32_t lcore_count = rte_service_lcore_list(ids, RTE_MAX_LCORE);

	if (lcore_count == 0)
		return -ENOTSUP;

	for (i = 0; (int)i < lcore_count; i++)
		rte_service_lcore_start(ids[i]);

	for (i = 0; i < count; i++) {
		/* Do a round-robin mapping here, with each service getting
		 * assigned a single core by default. With more services than
		 * cores, multiple services multiplex onto one core; with the
		 * same number of services as service cores, the mapping is
		 * 1:1.
		 */
		ret = rte_service_map_lcore_set(i, ids[lcore_iter], 1);
		if (ret)
			return -ENODEV;

		lcore_iter++;
		if (lcore_iter >= lcore_count)
			lcore_iter = 0;

		ret = rte_service_runstate_set(i, 1);
		if (ret)
			return -ENOEXEC;
	}

	return 0;
}
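/* Worked example of the default mapping above: with 2 service cores and 4
 * registered services, services 0 and 2 map to ids[0], services 1 and 3 map
 * to ids[1], and all four services are then set to the running state.
 */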
static int32_t
service_update(struct rte_service_spec *service, uint32_t lcore,
		uint32_t *set, uint32_t *enabled)
{
	uint32_t i;
	int32_t sid = -1;

	for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
		if ((struct rte_service_spec *)&rte_services[i] == service &&
				service_valid(i)) {
			sid = i;
			break;
		}
	}

	if (sid == -1 || lcore >= RTE_MAX_LCORE)
		return -EINVAL;

	if (!lcore_states[lcore].is_service_core)
		return -EINVAL;

	uint64_t sid_mask = UINT64_C(1) << sid;
	if (set) {
		uint64_t lcore_mapped = lcore_states[lcore].service_mask &
			sid_mask;

		if (*set && !lcore_mapped) {
			lcore_states[lcore].service_mask |= sid_mask;
			rte_atomic32_inc(&rte_services[sid].num_mapped_cores);
		}
		if (!*set && lcore_mapped) {
			lcore_states[lcore].service_mask &= ~(sid_mask);
			rte_atomic32_dec(&rte_services[sid].num_mapped_cores);
		}
	}

	if (enabled)
		*enabled = !!(lcore_states[lcore].service_mask & (sid_mask));

	rte_smp_wmb();

	return 0;
}
int32_t
rte_service_map_lcore_set(uint32_t id, uint32_t lcore, uint32_t enabled)
{
	struct rte_service_spec_impl *s;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
	uint32_t on = enabled > 0;
	return service_update(&s->spec, lcore, &on, 0);
}
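/* Example (hypothetical ids): map service "sid" to service lcore 3 and have
 * that lcore start polling it:
 *
 *	rte_service_lcore_add(3);
 *	rte_service_map_lcore_set(sid, 3, 1);
 *	rte_service_runstate_set(sid, 1);
 *	rte_service_lcore_start(3);
 */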
int32_t
rte_service_map_lcore_get(uint32_t id, uint32_t lcore)
{
	struct rte_service_spec_impl *s;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
	uint32_t enabled;
	int ret = service_update(&s->spec, lcore, 0, &enabled);
	if (ret == 0)
		return enabled;
	return ret;
}
static void
set_lcore_state(uint32_t lcore, int32_t state)
{
	/* mark core state in hugepage backed config */
	struct rte_config *cfg = rte_eal_get_configuration();
	cfg->lcore_role[lcore] = state;

	/* mark state in process local lcore_config */
	lcore_config[lcore].core_role = state;

	/* update per-lcore optimized state tracking */
	lcore_states[lcore].is_service_core = (state == ROLE_SERVICE);
}
int32_t
rte_service_lcore_reset_all(void)
{
	/* loop over cores, reset all service cores to mask 0 */
	uint32_t i;
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		if (lcore_states[i].is_service_core) {
			lcore_states[i].service_mask = 0;
			set_lcore_state(i, ROLE_RTE);
			lcore_states[i].runstate = RUNSTATE_STOPPED;
		}
	}
	for (i = 0; i < RTE_SERVICE_NUM_MAX; i++)
		rte_atomic32_set(&rte_services[i].num_mapped_cores, 0);

	rte_smp_wmb();

	return 0;
}
int32_t
rte_service_lcore_add(uint32_t lcore)
{
	if (lcore >= RTE_MAX_LCORE)
		return -EINVAL;
	if (lcore_states[lcore].is_service_core)
		return -EALREADY;

	set_lcore_state(lcore, ROLE_SERVICE);

	/* ensure that after adding a core the mask and state are defaults */
	lcore_states[lcore].service_mask = 0;
	lcore_states[lcore].runstate = RUNSTATE_STOPPED;

	rte_smp_wmb();

	return rte_eal_wait_lcore(lcore);
}
int32_t
rte_service_lcore_del(uint32_t lcore)
{
	if (lcore >= RTE_MAX_LCORE)
		return -EINVAL;

	struct core_state *cs = &lcore_states[lcore];
	if (!cs->is_service_core)
		return -EINVAL;

	if (cs->runstate != RUNSTATE_STOPPED)
		return -EBUSY;

	set_lcore_state(lcore, ROLE_RTE);

	rte_smp_wmb();
	return 0;
}
int32_t
rte_service_lcore_start(uint32_t lcore)
{
	if (lcore >= RTE_MAX_LCORE)
		return -EINVAL;

	struct core_state *cs = &lcore_states[lcore];
	if (!cs->is_service_core)
		return -EINVAL;

	if (cs->runstate == RUNSTATE_RUNNING)
		return -EALREADY;

	/* Set the core to the run state first, then launch; otherwise the
	 * runner would return immediately, as the runstate is what keeps it
	 * in the service poll loop.
	 */
	lcore_states[lcore].runstate = RUNSTATE_RUNNING;

	int ret = rte_eal_remote_launch(rte_service_runner_func, 0, lcore);
	/* returns -EBUSY if the core is already launched, 0 on success */
	return ret;
}
int32_t
rte_service_lcore_stop(uint32_t lcore)
{
	if (lcore >= RTE_MAX_LCORE)
		return -EINVAL;

	if (lcore_states[lcore].runstate == RUNSTATE_STOPPED)
		return -EALREADY;

	uint32_t i;
	uint64_t service_mask = lcore_states[lcore].service_mask;
	for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
		/* keep the full 64-bit result, so services with IDs >= 32 are
		 * not silently truncated to zero.
		 */
		uint64_t enabled = service_mask & (UINT64_C(1) << i);
		int32_t service_running = rte_service_runstate_get(i);
		int32_t only_core = (1 ==
			rte_atomic32_read(&rte_services[i].num_mapped_cores));

		/* if the core is mapped, and the service is running, and this
		 * is the only core that is mapped, the service would cease to
		 * run if this core stopped, so fail instead.
		 */
		if (enabled && service_running && only_core)
			return -EBUSY;
	}

	lcore_states[lcore].runstate = RUNSTATE_STOPPED;

	return 0;
}
int32_t
rte_service_attr_get(uint32_t id, uint32_t attr_id, uint64_t *attr_value)
{
	struct rte_service_spec_impl *s;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);

	if (!attr_value)
		return -EINVAL;

	switch (attr_id) {
	case RTE_SERVICE_ATTR_CYCLES:
		*attr_value = s->cycles_spent;
		return 0;
	case RTE_SERVICE_ATTR_CALL_COUNT:
		*attr_value = s->calls;
		return 0;
	default:
		return -EINVAL;
	}
}
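/* Example: read the accumulated statistics for service "sid" (requires
 * rte_service_set_stats_enable(sid, 1) beforehand, otherwise the counters
 * remain zero):
 *
 *	uint64_t calls, cycles;
 *	rte_service_attr_get(sid, RTE_SERVICE_ATTR_CALL_COUNT, &calls);
 *	rte_service_attr_get(sid, RTE_SERVICE_ATTR_CYCLES, &cycles);
 */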
int32_t
rte_service_lcore_attr_get(uint32_t lcore, uint32_t attr_id,
		uint64_t *attr_value)
{
	struct core_state *cs;

	if (lcore >= RTE_MAX_LCORE || !attr_value)
		return -EINVAL;

	cs = &lcore_states[lcore];
	if (!cs->is_service_core)
		return -ENOTSUP;

	switch (attr_id) {
	case RTE_SERVICE_LCORE_ATTR_LOOPS:
		*attr_value = cs->loops;
		return 0;
	default:
		return -EINVAL;
	}
}
static void
rte_service_dump_one(FILE *f, struct rte_service_spec_impl *s,
		uint64_t all_cycles, uint32_t reset)
{
	RTE_SET_USED(all_cycles);

	/* avoid divide by zero */
	uint64_t calls = 1;
	if (s->calls != 0)
		calls = s->calls;

	if (reset) {
		s->cycles_spent = 0;
		s->calls = 0;
		return;
	}

	if (f == NULL)
		return;

	fprintf(f, " %s: stats %d\tcalls %"PRIu64"\tcycles %"
			PRIu64"\tavg: %"PRIu64"\n",
			s->spec.name, service_stats_enabled(s), s->calls,
			s->cycles_spent, s->cycles_spent / calls);
}
int32_t
rte_service_attr_reset_all(uint32_t id)
{
	struct rte_service_spec_impl *s;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);

	int reset = 1;
	rte_service_dump_one(NULL, s, 0, reset);
	return 0;
}
int32_t
rte_service_lcore_attr_reset_all(uint32_t lcore)
{
	struct core_state *cs;

	if (lcore >= RTE_MAX_LCORE)
		return -EINVAL;

	cs = &lcore_states[lcore];
	if (!cs->is_service_core)
		return -ENOTSUP;

	cs->loops = 0;

	return 0;
}
static void
service_dump_calls_per_lcore(FILE *f, uint32_t lcore, uint32_t reset)
{
	uint32_t i;
	struct core_state *cs = &lcore_states[lcore];

	fprintf(f, "%02d\t", lcore);
	for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
		if (!service_valid(i))
			continue;
		fprintf(f, "%"PRIu64"\t", cs->calls_per_service[i]);
		if (reset)
			cs->calls_per_service[i] = 0;
	}
	fprintf(f, "\n");
}
int32_t
rte_service_dump(FILE *f, uint32_t id)
{
	uint32_t i;
	int print_one = (id != UINT32_MAX);

	uint64_t total_cycles = 0;

	for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
		if (!service_valid(i))
			continue;
		total_cycles += rte_services[i].cycles_spent;
	}

	/* print only the specified service */
	if (print_one) {
		struct rte_service_spec_impl *s;
		SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
		fprintf(f, "Service %s Summary\n", s->spec.name);
		uint32_t reset = 0;
		rte_service_dump_one(f, s, total_cycles, reset);
		return 0;
	}

	/* print all services, as UINT32_MAX was passed as id */
	fprintf(f, "Services Summary\n");
	for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
		if (!service_valid(i))
			continue;
		uint32_t reset = 0;
		rte_service_dump_one(f, &rte_services[i], total_cycles, reset);
	}

	fprintf(f, "Service Cores Summary\n");
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		if (lcore_config[i].core_role != ROLE_SERVICE)
			continue;

		uint32_t reset = 0;
		service_dump_calls_per_lcore(f, i, reset);
	}

	return 0;
}