/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2017 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdio.h>
#include <string.h>
#include <inttypes.h>
#include <errno.h>

#include <rte_service.h>
#include "include/rte_service_component.h"

#include <rte_eal.h>
#include <rte_launch.h>
#include <rte_lcore.h>
#include <rte_common.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_atomic.h>
#include <rte_memory.h>
#include <rte_malloc.h>
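
/* This file implements the service core infrastructure: components register
 * services (a callback plus metadata), applications map those services to
 * dedicated service lcores, and each service lcore polls and runs the
 * services mapped to it.
 */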
#define RTE_SERVICE_NUM_MAX 64
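/* 64 is a hard limit here: each core tracks its mapped services in a single
 * uint64_t bitmask (core_state.service_mask), one bit per service id.
 */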
#define SERVICE_F_REGISTERED    (1 << 0)
#define SERVICE_F_STATS_ENABLED (1 << 1)

/* runstates for services and lcores, denoting if they are active or not */
#define RUNSTATE_STOPPED 0
#define RUNSTATE_RUNNING 1

/* internal representation of a service */
struct rte_service_spec_impl {
	/* public part of the struct */
	struct rte_service_spec spec;

	/* atomic lock that when set indicates a service core is currently
	 * running this service callback. When not set, a core may take the
	 * lock and then run the service callback.
	 */
	rte_atomic32_t execute_lock;

	/* API set/get-able variables */
	int8_t app_runstate;
	int8_t comp_runstate;
	uint8_t internal_flags;

	/* per service statistics */
	uint32_t num_mapped_cores;
	uint64_t calls;
	uint64_t cycles_spent;
} __rte_cache_aligned;
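
/* Both internal structs are cache aligned so that each entry of the arrays
 * below sits on its own cache line(s), avoiding false sharing between lcores
 * that update adjacent entries.
 */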

/* the internal values of a service core */
struct core_state {
	/* bitmask of the service IDs run on this core */
	uint64_t service_mask;
	uint8_t runstate; /* running or stopped */
	uint8_t is_service_core; /* set if core is currently a service core */

	/* per-service call statistics for this core */
	uint64_t calls_per_service[RTE_SERVICE_NUM_MAX];
} __rte_cache_aligned;
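
/* calls_per_service[] is indexed by service id and is only incremented by
 * the lcore that owns this struct (in the runner loop below), and only when
 * statistics collection is enabled for the service.
 */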

static uint32_t rte_service_count;
static struct rte_service_spec_impl *rte_services;
static struct core_state *lcore_states;
static uint32_t rte_service_library_initialized;
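
/* Initialize the service library: allocate the service and lcore state
 * arrays and register any lcores the EAL flagged as service cores. This is
 * expected to run once, early in startup (typically from rte_eal_init());
 * calling it again returns -EALREADY.
 */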
int32_t rte_service_init(void)
{
	if (rte_service_library_initialized) {
		printf("service library init() called, init flag %d\n",
			rte_service_library_initialized);
		return -EALREADY;
	}

	rte_services = rte_calloc("rte_services", RTE_SERVICE_NUM_MAX,
			sizeof(struct rte_service_spec_impl),
			RTE_CACHE_LINE_SIZE);
	if (!rte_services) {
		printf("error allocating rte services array\n");
		return -ENOMEM;
	}

	lcore_states = rte_calloc("rte_service_core_states", RTE_MAX_LCORE,
			sizeof(struct core_state), RTE_CACHE_LINE_SIZE);
	if (!lcore_states) {
		printf("error allocating core states array\n");
		return -ENOMEM;
	}

	int i;
	struct rte_config *cfg = rte_eal_get_configuration();
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		/* add cores flagged as service cores, skipping the master */
		if (lcore_config[i].core_role == ROLE_SERVICE) {
			if ((unsigned int)i == cfg->master_lcore)
				continue;
			rte_service_lcore_add(i);
		}
	}

	rte_service_library_initialized = 1;
	return 0;
}

/* returns 1 if service is registered and has not been unregistered
 * Returns 0 if service never registered, or has been unregistered
 */
static inline int
service_valid(uint32_t id)
{
	return !!(rte_services[id].internal_flags & SERVICE_F_REGISTERED);
}

/* validate ID and retrieve service pointer, or return error value */
#define SERVICE_VALID_GET_OR_ERR_RET(id, service, retval) do {          \
	if (id >= RTE_SERVICE_NUM_MAX || !service_valid(id))            \
		return retval;                                          \
	service = &rte_services[id];                                    \
} while (0)
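
/* Illustrative expansion in a hypothetical caller:
 *     struct rte_service_spec_impl *s;
 *     SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
 * an out-of-range or unregistered id triggers an early "return -EINVAL;",
 * otherwise s points at the validated entry of rte_services[].
 */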

/* returns 1 if statistics should be collected for service
 * Returns 0 if statistics should not be collected for service
 */
static inline int
service_stats_enabled(struct rte_service_spec_impl *impl)
{
	return !!(impl->internal_flags & SERVICE_F_STATS_ENABLED);
}

static inline int
service_mt_safe(struct rte_service_spec_impl *s)
{
	return !!(s->spec.capabilities & RTE_SERVICE_CAP_MT_SAFE);
}

int32_t rte_service_set_stats_enable(uint32_t id, int32_t enabled)
{
	struct rte_service_spec_impl *s;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, 0);

	if (enabled)
		s->internal_flags |= SERVICE_F_STATS_ENABLED;
	else
		s->internal_flags &= ~(SERVICE_F_STATS_ENABLED);

	return 0;
}

uint32_t
rte_service_get_count(void)
{
	return rte_service_count;
}

int32_t rte_service_get_by_name(const char *name, uint32_t *service_id)
{
	if (!service_id)
		return -EINVAL;

	uint32_t i;
	for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
		if (service_valid(i) &&
				strcmp(name, rte_services[i].spec.name) == 0) {
			*service_id = i;
			return 0;
		}
	}

	return -ENODEV;
}

const char *
rte_service_get_name(uint32_t id)
{
	struct rte_service_spec_impl *s;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, 0);
	return s->spec.name;
}

int32_t
rte_service_probe_capability(uint32_t id, uint32_t capability)
{
	struct rte_service_spec_impl *s;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
	return !!(s->spec.capabilities & capability);
}
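
/* Illustrative registration by a service component (hypothetical service
 * name and callback, shown for context only):
 *
 *     static int32_t my_service_run(void *userdata);   // hypothetical
 *
 *     struct rte_service_spec spec = {
 *             .name = "my_service",
 *             .callback = my_service_run,
 *             .callback_userdata = NULL,
 *             .capabilities = RTE_SERVICE_CAP_MT_SAFE,
 *     };
 *     uint32_t id;
 *     int ret = rte_service_component_register(&spec, &id);
 */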
int32_t
rte_service_component_register(const struct rte_service_spec *spec,
		uint32_t *id_ptr)
{
	uint32_t i;
	int32_t free_slot = -1;

	if (spec->callback == NULL || strlen(spec->name) == 0)
		return -EINVAL;

	/* take the first unregistered slot in the service array */
	for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
		if (!service_valid(i)) {
			free_slot = i;
			break;
		}
	}

	if ((free_slot < 0) || (i == RTE_SERVICE_NUM_MAX))
		return -ENOSPC;

	struct rte_service_spec_impl *s = &rte_services[free_slot];
	s->spec = *spec;
	s->internal_flags |= SERVICE_F_REGISTERED;
	rte_service_count++;

	if (id_ptr)
		*id_ptr = free_slot;

	return 0;
}

int32_t
rte_service_component_unregister(uint32_t id)
{
	uint32_t i;
	struct rte_service_spec_impl *s;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);

	rte_service_count--;

	s->internal_flags &= ~(SERVICE_F_REGISTERED);

	/* clear the run-bit in all cores */
	for (i = 0; i < RTE_MAX_LCORE; i++)
		lcore_states[i].service_mask &= ~(UINT64_C(1) << id);

	memset(&rte_services[id], 0, sizeof(struct rte_service_spec_impl));

	return 0;
}

int32_t
rte_service_component_runstate_set(uint32_t id, uint32_t runstate)
{
	struct rte_service_spec_impl *s;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);

	if (runstate)
		s->comp_runstate = RUNSTATE_RUNNING;
	else
		s->comp_runstate = RUNSTATE_STOPPED;

	return 0;
}
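
/* Note: a service only executes when both runstates agree. The component
 * (backend) sets comp_runstate above, the application sets app_runstate via
 * rte_service_runstate_set() below, and rte_service_runstate_get() and the
 * runner loop require both to be RUNSTATE_RUNNING.
 */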
int32_t
rte_service_runstate_set(uint32_t id, uint32_t runstate)
{
	struct rte_service_spec_impl *s;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);

	if (runstate)
		s->app_runstate = RUNSTATE_RUNNING;
	else
		s->app_runstate = RUNSTATE_STOPPED;

	return 0;
}

int32_t
rte_service_runstate_get(uint32_t id)
{
	struct rte_service_spec_impl *s;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);

	return (s->app_runstate == RUNSTATE_RUNNING) &&
		(s->comp_runstate == RUNSTATE_RUNNING) &&
		(s->num_mapped_cores > 0);
}

static inline void
rte_service_runner_do_callback(struct rte_service_spec_impl *s,
		struct core_state *cs, uint32_t service_idx)
{
	void *userdata = s->spec.callback_userdata;

	if (service_stats_enabled(s)) {
		uint64_t start = rte_rdtsc();
		s->spec.callback(userdata);
		uint64_t end = rte_rdtsc();
		s->cycles_spent += end - start;
		cs->calls_per_service[service_idx]++;
		s->calls++;
	} else
		s->spec.callback(userdata);
}

static int32_t
rte_service_runner_func(void *arg)
{
	RTE_SET_USED(arg);
	uint32_t i;
	const int lcore = rte_lcore_id();
	struct core_state *cs = &lcore_states[lcore];

	while (lcore_states[lcore].runstate == RUNSTATE_RUNNING) {
		const uint64_t service_mask = cs->service_mask;

		for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
			if (!service_valid(i))
				continue;
			struct rte_service_spec_impl *s = &rte_services[i];
			if (s->comp_runstate != RUNSTATE_RUNNING ||
					s->app_runstate != RUNSTATE_RUNNING ||
					!(service_mask & (UINT64_C(1) << i)))
				continue;

			/* check if cmpset is needed: if the service is MT
			 * safe or <= 1 core is mapped, atomic ops are not
			 * required.
			 */
			const int use_atomics = (service_mt_safe(s) == 0) &&
				(s->num_mapped_cores > 1);
			if (use_atomics) {
				uint32_t *lock =
					(uint32_t *)&s->execute_lock;
				if (rte_atomic32_cmpset(lock, 0, 1)) {
					rte_service_runner_do_callback(s,
							cs, i);
					rte_atomic32_clear(&s->execute_lock);
				}
			} else
				rte_service_runner_do_callback(s, cs, i);
		}
	}

	/* mark this lcore as finished so rte_eal_wait_lcore() can return */
	lcore_config[lcore].state = WAIT;
	return 0;
}

int32_t
rte_service_lcore_count(void)
{
	int32_t count = 0;
	uint32_t i;
	for (i = 0; i < RTE_MAX_LCORE; i++)
		count += lcore_states[i].is_service_core;
	return count;
}

int32_t
rte_service_lcore_list(uint32_t array[], uint32_t n)
{
	uint32_t count = rte_service_lcore_count();
	if (count > n)
		return -ENOMEM;

	if (!array)
		return -EINVAL;

	uint32_t i;
	uint32_t idx = 0;
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		struct core_state *cs = &lcore_states[i];
		if (cs->is_service_core) {
			array[idx] = i;
			idx++;
		}
	}

	return count;
}

int32_t
rte_service_lcore_count_services(uint32_t lcore)
{
	if (lcore >= RTE_MAX_LCORE)
		return -EINVAL;

	struct core_state *cs = &lcore_states[lcore];
	if (!cs->is_service_core)
		return -ENOTSUP;

	/* each set bit in the mask is one service mapped to this core */
	return __builtin_popcountll(cs->service_mask);
}

int32_t
rte_service_start_with_defaults(void)
{
	/* create a default mapping from cores to services, then start the
	 * services to make them transparent to unaware applications.
	 */
	uint32_t i;
	int ret;
	uint32_t count = rte_service_get_count();

	int32_t lcore_iter = 0;
	uint32_t ids[RTE_MAX_LCORE] = {0};
	int32_t lcore_count = rte_service_lcore_list(ids, RTE_MAX_LCORE);

	if (lcore_count == 0)
		return -ENOTSUP;

	for (i = 0; (int)i < lcore_count; i++)
		rte_service_lcore_start(ids[i]);

	for (i = 0; i < count; i++) {
		/* do 1:1 core mapping here, with each service getting
		 * assigned a single core by default. Adding multiple services
		 * should multiplex to a single core, or 1:1 if there is the
		 * same number of services as service-cores
		 */
		ret = rte_service_map_lcore_set(i, ids[lcore_iter], 1);
		if (ret)
			return -ENODEV;

		lcore_iter++;
		if (lcore_iter >= lcore_count)
			lcore_iter = 0;

		ret = rte_service_runstate_set(i, 1);
		if (ret)
			return -ENOEXEC;
	}

	return 0;
}
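
/* Common helper behind the lcore map set/get APIs below: looks up the
 * service id for the given spec pointer, then either updates the lcore's
 * service_mask (when "set" is non-NULL) or reports the current mapping
 * (when "enabled" is non-NULL).
 */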
static int32_t
service_update(struct rte_service_spec *service, uint32_t lcore,
		uint32_t *set, uint32_t *enabled)
{
	uint32_t i;
	int32_t sid = -1;

	for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
		if ((struct rte_service_spec *)&rte_services[i] == service &&
				service_valid(i)) {
			sid = i;
			break;
		}
	}

	if (sid == -1 || lcore >= RTE_MAX_LCORE)
		return -EINVAL;

	if (!lcore_states[lcore].is_service_core)
		return -EINVAL;

	uint64_t sid_mask = UINT64_C(1) << sid;
	if (set) {
		if (*set) {
			lcore_states[lcore].service_mask |= sid_mask;
			rte_services[sid].num_mapped_cores++;
		} else {
			lcore_states[lcore].service_mask &= ~(sid_mask);
			rte_services[sid].num_mapped_cores--;
		}
	}

	if (enabled)
		*enabled = !!(lcore_states[lcore].service_mask & (sid_mask));

	return 0;
}

int32_t
rte_service_map_lcore_set(uint32_t id, uint32_t lcore, uint32_t enabled)
{
	struct rte_service_spec_impl *s;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
	uint32_t on = enabled > 0;
	return service_update(&s->spec, lcore, &on, 0);
}

int32_t
rte_service_map_lcore_get(uint32_t id, uint32_t lcore)
{
	struct rte_service_spec_impl *s;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
	uint32_t enabled;
	int ret = service_update(&s->spec, lcore, 0, &enabled);
	if (ret == 0)
		return enabled;
	return ret;
}
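
/* Illustrative use of the mapping APIs above (hypothetical id and lcore):
 *     rte_service_map_lcore_set(id, 3, 1);          // map service to lcore 3
 *     int mapped = rte_service_map_lcore_get(id, 3); // now returns 1
 *     rte_service_map_lcore_set(id, 3, 0);          // unmap again
 */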

int32_t rte_service_lcore_reset_all(void)
{
	/* loop over cores, reset all to mask 0 */
	uint32_t i;
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		lcore_states[i].service_mask = 0;
		lcore_states[i].is_service_core = 0;
		lcore_states[i].runstate = RUNSTATE_STOPPED;
	}
	for (i = 0; i < RTE_SERVICE_NUM_MAX; i++)
		rte_services[i].num_mapped_cores = 0;

	return 0;
}

static void
set_lcore_state(uint32_t lcore, int32_t state)
{
	/* mark core state in hugepage backed config */
	struct rte_config *cfg = rte_eal_get_configuration();
	cfg->lcore_role[lcore] = state;

	/* mark state in process local lcore_config */
	lcore_config[lcore].core_role = state;

	/* update per-lcore optimized state tracking */
	lcore_states[lcore].is_service_core = (state == ROLE_SERVICE);
}
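
/* Note: the role is written both to the shared rte_config (visible to
 * secondary processes) and to the process-local lcore_config, keeping the
 * two views of the core's role consistent.
 */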
int32_t
rte_service_lcore_add(uint32_t lcore)
{
	if (lcore >= RTE_MAX_LCORE)
		return -EINVAL;
	if (lcore_states[lcore].is_service_core)
		return -EALREADY;

	set_lcore_state(lcore, ROLE_SERVICE);

	/* ensure that after adding a core the mask and state are defaults */
	lcore_states[lcore].service_mask = 0;
	lcore_states[lcore].runstate = RUNSTATE_STOPPED;

	return rte_eal_wait_lcore(lcore);
}

int32_t
rte_service_lcore_del(uint32_t lcore)
{
	if (lcore >= RTE_MAX_LCORE)
		return -EINVAL;

	struct core_state *cs = &lcore_states[lcore];
	if (!cs->is_service_core)
		return -EINVAL;

	/* only a stopped core can be removed from service duty */
	if (cs->runstate != RUNSTATE_STOPPED)
		return -EBUSY;

	set_lcore_state(lcore, ROLE_RTE);
	return 0;
}

int32_t
rte_service_lcore_start(uint32_t lcore)
{
	if (lcore >= RTE_MAX_LCORE)
		return -EINVAL;

	struct core_state *cs = &lcore_states[lcore];
	if (!cs->is_service_core)
		return -EINVAL;

	if (cs->runstate == RUNSTATE_RUNNING)
		return -EALREADY;

	/* set core to run state first, and then launch otherwise it will
	 * return immediately as runstate keeps it in the service poll loop
	 */
	lcore_states[lcore].runstate = RUNSTATE_RUNNING;

	int ret = rte_eal_remote_launch(rte_service_runner_func, 0, lcore);
	/* returns -EBUSY if the core is already launched, 0 on success */
	return ret;
}

int32_t
rte_service_lcore_stop(uint32_t lcore)
{
	if (lcore >= RTE_MAX_LCORE)
		return -EINVAL;

	if (lcore_states[lcore].runstate == RUNSTATE_STOPPED)
		return -EALREADY;

	uint32_t i;
	uint64_t service_mask = lcore_states[lcore].service_mask;
	for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
		/* double-negate to avoid truncating the 64-bit mask */
		int32_t enabled = !!(service_mask & (UINT64_C(1) << i));
		int32_t service_running = rte_service_runstate_get(i);
		int32_t only_core = rte_services[i].num_mapped_cores == 1;

		/* if the core is mapped, and the service is running, and this
		 * is the only core that is mapped, the service would cease to
		 * run if this core stopped, so fail instead.
		 */
		if (enabled && service_running && only_core)
			return -EBUSY;
	}

	lcore_states[lcore].runstate = RUNSTATE_STOPPED;
	return 0;
}

static void
rte_service_dump_one(FILE *f, struct rte_service_spec_impl *s,
		uint64_t all_cycles, uint32_t reset)
{
	RTE_SET_USED(all_cycles);

	/* avoid divide by zero */
	uint64_t calls = 1;
	if (s->calls != 0)
		calls = s->calls;

	if (reset) {
		s->cycles_spent = 0;
		s->calls = 0;
		return;
	}

	fprintf(f, " %s: stats %d\tcalls %"PRIu64"\tcycles %"
			PRIu64"\tavg: %"PRIu64"\n",
			s->spec.name, service_stats_enabled(s), s->calls,
			s->cycles_spent, s->cycles_spent / calls);
}

static void
service_dump_calls_per_lcore(FILE *f, uint32_t lcore, uint32_t reset)
{
	uint32_t i;
	struct core_state *cs = &lcore_states[lcore];

	fprintf(f, "%02u\t", lcore);
	for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
		if (!service_valid(i))
			continue;
		fprintf(f, "%"PRIu64"\t", cs->calls_per_service[i]);
		if (reset)
			cs->calls_per_service[i] = 0;
	}
	fprintf(f, "\n");
}

int32_t rte_service_dump(FILE *f, uint32_t id)
{
	uint32_t i;
	int print_one = (id != UINT32_MAX);

	uint64_t total_cycles = 0;
	for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
		if (!service_valid(i))
			continue;
		total_cycles += rte_services[i].cycles_spent;
	}

	/* print only the specified service */
	if (print_one) {
		struct rte_service_spec_impl *s;
		SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
		fprintf(f, "Service %s Summary\n", s->spec.name);
		uint32_t reset = 0;
		rte_service_dump_one(f, s, total_cycles, reset);
		return 0;
	}

	/* print all services, as UINT32_MAX was passed as id */
	fprintf(f, "Services Summary\n");
	for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
		if (!service_valid(i))
			continue;
		uint32_t reset = 1;
		rte_service_dump_one(f, &rte_services[i], total_cycles, reset);
	}

	fprintf(f, "Service Cores Summary\n");
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		if (lcore_config[i].core_role != ROLE_SERVICE)
			continue;

		uint32_t reset = 1;
		service_dump_calls_per_lcore(f, i, reset);
	}

	return 0;
}