/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2017 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdio.h>
#include <string.h>
#include <inttypes.h>

#include <rte_service.h>
#include "include/rte_service_component.h"

#include <rte_eal.h>
#include <rte_lcore.h>
#include <rte_common.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_atomic.h>
#include <rte_memory.h>
#include <rte_malloc.h>

#define RTE_SERVICE_NUM_MAX 64

#define SERVICE_F_REGISTERED    (1 << 0)
#define SERVICE_F_STATS_ENABLED (1 << 1)
#define SERVICE_F_START_CHECK   (1 << 2)

/* runstates for services and lcores, denoting if they are active or not */
#define RUNSTATE_STOPPED 0
#define RUNSTATE_RUNNING 1

/* internal representation of a service */
struct rte_service_spec_impl {
	/* public part of the struct */
	struct rte_service_spec spec;

	/* atomic lock that when set indicates a service core is currently
	 * running this service callback. When not set, a core may take the
	 * lock and then run the service callback.
	 */
	rte_atomic32_t execute_lock;

	/* API set/get-able variables */
	int8_t app_runstate;
	int8_t comp_runstate;
	uint8_t internal_flags;

	/* per-service statistics */
	uint32_t num_mapped_cores;
	uint64_t calls;
	uint64_t cycles_spent;
} __rte_cache_aligned;

/* the internal values of a service core */
struct core_state {
	/* bitmap of service IDs mapped to run on this core */
	uint64_t service_mask;
	uint8_t runstate; /* running or stopped */
	uint8_t is_service_core; /* set if core is currently a service core */

	/* per-service call statistics for this core */
	uint64_t calls_per_service[RTE_SERVICE_NUM_MAX];
} __rte_cache_aligned;

static uint32_t rte_service_count;
static struct rte_service_spec_impl *rte_services;
static struct core_state *lcore_states;
static uint32_t rte_service_library_initialized;

int32_t rte_service_init(void)
{
	if (rte_service_library_initialized) {
		printf("service library init() called, init flag %d\n",
			rte_service_library_initialized);
		return -EALREADY;
	}

	rte_services = rte_calloc("rte_services", RTE_SERVICE_NUM_MAX,
			sizeof(struct rte_service_spec_impl),
			RTE_CACHE_LINE_SIZE);
	if (!rte_services) {
		printf("error allocating rte services array\n");
		return -ENOMEM;
	}

	lcore_states = rte_calloc("rte_service_core_states", RTE_MAX_LCORE,
			sizeof(struct core_state), RTE_CACHE_LINE_SIZE);
	if (!lcore_states) {
		printf("error allocating core states array\n");
		return -ENOMEM;
	}

	/* detect cores flagged ROLE_SERVICE at EAL init and register them
	 * as service cores, never including the master lcore.
	 */
	int i;
	struct rte_config *cfg = rte_eal_get_configuration();
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		if (lcore_config[i].core_role == ROLE_SERVICE) {
			if ((unsigned int)i == cfg->master_lcore)
				continue;
			rte_service_lcore_add(i);
		}
	}

	rte_service_library_initialized = 1;
	return 0;
}

/* returns 1 if service is registered and has not been unregistered
 * Returns 0 if service never registered, or has been unregistered
 */
static inline int
service_valid(uint32_t id)
{
	return !!(rte_services[id].internal_flags & SERVICE_F_REGISTERED);
}

/* validate ID and retrieve service pointer, or return error value */
#define SERVICE_VALID_GET_OR_ERR_RET(id, service, retval) do {          \
	if (id >= RTE_SERVICE_NUM_MAX || !service_valid(id))             \
		return retval;                                           \
	service = &rte_services[id];                                     \
} while (0)

/* returns 1 if statistics should be collected for service
 * Returns 0 if statistics should not be collected for service
 */
static inline int
service_stats_enabled(struct rte_service_spec_impl *impl)
{
	return !!(impl->internal_flags & SERVICE_F_STATS_ENABLED);
}

static inline int
service_mt_safe(struct rte_service_spec_impl *s)
{
	return !!(s->spec.capabilities & RTE_SERVICE_CAP_MT_SAFE);
}

int32_t rte_service_set_stats_enable(uint32_t id, int32_t enabled)
{
	struct rte_service_spec_impl *s;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, 0);

	if (enabled)
		s->internal_flags |= SERVICE_F_STATS_ENABLED;
	else
		s->internal_flags &= ~(SERVICE_F_STATS_ENABLED);

	return 0;
}

int32_t rte_service_set_runstate_mapped_check(uint32_t id, int32_t enabled)
{
	struct rte_service_spec_impl *s;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, 0);

	if (enabled)
		s->internal_flags |= SERVICE_F_START_CHECK;
	else
		s->internal_flags &= ~(SERVICE_F_START_CHECK);

	return 0;
}

uint32_t
rte_service_get_count(void)
{
	return rte_service_count;
}

int32_t rte_service_get_by_name(const char *name, uint32_t *service_id)
{
	if (!service_id)
		return -EINVAL;

	int i;
	for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
		if (service_valid(i) &&
				strcmp(name, rte_services[i].spec.name) == 0) {
			*service_id = i;
			return 0;
		}
	}

	return -ENODEV;
}

const char *
rte_service_get_name(uint32_t id)
{
	struct rte_service_spec_impl *s;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, 0);
	return s->spec.name;
}

int32_t
rte_service_probe_capability(uint32_t id, uint32_t capability)
{
	struct rte_service_spec_impl *s;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
	return !!(s->spec.capabilities & capability);
}

int32_t
rte_service_component_register(const struct rte_service_spec *spec,
			       uint32_t *id_ptr)
{
	uint32_t i;
	int32_t free_slot = -1;

	if (spec->callback == NULL || strlen(spec->name) == 0)
		return -EINVAL;

	for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
		if (!service_valid(i)) {
			free_slot = i;
			break;
		}
	}

	if ((free_slot < 0) || (i == RTE_SERVICE_NUM_MAX))
		return -ENOSPC;

	struct rte_service_spec_impl *s = &rte_services[free_slot];
	s->spec = *spec;
	s->internal_flags |= SERVICE_F_REGISTERED | SERVICE_F_START_CHECK;

	rte_service_count++;
	if (id_ptr)
		*id_ptr = free_slot;

	return 0;
}
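
/* Illustrative sketch (not part of this library): how a component might
 * register a service with the API above. The callback, function names and
 * the RTE_SERVICE_USAGE_EXAMPLES guard macro are hypothetical; the guard
 * keeps the sketch out of a real build.
 */
#ifdef RTE_SERVICE_USAGE_EXAMPLES
static int32_t
example_service_cb(void *userdata)
{
	RTE_SET_USED(userdata); /* a real service would do work here */
	return 0;
}

static int32_t
example_component_register(uint32_t *id_out)
{
	struct rte_service_spec spec;
	memset(&spec, 0, sizeof(spec));
	snprintf(spec.name, sizeof(spec.name), "example_service");
	spec.callback = example_service_cb;

	/* register, then mark the component as ready to run */
	int32_t ret = rte_service_component_register(&spec, id_out);
	if (ret)
		return ret;
	return rte_service_component_runstate_set(*id_out, 1);
}
#endif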

int32_t
rte_service_component_unregister(uint32_t id)
{
	uint32_t i;
	struct rte_service_spec_impl *s;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);

	rte_service_count--;

	s->internal_flags &= ~(SERVICE_F_REGISTERED);

	/* clear the run-bit in all cores */
	for (i = 0; i < RTE_MAX_LCORE; i++)
		lcore_states[i].service_mask &= ~(UINT64_C(1) << id);

	memset(&rte_services[id], 0, sizeof(struct rte_service_spec_impl));

	return 0;
}

int32_t
rte_service_component_runstate_set(uint32_t id, uint32_t runstate)
{
	struct rte_service_spec_impl *s;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);

	if (runstate)
		s->comp_runstate = RUNSTATE_RUNNING;
	else
		s->comp_runstate = RUNSTATE_STOPPED;

	return 0;
}

int32_t
rte_service_runstate_set(uint32_t id, uint32_t runstate)
{
	struct rte_service_spec_impl *s;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);

	if (runstate)
		s->app_runstate = RUNSTATE_RUNNING;
	else
		s->app_runstate = RUNSTATE_STOPPED;

	return 0;
}

int32_t
rte_service_runstate_get(uint32_t id)
{
	struct rte_service_spec_impl *s;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);

	/* if the start-check is disabled, skip the mapped-lcore check */
	int check_disabled = !(s->internal_flags & SERVICE_F_START_CHECK);
	int lcore_mapped = (s->num_mapped_cores > 0);

	return (s->app_runstate == RUNSTATE_RUNNING) &&
		(s->comp_runstate == RUNSTATE_RUNNING) &&
		(check_disabled | lcore_mapped);
}
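
/* Illustrative sketch (not part of this library): a service only reports
 * runstate 1 once the component and the application have both set their
 * runstates and, with the start-check enabled, at least one lcore is
 * mapped. Names under the hypothetical RTE_SERVICE_USAGE_EXAMPLES guard
 * are for illustration only.
 */
#ifdef RTE_SERVICE_USAGE_EXAMPLES
static void
example_runstate_gating(uint32_t id)
{
	rte_service_component_runstate_set(id, 1);
	/* runstate_get() still returns 0 until the app enables it too */
	rte_service_runstate_set(id, 1);
	/* now 1, provided a core is mapped or the start-check is disabled */
	int32_t running = rte_service_runstate_get(id);
	RTE_SET_USED(running);
}
#endif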

static inline void
rte_service_runner_do_callback(struct rte_service_spec_impl *s,
			       struct core_state *cs, uint32_t service_idx)
{
	void *userdata = s->spec.callback_userdata;

	if (service_stats_enabled(s)) {
		uint64_t start = rte_rdtsc();
		s->spec.callback(userdata);
		uint64_t end = rte_rdtsc();
		s->cycles_spent += end - start;
		cs->calls_per_service[service_idx]++;
		s->calls++;
	} else
		s->spec.callback(userdata);
}

static inline int32_t
service_run(uint32_t i, struct core_state *cs, uint64_t service_mask)
{
	if (!service_valid(i))
		return -EINVAL;

	struct rte_service_spec_impl *s = &rte_services[i];
	if (s->comp_runstate != RUNSTATE_RUNNING ||
			s->app_runstate != RUNSTATE_RUNNING ||
			!(service_mask & (UINT64_C(1) << i)))
		return -ENOEXEC;

	/* check if the cmpset is needed: if the service is MT safe, or has
	 * <= 1 core mapped, atomic ops are not required.
	 */
	const int use_atomics = (service_mt_safe(s) == 0) &&
				(s->num_mapped_cores > 1);
	if (use_atomics) {
		if (!rte_atomic32_cmpset((uint32_t *)&s->execute_lock, 0, 1))
			return -EBUSY;

		rte_service_runner_do_callback(s, cs, i);
		rte_atomic32_clear(&s->execute_lock);
	} else
		rte_service_runner_do_callback(s, cs, i);

	return 0;
}
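
/* Illustrative sketch (not part of this library): a service that declares
 * RTE_SERVICE_CAP_MT_SAFE at registration time opts out of the
 * execute_lock cmpset above, so multiple mapped cores may run its callback
 * concurrently. Guarded by the hypothetical RTE_SERVICE_USAGE_EXAMPLES
 * macro.
 */
#ifdef RTE_SERVICE_USAGE_EXAMPLES
static void
example_mt_safe_spec(struct rte_service_spec *spec)
{
	/* callback must be thread-safe; the library will not serialize it */
	spec->capabilities |= RTE_SERVICE_CAP_MT_SAFE;
}
#endif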

int32_t rte_service_run_iter_on_app_lcore(uint32_t id)
{
	/* run service on calling core, using all-ones as the service mask */
	struct core_state *cs = &lcore_states[rte_lcore_id()];
	return service_run(id, cs, UINT64_MAX);
}
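
/* Illustrative sketch (not part of this library): an application lcore can
 * drive a service from its own poll loop instead of dedicating a service
 * core to it. Hypothetical names, guarded as above.
 */
#ifdef RTE_SERVICE_USAGE_EXAMPLES
static void
example_app_lcore_poll(uint32_t id, volatile int *quit)
{
	while (!*quit) {
		/* -EBUSY indicates another core held the execute lock */
		rte_service_run_iter_on_app_lcore(id);
		/* ... application work interleaves here ... */
	}
}
#endif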

static int32_t
rte_service_runner_func(void *arg)
{
	RTE_SET_USED(arg);
	uint32_t i;
	const int lcore = rte_lcore_id();
	struct core_state *cs = &lcore_states[lcore];

	while (lcore_states[lcore].runstate == RUNSTATE_RUNNING) {
		const uint64_t service_mask = cs->service_mask;

		for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
			/* return value ignored as no change to code flow */
			service_run(i, cs, service_mask);
		}
	}

	lcore_config[lcore].state = WAIT;

	return 0;
}

int32_t
rte_service_lcore_count(void)
{
	int32_t count = 0;
	uint32_t i;
	for (i = 0; i < RTE_MAX_LCORE; i++)
		count += lcore_states[i].is_service_core;
	return count;
}

int32_t
rte_service_lcore_list(uint32_t array[], uint32_t n)
{
	uint32_t count = rte_service_lcore_count();
	if (count > n)
		return -ENOMEM;

	if (!array)
		return -EINVAL;

	uint32_t i;
	uint32_t idx = 0;
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		struct core_state *cs = &lcore_states[i];
		if (cs->is_service_core) {
			array[idx] = i;
			idx++;
		}
	}

	return count;
}

int32_t
rte_service_lcore_count_services(uint32_t lcore)
{
	if (lcore >= RTE_MAX_LCORE)
		return -EINVAL;

	struct core_state *cs = &lcore_states[lcore];
	if (!cs->is_service_core)
		return -ENOTSUP;

	return __builtin_popcountll(cs->service_mask);
}

int32_t
rte_service_start_with_defaults(void)
{
	/* create a default mapping from cores to services, then start the
	 * services to make them transparent to unaware applications.
	 */
	uint32_t i;
	int ret;
	uint32_t count = rte_service_get_count();

	int32_t lcore_iter = 0;
	uint32_t ids[RTE_MAX_LCORE] = {0};
	int32_t lcore_count = rte_service_lcore_list(ids, RTE_MAX_LCORE);

	if (lcore_count == 0)
		return -ENOTSUP;

	for (i = 0; (int)i < lcore_count; i++)
		rte_service_lcore_start(ids[i]);

	for (i = 0; i < count; i++) {
		/* do 1:1 core mapping here, with each service getting
		 * assigned a single core by default. Adding multiple services
		 * should multiplex to a single core, or 1:1 if there are the
		 * same number of services as service-cores.
		 */
		ret = rte_service_map_lcore_set(i, ids[lcore_iter], 1);
		if (ret)
			return -ENODEV;

		lcore_iter++;
		if (lcore_iter >= lcore_count)
			lcore_iter = 0;

		ret = rte_service_runstate_set(i, 1);
		if (ret)
			return -ENOEXEC;
	}

	return 0;
}
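
/* Illustrative sketch (not part of this library): an application that
 * reserved service cores at EAL init (e.g. via the service coremask
 * option) can get a default mapping and running services in one call.
 * Guarded by the hypothetical RTE_SERVICE_USAGE_EXAMPLES macro.
 */
#ifdef RTE_SERVICE_USAGE_EXAMPLES
static int32_t
example_default_startup(void)
{
	int32_t ret = rte_service_start_with_defaults();
	if (ret == -ENOTSUP)
		printf("no service cores available\n");
	return ret;
}
#endif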

static int32_t
service_update(struct rte_service_spec *service, uint32_t lcore,
		uint32_t *set, uint32_t *enabled)
{
	uint32_t i;
	int32_t sid = -1;

	for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
		if ((struct rte_service_spec *)&rte_services[i] == service &&
				service_valid(i)) {
			sid = i;
			break;
		}
	}

	if (sid == -1 || lcore >= RTE_MAX_LCORE)
		return -EINVAL;

	if (!lcore_states[lcore].is_service_core)
		return -EINVAL;

	uint64_t sid_mask = UINT64_C(1) << sid;
	if (set) {
		if (*set) {
			lcore_states[lcore].service_mask |= sid_mask;
			rte_services[sid].num_mapped_cores++;
		} else {
			lcore_states[lcore].service_mask &= ~(sid_mask);
			rte_services[sid].num_mapped_cores--;
		}
	}

	if (enabled)
		*enabled = !!(lcore_states[lcore].service_mask & (sid_mask));

	return 0;
}

int32_t
rte_service_map_lcore_set(uint32_t id, uint32_t lcore, uint32_t enabled)
{
	struct rte_service_spec_impl *s;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
	uint32_t on = enabled > 0;
	return service_update(&s->spec, lcore, &on, 0);
}

int32_t
rte_service_map_lcore_get(uint32_t id, uint32_t lcore)
{
	struct rte_service_spec_impl *s;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
	uint32_t enabled;
	int ret = service_update(&s->spec, lcore, 0, &enabled);
	if (ret == 0)
		return enabled;
	return ret;
}

int32_t rte_service_lcore_reset_all(void)
{
	/* loop over cores, reset all to mask 0 */
	uint32_t i;
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		lcore_states[i].service_mask = 0;
		lcore_states[i].is_service_core = 0;
		lcore_states[i].runstate = RUNSTATE_STOPPED;
	}
	for (i = 0; i < RTE_SERVICE_NUM_MAX; i++)
		rte_services[i].num_mapped_cores = 0;

	return 0;
}

static void
set_lcore_state(uint32_t lcore, int32_t state)
{
	/* mark core state in hugepage backed config */
	struct rte_config *cfg = rte_eal_get_configuration();
	cfg->lcore_role[lcore] = state;

	/* mark state in process local lcore_config */
	lcore_config[lcore].core_role = state;

	/* update per-lcore optimized state tracking */
	lcore_states[lcore].is_service_core = (state == ROLE_SERVICE);
}

int32_t
rte_service_lcore_add(uint32_t lcore)
{
	if (lcore >= RTE_MAX_LCORE)
		return -EINVAL;
	if (lcore_states[lcore].is_service_core)
		return -EALREADY;

	set_lcore_state(lcore, ROLE_SERVICE);

	/* ensure that after adding a core the mask and state are defaults */
	lcore_states[lcore].service_mask = 0;
	lcore_states[lcore].runstate = RUNSTATE_STOPPED;

	return rte_eal_wait_lcore(lcore);
}

int32_t
rte_service_lcore_del(uint32_t lcore)
{
	if (lcore >= RTE_MAX_LCORE)
		return -EINVAL;

	struct core_state *cs = &lcore_states[lcore];
	if (!cs->is_service_core)
		return -EINVAL;

	if (cs->runstate != RUNSTATE_STOPPED)
		return -EBUSY;

	set_lcore_state(lcore, ROLE_RTE);

	return 0;
}

int32_t
rte_service_lcore_start(uint32_t lcore)
{
	if (lcore >= RTE_MAX_LCORE)
		return -EINVAL;

	struct core_state *cs = &lcore_states[lcore];
	if (!cs->is_service_core)
		return -EINVAL;

	if (cs->runstate == RUNSTATE_RUNNING)
		return -EALREADY;

	/* set core to run state first, and then launch; otherwise the
	 * runner would return immediately, as the runstate is what keeps it
	 * in the service poll loop.
	 */
	lcore_states[lcore].runstate = RUNSTATE_RUNNING;

	int ret = rte_eal_remote_launch(rte_service_runner_func, 0, lcore);
	/* returns -EBUSY if the core is already launched, 0 on success */
	return ret;
}
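
/* Illustrative sketch (not part of this library): manual control instead
 * of rte_service_start_with_defaults(): add an lcore as a service core,
 * map a service to it, then start both. Hypothetical names, guarded as
 * above.
 */
#ifdef RTE_SERVICE_USAGE_EXAMPLES
static int32_t
example_manual_mapping(uint32_t service_id, uint32_t lcore)
{
	int32_t ret = rte_service_lcore_add(lcore);
	if (ret && ret != -EALREADY)
		return ret;

	/* enable this service on this core, then run both */
	ret = rte_service_map_lcore_set(service_id, lcore, 1);
	if (ret)
		return ret;
	ret = rte_service_runstate_set(service_id, 1);
	if (ret)
		return ret;
	return rte_service_lcore_start(lcore);
}
#endif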

int32_t
rte_service_lcore_stop(uint32_t lcore)
{
	if (lcore >= RTE_MAX_LCORE)
		return -EINVAL;

	if (lcore_states[lcore].runstate == RUNSTATE_STOPPED)
		return -EALREADY;

	uint32_t i;
	uint64_t service_mask = lcore_states[lcore].service_mask;
	for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
		int32_t enabled = !!(service_mask & (UINT64_C(1) << i));
		int32_t service_running = rte_service_runstate_get(i);
		int32_t only_core = rte_services[i].num_mapped_cores == 1;

		/* if the core is mapped, and the service is running, and this
		 * is the only core that is mapped, the service would cease to
		 * run if this core stopped, so fail instead.
		 */
		if (enabled && service_running && only_core)
			return -EBUSY;
	}

	lcore_states[lcore].runstate = RUNSTATE_STOPPED;

	return 0;
}
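
/* Illustrative sketch (not part of this library): tear-down must stop the
 * service before the core, since rte_service_lcore_stop() refuses to stop
 * the last core mapped to a running service. Hypothetical names, guarded
 * as above.
 */
#ifdef RTE_SERVICE_USAGE_EXAMPLES
static int32_t
example_teardown(uint32_t service_id, uint32_t lcore)
{
	rte_service_runstate_set(service_id, 0);
	int32_t ret = rte_service_lcore_stop(lcore);
	if (ret)
		return ret;
	return rte_service_lcore_del(lcore);
}
#endif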

static void
rte_service_dump_one(FILE *f, struct rte_service_spec_impl *s,
		     uint64_t all_cycles, uint32_t reset)
{
	RTE_SET_USED(all_cycles);

	/* avoid divide by zero */
	uint64_t calls = 1;
	if (s->calls != 0)
		calls = s->calls;

	if (reset) {
		s->cycles_spent = 0;
		s->calls = 0;
		return;
	}

	fprintf(f, "  %s: stats %d\tcalls %"PRIu64"\tcycles %"
			PRIu64"\tavg: %"PRIu64"\n",
			s->spec.name, service_stats_enabled(s), s->calls,
			s->cycles_spent, s->cycles_spent / calls);
}

static void
service_dump_calls_per_lcore(FILE *f, uint32_t lcore, uint32_t reset)
{
	uint32_t i;
	struct core_state *cs = &lcore_states[lcore];

	fprintf(f, "%02d\t", lcore);
	for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
		if (!service_valid(i))
			continue;
		fprintf(f, "%"PRIu64"\t", cs->calls_per_service[i]);
		if (reset)
			cs->calls_per_service[i] = 0;
	}
	fprintf(f, "\n");
}

int32_t rte_service_dump(FILE *f, uint32_t id)
{
	uint32_t i;
	int print_one = (id != UINT32_MAX);

	uint64_t total_cycles = 0;
	for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
		if (!service_valid(i))
			continue;
		total_cycles += rte_services[i].cycles_spent;
	}

	/* print only the specified service */
	if (print_one) {
		struct rte_service_spec_impl *s;
		SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
		fprintf(f, "Service %s Summary\n", s->spec.name);
		uint32_t reset = 0;
		rte_service_dump_one(f, s, total_cycles, reset);
		return 0;
	}

	/* print all services, as UINT32_MAX was passed as id */
	fprintf(f, "Services Summary\n");
	for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
		if (!service_valid(i))
			continue;
		uint32_t reset = 1;
		rte_service_dump_one(f, &rte_services[i], total_cycles, reset);
	}

	fprintf(f, "Service Cores Summary\n");
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		if (lcore_config[i].core_role != ROLE_SERVICE)
			continue;

		uint32_t reset = 1;
		service_dump_calls_per_lcore(f, i, reset);
	}

	return 0;
}
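
/* Illustrative sketch (not part of this library): enable per-service
 * statistics and print them for all services and cores. Guarded by the
 * hypothetical RTE_SERVICE_USAGE_EXAMPLES macro.
 */
#ifdef RTE_SERVICE_USAGE_EXAMPLES
static void
example_dump_stats(uint32_t service_id)
{
	rte_service_set_stats_enable(service_id, 1);
	/* ... let the service run for a while ... */
	rte_service_dump(stdout, UINT32_MAX); /* all services and cores */
}
#endif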