/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2017 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdio.h>
#include <unistd.h>
#include <inttypes.h>
#include <limits.h>
#include <string.h>

#include <rte_service.h>
#include "include/rte_service_component.h"

#include <rte_eal.h>
#include <rte_lcore.h>
#include <rte_common.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_atomic.h>
#include <rte_memory.h>
#include <rte_malloc.h>
#define RTE_SERVICE_NUM_MAX 64

#define SERVICE_F_REGISTERED (1 << 0)
#define SERVICE_F_STATS_ENABLED (1 << 1)

/* runstates for services and lcores, denoting if they are active or not */
#define RUNSTATE_STOPPED 0
#define RUNSTATE_RUNNING 1
/* internal representation of a service */
struct rte_service_spec_impl {
	/* public part of the struct */
	struct rte_service_spec spec;

	/* atomic lock that when set indicates a service core is currently
	 * running this service callback. When not set, a core may take the
	 * lock and then run the service callback.
	 */
	rte_atomic32_t execute_lock;

	/* API set/get-able variables */
	int32_t runstate;
	uint8_t internal_flags;

	/* per service statistics */
	uint32_t num_mapped_cores;
	uint64_t calls;
	uint64_t cycles_spent;
} __rte_cache_aligned;
/* the internal values of a service core */
struct core_state {
	/* bitmask of service IDs that are run on this core */
	uint64_t service_mask;
	uint8_t runstate; /* running or stopped */
	uint8_t is_service_core; /* set if core is currently a service core */

	/* per-service call statistics for this core */
	uint64_t calls_per_service[RTE_SERVICE_NUM_MAX];
} __rte_cache_aligned;
static uint32_t rte_service_count;
static struct rte_service_spec_impl *rte_services;
static struct core_state *lcore_states;
static uint32_t rte_service_library_initialized;
int32_t rte_service_init(void)
{
	if (rte_service_library_initialized) {
		printf("service library init() called, init flag %d\n",
			rte_service_library_initialized);
		return -EALREADY;
	}

	rte_services = rte_calloc("rte_services", RTE_SERVICE_NUM_MAX,
			sizeof(struct rte_service_spec_impl),
			RTE_CACHE_LINE_SIZE);
	if (!rte_services) {
		printf("error allocating rte services array\n");
		return -ENOMEM;
	}

	lcore_states = rte_calloc("rte_service_core_states", RTE_MAX_LCORE,
			sizeof(struct core_state), RTE_CACHE_LINE_SIZE);
	if (!lcore_states) {
		printf("error allocating core states array\n");
		return -ENOMEM;
	}

	int i;
	struct rte_config *cfg = rte_eal_get_configuration();
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		/* the master lcore is never added as a service core */
		if (lcore_config[i].core_role == ROLE_SERVICE) {
			if ((unsigned int)i == cfg->master_lcore)
				continue;
			rte_service_lcore_add(i);
		}
	}

	rte_service_library_initialized = 1;
	return 0;
}
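/* Illustrative sketch (not part of this file, names hypothetical): the EAL
 * is expected to invoke rte_service_init() during startup, so an application
 * typically only checks what service cores it was given afterwards:
 *
 *	int main(int argc, char **argv)
 *	{
 *		if (rte_eal_init(argc, argv) < 0)
 *			rte_panic("EAL init failed\n");
 *		uint32_t ids[RTE_MAX_LCORE];
 *		int32_t n = rte_service_lcore_list(ids, RTE_MAX_LCORE);
 *		printf("%d service cores available\n", (int)n);
 *		return 0;
 *	}
 */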
/* Returns 1 if a service is registered and has not been unregistered.
 * Returns 0 if the service was never registered, or has been unregistered.
 */
static inline int
service_valid(uint32_t id)
{
	return !!(rte_services[id].internal_flags & SERVICE_F_REGISTERED);
}
/* validate ID and retrieve service pointer, or return error value */
#define SERVICE_VALID_GET_OR_ERR_RET(id, service, retval) do {          \
	if (id >= RTE_SERVICE_NUM_MAX || !service_valid(id))             \
		return retval;                                           \
	service = &rte_services[id];                                     \
} while (0)
/* Returns 1 if statistics should be collected for the service.
 * Returns 0 if statistics should not be collected for the service.
 */
static inline int
service_stats_enabled(struct rte_service_spec_impl *impl)
{
	return !!(impl->internal_flags & SERVICE_F_STATS_ENABLED);
}
static inline int
service_mt_safe(struct rte_service_spec_impl *s)
{
	return s->spec.capabilities & RTE_SERVICE_CAP_MT_SAFE;
}
int32_t rte_service_set_stats_enable(uint32_t id, int32_t enabled)
{
	struct rte_service_spec_impl *s;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, 0);

	if (enabled)
		s->internal_flags |= SERVICE_F_STATS_ENABLED;
	else
		s->internal_flags &= ~(SERVICE_F_STATS_ENABLED);

	return 0;
}
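/* Usage sketch (illustrative; `sid` is a hypothetical registered service id):
 *
 *	rte_service_set_stats_enable(sid, 1);   // start collecting stats
 *	// ... let the service run for a while ...
 *	rte_service_dump(stdout, sid);          // print calls/cycles summary
 *	rte_service_set_stats_enable(sid, 0);   // stop collecting
 */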
uint32_t
rte_service_get_count(void)
{
	return rte_service_count;
}
struct rte_service_spec *
rte_service_get_by_id(uint32_t id)
{
	struct rte_service_spec *service = NULL;
	if (id < rte_service_count)
		service = (struct rte_service_spec *)&rte_services[id];

	return service;
}
struct rte_service_spec *rte_service_get_by_name(const char *name)
{
	struct rte_service_spec *service = NULL;
	int i;
	for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
		if (service_valid(i) &&
				strcmp(name, rte_services[i].spec.name) == 0) {
			service = (struct rte_service_spec *)&rte_services[i];
			break;
		}
	}

	return service;
}
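/* Lookup sketch (illustrative): resolving a component's service by name.
 * The service name "my_dev_poll" is hypothetical.
 *
 *	struct rte_service_spec *sp = rte_service_get_by_name("my_dev_poll");
 *	if (sp == NULL)
 *		printf("service not registered\n");
 */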
const char *
rte_service_get_name(uint32_t id)
{
	struct rte_service_spec_impl *s;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, 0);
	return s->spec.name;
}
int32_t
rte_service_probe_capability(uint32_t id, uint32_t capability)
{
	struct rte_service_spec_impl *s;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
	return s->spec.capabilities & capability;
}
int32_t
rte_service_component_register(const struct rte_service_spec *spec,
			       uint32_t *id_ptr)
{
	uint32_t i;
	int32_t free_slot = -1;

	if (spec->callback == NULL || strlen(spec->name) == 0)
		return -EINVAL;

	for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
		if (!service_valid(i)) {
			free_slot = i;
			break;
		}
	}

	if ((free_slot < 0) || (i == RTE_SERVICE_NUM_MAX))
		return -ENOSPC;

	struct rte_service_spec_impl *s = &rte_services[free_slot];
	s->spec = *spec;
	s->internal_flags |= SERVICE_F_REGISTERED;

	rte_smp_wmb();
	rte_service_count++;

	if (id_ptr)
		*id_ptr = free_slot;

	return 0;
}
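/* Registration sketch (illustrative): a component registering its poll
 * function as a service. All names below are hypothetical.
 *
 *	static int32_t
 *	my_poll_cb(void *userdata)
 *	{
 *		struct my_dev *dev = userdata;
 *		// do one iteration of background work on dev
 *		return 0;
 *	}
 *
 *	uint32_t sid;
 *	struct rte_service_spec spec = {
 *		.callback = my_poll_cb,
 *		.callback_userdata = dev,
 *		.capabilities = RTE_SERVICE_CAP_MT_SAFE,
 *	};
 *	snprintf(spec.name, sizeof(spec.name), "my_dev_poll");
 *	int32_t ret = rte_service_component_register(&spec, &sid);
 */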
int32_t
rte_service_component_unregister(uint32_t id)
{
	uint32_t i;
	struct rte_service_spec_impl *s;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);

	rte_service_count--;
	rte_smp_wmb();

	s->internal_flags &= ~(SERVICE_F_REGISTERED);

	/* clear the run-bit in all cores */
	for (i = 0; i < RTE_MAX_LCORE; i++)
		lcore_states[i].service_mask &= ~(UINT64_C(1) << id);

	memset(&rte_services[id], 0, sizeof(struct rte_service_spec_impl));

	return 0;
}
int32_t
rte_service_runstate_set(uint32_t id, uint32_t runstate)
{
	struct rte_service_spec_impl *s;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);

	if (runstate)
		s->runstate = RUNSTATE_RUNNING;
	else
		s->runstate = RUNSTATE_STOPPED;

	rte_smp_wmb();
	return 0;
}
int32_t
rte_service_runstate_get(uint32_t id)
{
	struct rte_service_spec_impl *s;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);

	return (s->runstate == RUNSTATE_RUNNING) && (s->num_mapped_cores > 0);
}
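/* Runstate sketch (illustrative; `sid` and `service_core_id` hypothetical):
 * a service only reports running once it is both set running and mapped to
 * at least one service core.
 *
 *	rte_service_runstate_set(sid, 1);
 *	int32_t active = rte_service_runstate_get(sid); // 0: no core mapped
 *	rte_service_map_lcore_set(sid, service_core_id, 1);
 *	active = rte_service_runstate_get(sid);         // now returns 1
 */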
static int32_t
rte_service_runner_func(void *arg)
{
	RTE_SET_USED(arg);
	uint32_t i;
	const int lcore = rte_lcore_id();
	struct core_state *cs = &lcore_states[lcore];

	while (lcore_states[lcore].runstate == RUNSTATE_RUNNING) {
		const uint64_t service_mask = cs->service_mask;
		for (i = 0; i < rte_service_count; i++) {
			struct rte_service_spec_impl *s = &rte_services[i];
			if (s->runstate != RUNSTATE_RUNNING ||
					!(service_mask & (UINT64_C(1) << i)))
				continue;

			/* check if we need the cmpset: if the service is
			 * MT safe or has <= 1 core mapped, atomic ops are
			 * not required to serialize the callback.
			 */
			const int need_cmpset = (service_mt_safe(s) == 0) &&
						(s->num_mapped_cores > 1);
			uint32_t *lock = (uint32_t *)&s->execute_lock;

			if (!need_cmpset || rte_atomic32_cmpset(lock, 0, 1)) {
				void *userdata = s->spec.callback_userdata;

				if (service_stats_enabled(s)) {
					uint64_t start = rte_rdtsc();
					s->spec.callback(userdata);
					uint64_t end = rte_rdtsc();
					s->cycles_spent += end - start;
					s->calls++;
					cs->calls_per_service[i]++;
				} else
					s->spec.callback(userdata);

				/* only release the lock if we took it */
				if (need_cmpset)
					rte_atomic32_clear(&s->execute_lock);
			}
		}

		rte_smp_rmb();
	}

	lcore_config[lcore].state = WAIT;
	return 0;
}
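/* Concurrency sketch (illustrative): a service that does not advertise
 * RTE_SERVICE_CAP_MT_SAFE is serialized by execute_lock above, so its
 * callback may assume single-threaded execution even when mapped to
 * several service cores. The callback below is hypothetical:
 *
 *	static int32_t
 *	not_mt_safe_cb(void *userdata)
 *	{
 *		uint64_t *counter = userdata;
 *		(*counter)++;   // safe: the runner holds execute_lock
 *		return 0;
 *	}
 */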
int32_t
rte_service_lcore_count(void)
{
	int32_t count = 0;
	uint32_t i;
	for (i = 0; i < RTE_MAX_LCORE; i++)
		count += lcore_states[i].is_service_core;
	return count;
}
int32_t
rte_service_lcore_list(uint32_t array[], uint32_t n)
{
	uint32_t count = rte_service_lcore_count();
	if (count > n)
		return -ENOMEM;

	if (!array)
		return -EINVAL;

	uint32_t i;
	uint32_t idx = 0;
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		struct core_state *cs = &lcore_states[i];
		if (cs->is_service_core) {
			array[idx] = i;
			idx++;
		}
	}

	return count;
}
int32_t
rte_service_lcore_count_services(uint32_t lcore)
{
	if (lcore >= RTE_MAX_LCORE)
		return -EINVAL;

	struct core_state *cs = &lcore_states[lcore];
	if (!cs->is_service_core)
		return -ENOTSUP;

	return __builtin_popcountll(cs->service_mask);
}
int32_t
rte_service_start_with_defaults(void)
{
	/* create a default mapping from cores to services, then start the
	 * services to make them transparent to unaware applications.
	 */
	uint32_t i;
	int ret;
	uint32_t count = rte_service_get_count();

	int32_t lcore_iter = 0;
	uint32_t ids[RTE_MAX_LCORE];
	int32_t lcore_count = rte_service_lcore_list(ids, RTE_MAX_LCORE);

	if (lcore_count == 0)
		return -ENOTSUP;

	for (i = 0; (int)i < lcore_count; i++)
		rte_service_lcore_start(ids[i]);

	for (i = 0; i < count; i++) {
		struct rte_service_spec *s = rte_service_get_by_id(i);
		if (!s)
			return -EINVAL;

		/* do 1:1 core mapping here, with each service getting
		 * assigned a single core by default. Adding multiple services
		 * should multiplex to a single core, or 1:1 if there are the
		 * same number of services as service-cores.
		 */
		ret = rte_service_map_lcore_set(i, ids[lcore_iter], 1);
		if (ret)
			return -ENODEV;

		lcore_iter++;
		if (lcore_iter >= lcore_count)
			lcore_iter = 0;

		ret = rte_service_runstate_set(i, 1);
		if (ret)
			return -ENOEXEC;
	}

	return 0;
}
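/* Startup sketch (illustrative): an application that does not care about
 * explicit mappings can rely on the default round-robin distribution.
 *
 *	if (rte_service_start_with_defaults() != 0)
 *		printf("no service cores available, services not started\n");
 */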
static int32_t
service_update(struct rte_service_spec *service, uint32_t lcore,
		uint32_t *set, uint32_t *enabled)
{
	uint32_t i;
	int32_t sid = -1;

	for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
		if ((struct rte_service_spec *)&rte_services[i] == service &&
				service_valid(i)) {
			sid = i;
			break;
		}
	}

	if (sid == -1 || lcore >= RTE_MAX_LCORE)
		return -EINVAL;

	if (!lcore_states[lcore].is_service_core)
		return -EINVAL;

	uint64_t sid_mask = UINT64_C(1) << sid;
	if (set) {
		if (*set) {
			lcore_states[lcore].service_mask |= sid_mask;
			rte_services[sid].num_mapped_cores++;
		} else {
			lcore_states[lcore].service_mask &= ~(sid_mask);
			rte_services[sid].num_mapped_cores--;
		}
	}

	if (enabled)
		*enabled = !!(lcore_states[lcore].service_mask & (sid_mask));

	rte_smp_wmb();

	return 0;
}
int32_t
rte_service_map_lcore_set(uint32_t id, uint32_t lcore, uint32_t enabled)
{
	struct rte_service_spec_impl *s;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
	uint32_t on = enabled > 0;
	return service_update(&s->spec, lcore, &on, 0);
}
int32_t
rte_service_map_lcore_get(uint32_t id, uint32_t lcore)
{
	struct rte_service_spec_impl *s;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
	uint32_t enabled;
	int ret = service_update(&s->spec, lcore, 0, &enabled);
	if (ret == 0)
		return enabled;
	return ret;
}
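/* Mapping sketch (illustrative; `sid` and `lcore_id` hypothetical): enable
 * a service on one core and verify the mapping took effect.
 *
 *	rte_service_map_lcore_set(sid, lcore_id, 1);
 *	if (rte_service_map_lcore_get(sid, lcore_id) == 1)
 *		printf("service %u mapped to lcore %u\n", sid, lcore_id);
 */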
int32_t rte_service_lcore_reset_all(void)
{
	/* loop over cores, reset all to mask 0 */
	uint32_t i;
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		lcore_states[i].service_mask = 0;
		lcore_states[i].is_service_core = 0;
		lcore_states[i].runstate = RUNSTATE_STOPPED;
	}
	for (i = 0; i < RTE_SERVICE_NUM_MAX; i++)
		rte_services[i].num_mapped_cores = 0;

	rte_smp_wmb();

	return 0;
}
static void
set_lcore_state(uint32_t lcore, int32_t state)
{
	/* mark core state in hugepage backed config */
	struct rte_config *cfg = rte_eal_get_configuration();
	cfg->lcore_role[lcore] = state;

	/* mark state in process local lcore_config */
	lcore_config[lcore].core_role = state;

	/* update per-lcore optimized state tracking */
	lcore_states[lcore].is_service_core = (state == ROLE_SERVICE);
}
int32_t
rte_service_lcore_add(uint32_t lcore)
{
	if (lcore >= RTE_MAX_LCORE)
		return -EINVAL;
	if (lcore_states[lcore].is_service_core)
		return -EALREADY;

	set_lcore_state(lcore, ROLE_SERVICE);

	/* ensure that after adding a core the mask and state are defaults */
	lcore_states[lcore].service_mask = 0;
	lcore_states[lcore].runstate = RUNSTATE_STOPPED;

	rte_smp_wmb();
	return 0;
}
int32_t
rte_service_lcore_del(uint32_t lcore)
{
	if (lcore >= RTE_MAX_LCORE)
		return -EINVAL;

	struct core_state *cs = &lcore_states[lcore];
	if (!cs->is_service_core)
		return -EINVAL;

	if (cs->runstate != RUNSTATE_STOPPED)
		return -EBUSY;

	set_lcore_state(lcore, ROLE_RTE);

	rte_smp_wmb();
	return 0;
}
int32_t
rte_service_lcore_start(uint32_t lcore)
{
	if (lcore >= RTE_MAX_LCORE)
		return -EINVAL;

	struct core_state *cs = &lcore_states[lcore];
	if (!cs->is_service_core)
		return -EINVAL;

	if (cs->runstate == RUNSTATE_RUNNING)
		return -EALREADY;

	/* set core to run state first, and then launch otherwise it will
	 * return immediately as runstate keeps it in the service poll loop
	 */
	lcore_states[lcore].runstate = RUNSTATE_RUNNING;

	int ret = rte_eal_remote_launch(rte_service_runner_func, 0, lcore);
	/* returns -EBUSY if the core is already launched, 0 on success */
	return ret;
}
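/* Lifecycle sketch (illustrative; `sid` and `lcore_id` hypothetical, and
 * lcore_id must not be the master lcore): turning an idle lcore into a
 * running service core and back.
 *
 *	rte_service_lcore_add(lcore_id);           // role -> ROLE_SERVICE
 *	rte_service_map_lcore_set(sid, lcore_id, 1);
 *	rte_service_lcore_start(lcore_id);         // launches runner func
 *	// ... later: stop fails with -EBUSY if a running service has
 *	// only this core mapped
 *	rte_service_lcore_stop(lcore_id);
 *	rte_service_lcore_del(lcore_id);           // role back to ROLE_RTE
 */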
int32_t
rte_service_lcore_stop(uint32_t lcore)
{
	if (lcore >= RTE_MAX_LCORE)
		return -EINVAL;

	if (lcore_states[lcore].runstate == RUNSTATE_STOPPED)
		return -EALREADY;

	uint32_t i;
	for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
		int32_t enabled = !!(lcore_states[lcore].service_mask &
			(UINT64_C(1) << i));
		int32_t service_running = rte_services[i].runstate !=
			RUNSTATE_STOPPED;
		int32_t only_core = rte_services[i].num_mapped_cores == 1;

		/* if the core is mapped, and the service is running, and this
		 * is the only core that is mapped, the service would cease to
		 * run if this core stopped, so fail instead.
		 */
		if (enabled && service_running && only_core)
			return -EBUSY;
	}

	lcore_states[lcore].runstate = RUNSTATE_STOPPED;

	return 0;
}
static void
rte_service_dump_one(FILE *f, struct rte_service_spec_impl *s,
		     uint64_t all_cycles, uint32_t reset)
{
	RTE_SET_USED(all_cycles);

	/* avoid divide by zero */
	uint64_t calls = 1;
	if (s->calls != 0)
		calls = s->calls;

	if (reset) {
		s->cycles_spent = 0;
		s->calls = 0;
		return;
	}

	fprintf(f, "  %s: stats %d\tcalls %"PRIu64"\tcycles %"
			PRIu64"\tavg: %"PRIu64"\n",
			s->spec.name, service_stats_enabled(s), s->calls,
			s->cycles_spent, s->cycles_spent / calls);
}
static void
service_dump_calls_per_lcore(FILE *f, uint32_t lcore, uint32_t reset)
{
	uint32_t i;
	struct core_state *cs = &lcore_states[lcore];

	fprintf(f, "%02d\t", lcore);
	for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
		if (!service_valid(i))
			continue;
		fprintf(f, "%"PRIu64"\t", cs->calls_per_service[i]);
		if (reset)
			cs->calls_per_service[i] = 0;
	}
	fprintf(f, "\n");
}
int32_t rte_service_dump(FILE *f, uint32_t id)
{
	uint32_t i;
	int print_one = (id != UINT32_MAX);

	uint64_t total_cycles = 0;
	for (i = 0; i < rte_service_count; i++) {
		if (!service_valid(i))
			continue;
		total_cycles += rte_services[i].cycles_spent;
	}

	/* print only the specified service */
	if (print_one) {
		struct rte_service_spec_impl *s;
		SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
		fprintf(f, "Service %s Summary\n", s->spec.name);
		uint32_t reset = 0;
		rte_service_dump_one(f, s, total_cycles, reset);
		return 0;
	}

	/* print all services, as UINT32_MAX was passed as id */
	fprintf(f, "Services Summary\n");
	for (i = 0; i < rte_service_count; i++) {
		uint32_t reset = 1;
		rte_service_dump_one(f, &rte_services[i], total_cycles, reset);
	}

	fprintf(f, "Service Cores Summary\n");
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		if (lcore_config[i].core_role != ROLE_SERVICE)
			continue;

		uint32_t reset = 1;
		service_dump_calls_per_lcore(f, i, reset);
	}

	return 0;
}
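/* Dump sketch (illustrative): passing UINT32_MAX prints and resets stats
 * for all services plus per-core call counts, while a valid id prints a
 * single service summary without resetting.
 *
 *	rte_service_dump(stdout, UINT32_MAX);   // all services + per-core
 *	rte_service_dump(stdout, sid);          // one service, no reset
 */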