/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2017 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdio.h>
#include <string.h>
#include <inttypes.h>
#include <errno.h>

#include <rte_service.h>
#include "include/rte_service_component.h"

#include <rte_eal.h>
#include <rte_launch.h>
#include <rte_lcore.h>
#include <rte_common.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_atomic.h>
#include <rte_memory.h>
#include <rte_malloc.h>
#define RTE_SERVICE_NUM_MAX 64

#define SERVICE_F_REGISTERED (1 << 0)
#define SERVICE_F_STATS_ENABLED (1 << 1)

/* runstates for services and lcores, denoting if they are active or not */
#define RUNSTATE_STOPPED 0
#define RUNSTATE_RUNNING 1
/* internal representation of a service */
struct rte_service_spec_impl {
	/* public part of the struct */
	struct rte_service_spec spec;

	/* atomic lock that when set indicates a service core is currently
	 * running this service callback. When not set, a core may take the
	 * lock and then run the service callback.
	 */
	rte_atomic32_t execute_lock;

	/* API set/get-able variables */
	int32_t runstate;
	uint8_t internal_flags;

	/* per-service statistics */
	uint32_t num_mapped_cores;
	uint64_t calls;
	uint64_t cycles_spent;
} __rte_cache_aligned;
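/* Illustrative sketch, compiled out by default: how a component might fill
 * in the public rte_service_spec and register it. The callback name
 * my_service_cb, its behaviour, and the RTE_SERVICE_EXAMPLE guard are
 * hypothetical and not part of this library.
 */
#ifdef RTE_SERVICE_EXAMPLE
static int32_t
my_service_cb(void *userdata)
{
	/* poll hardware or process work here; userdata is the pointer
	 * registered in callback_userdata below.
	 */
	RTE_SET_USED(userdata);
	return 0;
}

static void
my_component_register(void)
{
	struct rte_service_spec spec;
	memset(&spec, 0, sizeof(spec));
	snprintf(spec.name, sizeof(spec.name), "%s", "my_service");
	spec.callback = my_service_cb;
	spec.callback_userdata = NULL;
	spec.capabilities = 0; /* not MT safe: callback is serialized */

	uint32_t id;
	if (rte_service_component_register(&spec, &id) == 0)
		printf("registered service id %u\n", id);
}
#endif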
/* the internal values of a service core */
struct core_state {
	/* bitmap of service IDs that this core runs */
	uint64_t service_mask;
	uint8_t runstate; /* running or stopped */
	uint8_t is_service_core; /* set if core is currently a service core */

	/* per-service statistics */
	uint64_t calls_per_service[RTE_SERVICE_NUM_MAX];
} __rte_cache_aligned;
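/* Illustrative sketch, compiled out by default: service_mask is indexed by
 * service ID, so bit N set means this core runs rte_services[N]. The helper
 * below is hypothetical and only demonstrates the bit layout.
 */
#ifdef RTE_SERVICE_EXAMPLE
static int
core_runs_service(const struct core_state *cs, uint32_t service_id)
{
	return !!(cs->service_mask & (UINT64_C(1) << service_id));
}
#endif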
static uint32_t rte_service_count;
static struct rte_service_spec_impl *rte_services;
static struct core_state *lcore_states;
static uint32_t rte_service_library_initialized;
int32_t rte_service_init(void)
{
	if (rte_service_library_initialized) {
		printf("service library init() called, init flag %d\n",
			rte_service_library_initialized);
		return -EALREADY;
	}

	rte_services = rte_calloc("rte_services", RTE_SERVICE_NUM_MAX,
			sizeof(struct rte_service_spec_impl),
			RTE_CACHE_LINE_SIZE);
	if (!rte_services) {
		printf("error allocating rte services array\n");
		return -ENOMEM;
	}

	lcore_states = rte_calloc("rte_service_core_states", RTE_MAX_LCORE,
			sizeof(struct core_state), RTE_CACHE_LINE_SIZE);
	if (!lcore_states) {
		printf("error allocating core states array\n");
		return -ENOMEM;
	}

	int i;
	struct rte_config *cfg = rte_eal_get_configuration();
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		/* add each service core, skipping the master lcore */
		if (lcore_config[i].core_role == ROLE_SERVICE) {
			if ((unsigned int)i == cfg->master_lcore)
				continue;
			rte_service_lcore_add(i);
		}
	}

	rte_service_library_initialized = 1;
	return 0;
}
/* returns 1 if service is registered and has not been unregistered
 * Returns 0 if service never registered, or has been unregistered
 */
static inline int
service_valid(uint32_t id)
{
	return !!(rte_services[id].internal_flags & SERVICE_F_REGISTERED);
}

/* validate ID and retrieve service pointer, or return error value */
#define SERVICE_VALID_GET_OR_ERR_RET(id, service, retval) do {	\
	if (id >= RTE_SERVICE_NUM_MAX || !service_valid(id))	\
		return retval;					\
	service = &rte_services[id];				\
} while (0)
/* returns 1 if statistics should be collected for service
 * Returns 0 if statistics should not be collected for service
 */
static inline int
service_stats_enabled(struct rte_service_spec_impl *impl)
{
	return !!(impl->internal_flags & SERVICE_F_STATS_ENABLED);
}

/* returns 1 if the service is multi-thread safe */
static inline int
service_mt_safe(struct rte_service_spec_impl *s)
{
	return s->spec.capabilities & RTE_SERVICE_CAP_MT_SAFE;
}

int32_t rte_service_set_stats_enable(struct rte_service_spec *service,
		int32_t enabled)
{
	struct rte_service_spec_impl *impl =
		(struct rte_service_spec_impl *)service;
	if (!impl)
		return -EINVAL;

	if (enabled)
		impl->internal_flags |= SERVICE_F_STATS_ENABLED;
	else
		impl->internal_flags &= ~(SERVICE_F_STATS_ENABLED);

	return 0;
}
uint32_t
rte_service_get_count(void)
{
	return rte_service_count;
}

struct rte_service_spec *
rte_service_get_by_id(uint32_t id)
{
	struct rte_service_spec *service = NULL;
	if (id < rte_service_count)
		service = (struct rte_service_spec *)&rte_services[id];
	return service;
}

struct rte_service_spec *rte_service_get_by_name(const char *name)
{
	struct rte_service_spec *service = NULL;
	uint32_t i;
	for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
		if (service_valid(i) &&
				strcmp(name, rte_services[i].spec.name) == 0) {
			service = (struct rte_service_spec *)&rte_services[i];
			break;
		}
	}
	return service;
}

const char *
rte_service_get_name(uint32_t id)
{
	struct rte_service_spec_impl *s;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, 0);
	return s->spec.name;
}
int32_t
rte_service_probe_capability(uint32_t id, uint32_t capability)
{
	struct rte_service_spec_impl *s;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
	return s->spec.capabilities & capability;
}

int32_t
rte_service_is_running(const struct rte_service_spec *spec)
{
	const struct rte_service_spec_impl *impl =
		(const struct rte_service_spec_impl *)spec;
	if (!impl)
		return 0;

	return (impl->runstate == RUNSTATE_RUNNING) &&
		(impl->num_mapped_cores > 0);
}
int32_t
rte_service_component_register(const struct rte_service_spec *spec,
		uint32_t *id_ptr)
{
	uint32_t i;
	int32_t free_slot = -1;

	if (spec->callback == NULL || strlen(spec->name) == 0)
		return -EINVAL;

	for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
		if (!service_valid(i)) {
			free_slot = i;
			break;
		}
	}

	if ((free_slot < 0) || (i == RTE_SERVICE_NUM_MAX))
		return -ENOSPC;

	struct rte_service_spec_impl *s = &rte_services[free_slot];
	s->spec = *spec;
	s->internal_flags |= SERVICE_F_REGISTERED;

	rte_smp_wmb();
	rte_service_count++;

	if (id_ptr)
		*id_ptr = free_slot;

	return 0;
}
int32_t
rte_service_unregister(struct rte_service_spec *spec)
{
	struct rte_service_spec_impl *s = NULL;
	struct rte_service_spec_impl *spec_impl =
		(struct rte_service_spec_impl *)spec;

	uint32_t i;
	uint32_t service_id = 0;
	for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
		if (&rte_services[i] == spec_impl) {
			s = spec_impl;
			service_id = i;
			break;
		}
	}

	if (!s)
		return -EINVAL;

	rte_service_count--;
	rte_smp_wmb();

	s->internal_flags &= ~(SERVICE_F_REGISTERED);

	/* clear the run-bit for this service on all cores */
	for (i = 0; i < RTE_MAX_LCORE; i++)
		lcore_states[i].service_mask &= ~(UINT64_C(1) << service_id);

	memset(&rte_services[service_id], 0,
			sizeof(struct rte_service_spec_impl));

	return 0;
}
int32_t
rte_service_start(struct rte_service_spec *service)
{
	struct rte_service_spec_impl *s =
		(struct rte_service_spec_impl *)service;
	s->runstate = RUNSTATE_RUNNING;
	rte_smp_wmb();
	return 0;
}

int32_t
rte_service_stop(struct rte_service_spec *service)
{
	struct rte_service_spec_impl *s =
		(struct rte_service_spec_impl *)service;
	s->runstate = RUNSTATE_STOPPED;
	rte_smp_wmb();
	return 0;
}
static int32_t
rte_service_runner_func(void *arg)
{
	RTE_SET_USED(arg);
	uint32_t i;
	const int lcore = rte_lcore_id();
	struct core_state *cs = &lcore_states[lcore];

	while (lcore_states[lcore].runstate == RUNSTATE_RUNNING) {
		const uint64_t service_mask = cs->service_mask;
		for (i = 0; i < rte_service_count; i++) {
			struct rte_service_spec_impl *s = &rte_services[i];
			if (s->runstate != RUNSTATE_RUNNING ||
					!(service_mask & (UINT64_C(1) << i)))
				continue;

			/* the cmpset is only required when the service is
			 * not MT safe and more than one core is mapped;
			 * otherwise the callback may run lock-free.
			 */
			const int cmpset_needed = (service_mt_safe(s) == 0) &&
					(s->num_mapped_cores > 1);
			uint32_t *lock = (uint32_t *)&s->execute_lock;

			if (!cmpset_needed ||
					rte_atomic32_cmpset(lock, 0, 1)) {
				void *userdata = s->spec.callback_userdata;

				if (service_stats_enabled(s)) {
					uint64_t start = rte_rdtsc();
					s->spec.callback(userdata);
					uint64_t end = rte_rdtsc();
					s->cycles_spent += end - start;
					cs->calls_per_service[i]++;
					s->calls++;
				} else
					s->spec.callback(userdata);

				/* release the lock only if it was taken */
				if (cmpset_needed)
					rte_atomic32_clear(&s->execute_lock);
			}
		}
	}

	lcore_config[lcore].state = WAIT;

	return 0;
}
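/* Illustrative sketch, compiled out by default: the execute_lock handling
 * above is a try-lock around the callback, taken only when the service is
 * not MT safe and more than one core is mapped. The helper below is
 * hypothetical and shows the same cmpset/clear pairing in isolation.
 */
#ifdef RTE_SERVICE_EXAMPLE
static void
execute_lock_pattern(struct rte_service_spec_impl *s)
{
	uint32_t *lock = (uint32_t *)&s->execute_lock;
	if (rte_atomic32_cmpset(lock, 0, 1)) {	/* try-lock: 0 -> 1 */
		s->spec.callback(s->spec.callback_userdata);
		rte_atomic32_clear(&s->execute_lock);	/* unlock: -> 0 */
	}
	/* else another core is running the callback; skip this round */
}
#endif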
int32_t
rte_service_lcore_count(void)
{
	int32_t count = 0;
	uint32_t i;
	for (i = 0; i < RTE_MAX_LCORE; i++)
		count += lcore_states[i].is_service_core;
	return count;
}

int32_t
rte_service_lcore_list(uint32_t array[], uint32_t n)
{
	uint32_t count = rte_service_lcore_count();
	if (count > n)
		return -ENOMEM;

	if (!array)
		return -EINVAL;

	uint32_t i;
	uint32_t idx = 0;
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		struct core_state *cs = &lcore_states[i];
		if (cs->is_service_core) {
			array[idx] = i;
			idx++;
		}
	}

	return count;
}

int32_t
rte_service_lcore_count_services(uint32_t lcore)
{
	if (lcore >= RTE_MAX_LCORE)
		return -EINVAL;

	struct core_state *cs = &lcore_states[lcore];
	if (!cs->is_service_core)
		return -ENOTSUP;

	return __builtin_popcountll(cs->service_mask);
}
int32_t
rte_service_start_with_defaults(void)
{
	/* create a default mapping from cores to services, then start the
	 * services to make them transparent to unaware applications.
	 */
	uint32_t i;
	int ret;
	uint32_t count = rte_service_get_count();

	int32_t lcore_iter = 0;
	uint32_t ids[RTE_MAX_LCORE];
	int32_t lcore_count = rte_service_lcore_list(ids, RTE_MAX_LCORE);

	if (lcore_count == 0)
		return -ENOTSUP;

	for (i = 0; (int)i < lcore_count; i++)
		rte_service_lcore_start(ids[i]);

	for (i = 0; i < count; i++) {
		struct rte_service_spec *s = rte_service_get_by_id(i);
		if (!s)
			return -EINVAL;

		/* do a 1:1 core mapping here, with each service getting
		 * assigned a single core by default. Multiple services
		 * multiplex onto one core if there are more services than
		 * service cores, or map 1:1 if the counts are equal.
		 */
		ret = rte_service_map_lcore_set(i, ids[lcore_iter], 1);
		if (ret)
			return -ENODEV;

		lcore_iter++;
		if (lcore_iter >= lcore_count)
			lcore_iter = 0;

		ret = rte_service_start(s);
		if (ret)
			return -ENOEXEC;
	}

	return 0;
}
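/* Illustrative sketch, compiled out by default: an application that is
 * unaware of individual services can rely on the default mapping above.
 * Assuming service cores were reserved at EAL init time, one call maps and
 * starts everything; the function name app_init_services is hypothetical.
 */
#ifdef RTE_SERVICE_EXAMPLE
static void
app_init_services(void)
{
	if (rte_service_start_with_defaults() != 0)
		printf("no service cores available, services not started\n");
}
#endif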
static int32_t
service_update(struct rte_service_spec *service, uint32_t lcore,
		uint32_t *set, uint32_t *enabled)
{
	uint32_t i;
	int32_t sid = -1;

	for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
		if ((struct rte_service_spec *)&rte_services[i] == service &&
				service_valid(i)) {
			sid = i;
			break;
		}
	}

	if (sid == -1 || lcore >= RTE_MAX_LCORE)
		return -EINVAL;

	if (!lcore_states[lcore].is_service_core)
		return -EINVAL;

	uint64_t sid_mask = UINT64_C(1) << sid;
	if (set) {
		if (*set) {
			lcore_states[lcore].service_mask |= sid_mask;
			rte_services[sid].num_mapped_cores++;
		} else {
			lcore_states[lcore].service_mask &= ~(sid_mask);
			rte_services[sid].num_mapped_cores--;
		}
	}

	if (enabled)
		/* !! to avoid truncating mask bits above 31 */
		*enabled = !!(lcore_states[lcore].service_mask & (sid_mask));

	rte_smp_wmb();

	return 0;
}

int32_t
rte_service_map_lcore_set(uint32_t id, uint32_t lcore, uint32_t enabled)
{
	struct rte_service_spec_impl *s;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
	uint32_t on = enabled > 0;
	return service_update(&s->spec, lcore, &on, 0);
}

int32_t
rte_service_map_lcore_get(uint32_t id, uint32_t lcore)
{
	struct rte_service_spec_impl *s;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
	uint32_t enabled;
	int ret = service_update(&s->spec, lcore, 0, &enabled);
	if (ret == 0)
		return enabled;
	return ret;
}
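/* Illustrative sketch, compiled out by default: map a service to a service
 * lcore, query the mapping, then unmap it. Both IDs are assumed to refer to
 * an already registered service and an already added service core.
 */
#ifdef RTE_SERVICE_EXAMPLE
static void
map_example(uint32_t service_id, uint32_t lcore_id)
{
	rte_service_map_lcore_set(service_id, lcore_id, 1); /* enable */
	if (rte_service_map_lcore_get(service_id, lcore_id) == 1)
		printf("service %u enabled on lcore %u\n",
				service_id, lcore_id);
	rte_service_map_lcore_set(service_id, lcore_id, 0); /* disable */
}
#endif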
int32_t rte_service_lcore_reset_all(void)
{
	/* loop over cores, reset all to mask 0 */
	uint32_t i;
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		lcore_states[i].service_mask = 0;
		lcore_states[i].is_service_core = 0;
		lcore_states[i].runstate = RUNSTATE_STOPPED;
	}
	for (i = 0; i < RTE_SERVICE_NUM_MAX; i++)
		rte_services[i].num_mapped_cores = 0;

	rte_smp_wmb();

	return 0;
}
static void
set_lcore_state(uint32_t lcore, int32_t state)
{
	/* mark core state in hugepage backed config */
	struct rte_config *cfg = rte_eal_get_configuration();
	cfg->lcore_role[lcore] = state;

	/* mark state in process local lcore_config */
	lcore_config[lcore].core_role = state;

	/* update per-lcore optimized state tracking */
	lcore_states[lcore].is_service_core = (state == ROLE_SERVICE);
}
int32_t
rte_service_lcore_add(uint32_t lcore)
{
	if (lcore >= RTE_MAX_LCORE)
		return -EINVAL;
	if (lcore_states[lcore].is_service_core)
		return -EALREADY;

	set_lcore_state(lcore, ROLE_SERVICE);

	/* ensure that after adding a core the mask and state are defaults */
	lcore_states[lcore].service_mask = 0;
	lcore_states[lcore].runstate = RUNSTATE_STOPPED;

	rte_smp_wmb();
	return 0;
}

int32_t
rte_service_lcore_del(uint32_t lcore)
{
	if (lcore >= RTE_MAX_LCORE)
		return -EINVAL;

	struct core_state *cs = &lcore_states[lcore];
	if (!cs->is_service_core)
		return -EINVAL;

	if (cs->runstate != RUNSTATE_STOPPED)
		return -EBUSY;

	set_lcore_state(lcore, ROLE_RTE);

	rte_smp_wmb();
	return 0;
}
int32_t
rte_service_lcore_start(uint32_t lcore)
{
	if (lcore >= RTE_MAX_LCORE)
		return -EINVAL;

	struct core_state *cs = &lcore_states[lcore];
	if (!cs->is_service_core)
		return -EINVAL;

	if (cs->runstate == RUNSTATE_RUNNING)
		return -EALREADY;

	/* set core to run state first, and then launch otherwise it will
	 * return immediately as runstate keeps it in the service poll loop
	 */
	lcore_states[lcore].runstate = RUNSTATE_RUNNING;

	int ret = rte_eal_remote_launch(rte_service_runner_func, 0, lcore);
	/* returns -EBUSY if the core is already launched, 0 on success */
	return ret;
}

int32_t
rte_service_lcore_stop(uint32_t lcore)
{
	if (lcore >= RTE_MAX_LCORE)
		return -EINVAL;

	if (lcore_states[lcore].runstate == RUNSTATE_STOPPED)
		return -EALREADY;

	uint32_t i;
	for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
		/* !! to avoid truncating mask bits above 31 */
		int32_t enabled = !!(lcore_states[lcore].service_mask &
			(UINT64_C(1) << i));
		int32_t service_running = rte_services[i].runstate !=
			RUNSTATE_STOPPED;
		int32_t only_core = rte_services[i].num_mapped_cores == 1;

		/* if the core is mapped, and the service is running, and this
		 * is the only core that is mapped, the service would cease to
		 * run if this core stopped, so fail instead.
		 */
		if (enabled && service_running && only_core)
			return -EBUSY;
	}

	lcore_states[lcore].runstate = RUNSTATE_STOPPED;

	return 0;
}
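/* Illustrative teardown order, compiled out by default: stop the service
 * before its last mapped core, otherwise the -EBUSY check above rejects
 * stopping the lcore. The function name teardown_example is hypothetical.
 */
#ifdef RTE_SERVICE_EXAMPLE
static void
teardown_example(struct rte_service_spec *spec, uint32_t lcore_id)
{
	rte_service_stop(spec);			/* service stops running */
	rte_service_lcore_stop(lcore_id);	/* poll loop exits */
	rte_service_lcore_del(lcore_id);	/* core returns to ROLE_RTE */
}
#endif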
static void
rte_service_dump_one(FILE *f, struct rte_service_spec_impl *s,
		uint64_t all_cycles, uint32_t reset)
{
	RTE_SET_USED(all_cycles);

	/* avoid divide by zero */
	int calls = 1;
	if (s->calls != 0)
		calls = s->calls;

	fprintf(f, "  %s: stats %d\tcalls %"PRIu64"\tcycles %"
			PRIu64"\tavg: %"PRIu64"\n",
			s->spec.name, service_stats_enabled(s), s->calls,
			s->cycles_spent, s->cycles_spent / calls);

	if (reset) {
		s->cycles_spent = 0;
		s->calls = 0;
	}
}

static void
service_dump_calls_per_lcore(FILE *f, uint32_t lcore, uint32_t reset)
{
	uint32_t i;
	struct core_state *cs = &lcore_states[lcore];

	fprintf(f, "%02d\t", lcore);
	for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
		if (!service_valid(i))
			continue;
		fprintf(f, "%"PRIu64"\t", cs->calls_per_service[i]);
		if (reset)
			cs->calls_per_service[i] = 0;
	}
	fprintf(f, "\n");
}
int32_t rte_service_dump(FILE *f, struct rte_service_spec *service)
{
	uint32_t i;

	uint64_t total_cycles = 0;
	for (i = 0; i < rte_service_count; i++) {
		if (!service_valid(i))
			continue;
		total_cycles += rte_services[i].cycles_spent;
	}

	if (service) {
		struct rte_service_spec_impl *s =
			(struct rte_service_spec_impl *)service;
		fprintf(f, "Service %s Summary\n", s->spec.name);
		uint32_t reset = 0;
		rte_service_dump_one(f, s, total_cycles, reset);
		return 0;
	}

	fprintf(f, "Services Summary\n");
	for (i = 0; i < rte_service_count; i++) {
		uint32_t reset = 1;
		rte_service_dump_one(f, &rte_services[i], total_cycles, reset);
	}

	fprintf(f, "Service Cores Summary\n");
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		if (lcore_config[i].core_role != ROLE_SERVICE)
			continue;

		uint32_t reset = 0;
		service_dump_calls_per_lcore(f, i, reset);
	}

	return 0;
}
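/* Illustrative sketch, compiled out by default: passing NULL dumps a
 * summary of all services (resetting their stats) followed by per-core
 * call counts; passing a specific service limits output to that service.
 */
#ifdef RTE_SERVICE_EXAMPLE
static void
dump_example(void)
{
	rte_service_dump(stdout, NULL);
}
#endif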