service: fix memory leak with new function
[dpdk.git] / lib / librte_eal / common / rte_service.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2017 Intel Corporation
3  */
4
5 #include <stdio.h>
6 #include <unistd.h>
7 #include <inttypes.h>
8 #include <limits.h>
9 #include <string.h>
10 #include <dirent.h>
11
12 #include <rte_service.h>
13 #include "include/rte_service_component.h"
14
15 #include <rte_eal.h>
16 #include <rte_lcore.h>
17 #include <rte_common.h>
18 #include <rte_debug.h>
19 #include <rte_cycles.h>
20 #include <rte_atomic.h>
21 #include <rte_memory.h>
22 #include <rte_malloc.h>
23
/* maximum number of services that can be registered at once */
#define RTE_SERVICE_NUM_MAX 64

/* bits stored in rte_service_spec_impl.internal_flags:
 * REGISTERED: slot holds a live service; STATS_ENABLED: collect call and
 * cycle counters; START_CHECK: runstate_get() also requires a mapped lcore.
 */
#define SERVICE_F_REGISTERED    (1 << 0)
#define SERVICE_F_STATS_ENABLED (1 << 1)
#define SERVICE_F_START_CHECK   (1 << 2)

/* runstates for services and lcores, denoting if they are active or not */
#define RUNSTATE_STOPPED 0
#define RUNSTATE_RUNNING 1
33
/* internal representation of a service */
struct rte_service_spec_impl {
	/* public part of the struct */
	struct rte_service_spec spec;

	/* atomic lock that when set indicates a service core is currently
	 * running this service callback. When not set, a core may take the
	 * lock and then run the service callback.
	 */
	rte_atomic32_t execute_lock;

	/* API set/get-able variables */
	int8_t app_runstate;    /* set via rte_service_runstate_set() */
	int8_t comp_runstate;   /* set via rte_service_component_runstate_set() */
	uint8_t internal_flags; /* SERVICE_F_* bits */

	/* per service statistics */
	rte_atomic32_t num_mapped_cores; /* lcores currently mapped to this service */
	uint64_t calls;         /* callback invocations (stats-enabled only) */
	uint64_t cycles_spent;  /* TSC cycles inside callback (stats-enabled only) */
} __rte_cache_aligned;
55
/* the internal values of a service core */
struct core_state {
	/* map of services IDs are run on this core */
	uint64_t service_mask;
	uint8_t runstate; /* running or stopped */
	uint8_t is_service_core; /* set if core is currently a service core */

	/* per-service call counters for this core (stats-enabled only) */
	uint64_t calls_per_service[RTE_SERVICE_NUM_MAX];
} __rte_cache_aligned;
66
/* number of currently registered services */
static uint32_t rte_service_count;
/* RTE_SERVICE_NUM_MAX slots, allocated in rte_service_init() */
static struct rte_service_spec_impl *rte_services;
/* RTE_MAX_LCORE entries, allocated in rte_service_init() */
static struct core_state *lcore_states;
/* non-zero once rte_service_init() has completed successfully */
static uint32_t rte_service_library_initialized;
71
72 int32_t rte_service_init(void)
73 {
74         if (rte_service_library_initialized) {
75                 printf("service library init() called, init flag %d\n",
76                         rte_service_library_initialized);
77                 return -EALREADY;
78         }
79
80         rte_services = rte_calloc("rte_services", RTE_SERVICE_NUM_MAX,
81                         sizeof(struct rte_service_spec_impl),
82                         RTE_CACHE_LINE_SIZE);
83         if (!rte_services) {
84                 printf("error allocating rte services array\n");
85                 goto fail_mem;
86         }
87
88         lcore_states = rte_calloc("rte_service_core_states", RTE_MAX_LCORE,
89                         sizeof(struct core_state), RTE_CACHE_LINE_SIZE);
90         if (!lcore_states) {
91                 printf("error allocating core states array\n");
92                 goto fail_mem;
93         }
94
95         int i;
96         int count = 0;
97         struct rte_config *cfg = rte_eal_get_configuration();
98         for (i = 0; i < RTE_MAX_LCORE; i++) {
99                 if (lcore_config[i].core_role == ROLE_SERVICE) {
100                         if ((unsigned int)i == cfg->master_lcore)
101                                 continue;
102                         rte_service_lcore_add(i);
103                         count++;
104                 }
105         }
106
107         rte_service_library_initialized = 1;
108         return 0;
109 fail_mem:
110         if (rte_services)
111                 rte_free(rte_services);
112         if (lcore_states)
113                 rte_free(lcore_states);
114         return -ENOMEM;
115 }
116
117 void rte_service_finalize(void)
118 {
119         if (!rte_service_library_initialized)
120                 return;
121
122         if (rte_services)
123                 rte_free(rte_services);
124
125         if (lcore_states)
126                 rte_free(lcore_states);
127
128         rte_service_library_initialized = 0;
129 }
130
131 /* returns 1 if service is registered and has not been unregistered
132  * Returns 0 if service never registered, or has been unregistered
133  */
134 static inline int
135 service_valid(uint32_t id)
136 {
137         return !!(rte_services[id].internal_flags & SERVICE_F_REGISTERED);
138 }
139
/* validate ID and retrieve service pointer, or return error value.
 * Note: expands to an early `return retval` from the enclosing function
 * when id is out of range or not a registered service.
 */
#define SERVICE_VALID_GET_OR_ERR_RET(id, service, retval) do {          \
	if (id >= RTE_SERVICE_NUM_MAX || !service_valid(id))            \
		return retval;                                          \
	service = &rte_services[id];                                    \
} while (0)
146
147 /* returns 1 if statistics should be collected for service
148  * Returns 0 if statistics should not be collected for service
149  */
150 static inline int
151 service_stats_enabled(struct rte_service_spec_impl *impl)
152 {
153         return !!(impl->internal_flags & SERVICE_F_STATS_ENABLED);
154 }
155
156 static inline int
157 service_mt_safe(struct rte_service_spec_impl *s)
158 {
159         return !!(s->spec.capabilities & RTE_SERVICE_CAP_MT_SAFE);
160 }
161
162 int32_t rte_service_set_stats_enable(uint32_t id, int32_t enabled)
163 {
164         struct rte_service_spec_impl *s;
165         SERVICE_VALID_GET_OR_ERR_RET(id, s, 0);
166
167         if (enabled)
168                 s->internal_flags |= SERVICE_F_STATS_ENABLED;
169         else
170                 s->internal_flags &= ~(SERVICE_F_STATS_ENABLED);
171
172         return 0;
173 }
174
175 int32_t rte_service_set_runstate_mapped_check(uint32_t id, int32_t enabled)
176 {
177         struct rte_service_spec_impl *s;
178         SERVICE_VALID_GET_OR_ERR_RET(id, s, 0);
179
180         if (enabled)
181                 s->internal_flags |= SERVICE_F_START_CHECK;
182         else
183                 s->internal_flags &= ~(SERVICE_F_START_CHECK);
184
185         return 0;
186 }
187
/* Return the number of services currently registered. */
uint32_t
rte_service_get_count(void)
{
	return rte_service_count;
}
193
194 int32_t rte_service_get_by_name(const char *name, uint32_t *service_id)
195 {
196         if (!service_id)
197                 return -EINVAL;
198
199         int i;
200         for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
201                 if (service_valid(i) &&
202                                 strcmp(name, rte_services[i].spec.name) == 0) {
203                         *service_id = i;
204                         return 0;
205                 }
206         }
207
208         return -ENODEV;
209 }
210
211 const char *
212 rte_service_get_name(uint32_t id)
213 {
214         struct rte_service_spec_impl *s;
215         SERVICE_VALID_GET_OR_ERR_RET(id, s, 0);
216         return s->spec.name;
217 }
218
219 int32_t
220 rte_service_probe_capability(uint32_t id, uint32_t capability)
221 {
222         struct rte_service_spec_impl *s;
223         SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
224         return !!(s->spec.capabilities & capability);
225 }
226
227 int32_t
228 rte_service_component_register(const struct rte_service_spec *spec,
229                                uint32_t *id_ptr)
230 {
231         uint32_t i;
232         int32_t free_slot = -1;
233
234         if (spec->callback == NULL || strlen(spec->name) == 0)
235                 return -EINVAL;
236
237         for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
238                 if (!service_valid(i)) {
239                         free_slot = i;
240                         break;
241                 }
242         }
243
244         if ((free_slot < 0) || (i == RTE_SERVICE_NUM_MAX))
245                 return -ENOSPC;
246
247         struct rte_service_spec_impl *s = &rte_services[free_slot];
248         s->spec = *spec;
249         s->internal_flags |= SERVICE_F_REGISTERED | SERVICE_F_START_CHECK;
250
251         rte_smp_wmb();
252         rte_service_count++;
253
254         if (id_ptr)
255                 *id_ptr = free_slot;
256
257         return 0;
258 }
259
/* Unregister service @id and release its slot for reuse.
 * Returns 0 on success, -EINVAL for an invalid/unregistered id.
 */
int32_t
rte_service_component_unregister(uint32_t id)
{
	uint32_t i;
	struct rte_service_spec_impl *s;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);

	/* drop the count first; the barrier publishes it before the slot
	 * teardown below becomes visible
	 */
	rte_service_count--;
	rte_smp_wmb();

	/* mark unregistered so service_valid(id) now returns 0 */
	s->internal_flags &= ~(SERVICE_F_REGISTERED);

	/* clear the run-bit in all cores */
	for (i = 0; i < RTE_MAX_LCORE; i++)
		lcore_states[i].service_mask &= ~(UINT64_C(1) << id);

	/* wipe the slot (spec, runstates, stats) for potential reuse */
	memset(&rte_services[id], 0, sizeof(struct rte_service_spec_impl));

	return 0;
}
280
281 int32_t
282 rte_service_component_runstate_set(uint32_t id, uint32_t runstate)
283 {
284         struct rte_service_spec_impl *s;
285         SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
286
287         if (runstate)
288                 s->comp_runstate = RUNSTATE_RUNNING;
289         else
290                 s->comp_runstate = RUNSTATE_STOPPED;
291
292         rte_smp_wmb();
293         return 0;
294 }
295
296 int32_t
297 rte_service_runstate_set(uint32_t id, uint32_t runstate)
298 {
299         struct rte_service_spec_impl *s;
300         SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
301
302         if (runstate)
303                 s->app_runstate = RUNSTATE_RUNNING;
304         else
305                 s->app_runstate = RUNSTATE_STOPPED;
306
307         rte_smp_wmb();
308         return 0;
309 }
310
/* Report whether service @id is expected to run.
 *
 * Returns 1 when both the application and component runstates are
 * RUNNING and either the start-check is disabled (SERVICE_F_START_CHECK
 * cleared) or at least one lcore is mapped; 0 otherwise; -EINVAL for an
 * invalid id.
 */
int32_t
rte_service_runstate_get(uint32_t id)
{
	struct rte_service_spec_impl *s;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
	/* pairs with the wmb in the runstate setters */
	rte_smp_rmb();

	int check_disabled = !(s->internal_flags & SERVICE_F_START_CHECK);
	int lcore_mapped = (rte_atomic32_read(&s->num_mapped_cores) > 0);

	return (s->app_runstate == RUNSTATE_RUNNING) &&
		(s->comp_runstate == RUNSTATE_RUNNING) &&
		(check_disabled | lcore_mapped);
}
325
326 static inline void
327 rte_service_runner_do_callback(struct rte_service_spec_impl *s,
328                                struct core_state *cs, uint32_t service_idx)
329 {
330         void *userdata = s->spec.callback_userdata;
331
332         if (service_stats_enabled(s)) {
333                 uint64_t start = rte_rdtsc();
334                 s->spec.callback(userdata);
335                 uint64_t end = rte_rdtsc();
336                 s->cycles_spent += end - start;
337                 cs->calls_per_service[service_idx]++;
338                 s->calls++;
339         } else
340                 s->spec.callback(userdata);
341 }
342
343
/* Run service @i on the calling core if it is registered, running, and
 * present in @service_mask.
 *
 * Returns 0 if the callback executed, -EINVAL for an unregistered id,
 * -ENOEXEC when the service is stopped or not mapped, -EBUSY when an
 * MT-unsafe service is already executing on another core.
 * NOTE(review): assumes i < RTE_SERVICE_NUM_MAX — callers either iterate
 * the fixed-size array or must bounds-check first.
 */
static inline int32_t
service_run(uint32_t i, struct core_state *cs, uint64_t service_mask)
{
	if (!service_valid(i))
		return -EINVAL;
	struct rte_service_spec_impl *s = &rte_services[i];
	if (s->comp_runstate != RUNSTATE_RUNNING ||
			s->app_runstate != RUNSTATE_RUNNING ||
			!(service_mask & (UINT64_C(1) << i)))
		return -ENOEXEC;

	/* check do we need cmpset, if MT safe or <= 1 core
	 * mapped, atomic ops are not required.
	 */
	const int use_atomics = (service_mt_safe(s) == 0) &&
				(rte_atomic32_read(&s->num_mapped_cores) > 1);
	if (use_atomics) {
		/* execute_lock serializes MT-unsafe services across cores */
		if (!rte_atomic32_cmpset((uint32_t *)&s->execute_lock, 0, 1))
			return -EBUSY;

		rte_service_runner_do_callback(s, cs, i);
		rte_atomic32_clear(&s->execute_lock);
	} else
		rte_service_runner_do_callback(s, cs, i);

	return 0;
}
371
372 int32_t rte_service_run_iter_on_app_lcore(uint32_t id,
373                 uint32_t serialize_mt_unsafe)
374 {
375         /* run service on calling core, using all-ones as the service mask */
376         if (!service_valid(id))
377                 return -EINVAL;
378
379         struct core_state *cs = &lcore_states[rte_lcore_id()];
380         struct rte_service_spec_impl *s = &rte_services[id];
381
382         /* Atomically add this core to the mapped cores first, then examine if
383          * we can run the service. This avoids a race condition between
384          * checking the value, and atomically adding to the mapped count.
385          */
386         if (serialize_mt_unsafe)
387                 rte_atomic32_inc(&s->num_mapped_cores);
388
389         if (service_mt_safe(s) == 0 &&
390                         rte_atomic32_read(&s->num_mapped_cores) > 1) {
391                 if (serialize_mt_unsafe)
392                         rte_atomic32_dec(&s->num_mapped_cores);
393                 return -EBUSY;
394         }
395
396         int ret = service_run(id, cs, UINT64_MAX);
397
398         if (serialize_mt_unsafe)
399                 rte_atomic32_dec(&s->num_mapped_cores);
400
401         return ret;
402 }
403
/* Poll loop executed on each service lcore: repeatedly attempt every
 * registered service against this core's service_mask until the core's
 * runstate leaves RUNSTATE_RUNNING, then park the lcore in WAIT state so
 * rte_eal_wait_lcore() can reap it.
 */
static int32_t
rte_service_runner_func(void *arg)
{
	RTE_SET_USED(arg);
	uint32_t i;
	const int lcore = rte_lcore_id();
	struct core_state *cs = &lcore_states[lcore];

	while (lcore_states[lcore].runstate == RUNSTATE_RUNNING) {
		const uint64_t service_mask = cs->service_mask;

		for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
			/* return value ignored as no change to code flow */
			service_run(i, cs, service_mask);
		}

		/* observe runstate/mask updates made by other cores */
		rte_smp_rmb();
	}

	lcore_config[lcore].state = WAIT;

	return 0;
}
427
428 int32_t
429 rte_service_lcore_count(void)
430 {
431         int32_t count = 0;
432         uint32_t i;
433         for (i = 0; i < RTE_MAX_LCORE; i++)
434                 count += lcore_states[i].is_service_core;
435         return count;
436 }
437
438 int32_t
439 rte_service_lcore_list(uint32_t array[], uint32_t n)
440 {
441         uint32_t count = rte_service_lcore_count();
442         if (count > n)
443                 return -ENOMEM;
444
445         if (!array)
446                 return -EINVAL;
447
448         uint32_t i;
449         uint32_t idx = 0;
450         for (i = 0; i < RTE_MAX_LCORE; i++) {
451                 struct core_state *cs = &lcore_states[i];
452                 if (cs->is_service_core) {
453                         array[idx] = i;
454                         idx++;
455                 }
456         }
457
458         return count;
459 }
460
461 int32_t
462 rte_service_lcore_count_services(uint32_t lcore)
463 {
464         if (lcore >= RTE_MAX_LCORE)
465                 return -EINVAL;
466
467         struct core_state *cs = &lcore_states[lcore];
468         if (!cs->is_service_core)
469                 return -ENOTSUP;
470
471         return __builtin_popcountll(cs->service_mask);
472 }
473
474 int32_t
475 rte_service_start_with_defaults(void)
476 {
477         /* create a default mapping from cores to services, then start the
478          * services to make them transparent to unaware applications.
479          */
480         uint32_t i;
481         int ret;
482         uint32_t count = rte_service_get_count();
483
484         int32_t lcore_iter = 0;
485         uint32_t ids[RTE_MAX_LCORE] = {0};
486         int32_t lcore_count = rte_service_lcore_list(ids, RTE_MAX_LCORE);
487
488         if (lcore_count == 0)
489                 return -ENOTSUP;
490
491         for (i = 0; (int)i < lcore_count; i++)
492                 rte_service_lcore_start(ids[i]);
493
494         for (i = 0; i < count; i++) {
495                 /* do 1:1 core mapping here, with each service getting
496                  * assigned a single core by default. Adding multiple services
497                  * should multiplex to a single core, or 1:1 if there are the
498                  * same amount of services as service-cores
499                  */
500                 ret = rte_service_map_lcore_set(i, ids[lcore_iter], 1);
501                 if (ret)
502                         return -ENODEV;
503
504                 lcore_iter++;
505                 if (lcore_iter >= lcore_count)
506                         lcore_iter = 0;
507
508                 ret = rte_service_runstate_set(i, 1);
509                 if (ret)
510                         return -ENOEXEC;
511         }
512
513         return 0;
514 }
515
/* Resolve @service back to its slot id, then read and/or modify the
 * mapping between that service and @lcore.
 *
 * If @set is non-NULL: *set non-zero maps the service onto the lcore,
 * zero unmaps it; num_mapped_cores is adjusted only on a real state
 * change. If @enabled is non-NULL, *enabled receives the (possibly
 * updated) mapping state. Returns 0 on success, -EINVAL when the
 * service pointer is unknown, the lcore is out of range, or the lcore
 * is not a service core.
 */
static int32_t
service_update(struct rte_service_spec *service, uint32_t lcore,
		uint32_t *set, uint32_t *enabled)
{
	uint32_t i;
	int32_t sid = -1;

	/* reverse lookup: spec is the first member of the impl struct, so
	 * pointer equality identifies the slot
	 */
	for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
		if ((struct rte_service_spec *)&rte_services[i] == service &&
				service_valid(i)) {
			sid = i;
			break;
		}
	}

	if (sid == -1 || lcore >= RTE_MAX_LCORE)
		return -EINVAL;

	if (!lcore_states[lcore].is_service_core)
		return -EINVAL;

	uint64_t sid_mask = UINT64_C(1) << sid;
	if (set) {
		uint64_t lcore_mapped = lcore_states[lcore].service_mask &
			sid_mask;

		/* only touch mask/refcount when the state actually changes */
		if (*set && !lcore_mapped) {
			lcore_states[lcore].service_mask |= sid_mask;
			rte_atomic32_inc(&rte_services[sid].num_mapped_cores);
		}
		if (!*set && lcore_mapped) {
			lcore_states[lcore].service_mask &= ~(sid_mask);
			rte_atomic32_dec(&rte_services[sid].num_mapped_cores);
		}
	}

	if (enabled)
		*enabled = !!(lcore_states[lcore].service_mask & (sid_mask));

	/* publish mapping changes to the service cores */
	rte_smp_wmb();

	return 0;
}
559
560 int32_t
561 rte_service_map_lcore_set(uint32_t id, uint32_t lcore, uint32_t enabled)
562 {
563         struct rte_service_spec_impl *s;
564         SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
565         uint32_t on = enabled > 0;
566         return service_update(&s->spec, lcore, &on, 0);
567 }
568
569 int32_t
570 rte_service_map_lcore_get(uint32_t id, uint32_t lcore)
571 {
572         struct rte_service_spec_impl *s;
573         SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
574         uint32_t enabled;
575         int ret = service_update(&s->spec, lcore, 0, &enabled);
576         if (ret == 0)
577                 return enabled;
578         return ret;
579 }
580
581 static void
582 set_lcore_state(uint32_t lcore, int32_t state)
583 {
584         /* mark core state in hugepage backed config */
585         struct rte_config *cfg = rte_eal_get_configuration();
586         cfg->lcore_role[lcore] = state;
587
588         /* mark state in process local lcore_config */
589         lcore_config[lcore].core_role = state;
590
591         /* update per-lcore optimized state tracking */
592         lcore_states[lcore].is_service_core = (state == ROLE_SERVICE);
593 }
594
595 int32_t rte_service_lcore_reset_all(void)
596 {
597         /* loop over cores, reset all to mask 0 */
598         uint32_t i;
599         for (i = 0; i < RTE_MAX_LCORE; i++) {
600                 if (lcore_states[i].is_service_core) {
601                         lcore_states[i].service_mask = 0;
602                         set_lcore_state(i, ROLE_RTE);
603                         lcore_states[i].runstate = RUNSTATE_STOPPED;
604                 }
605         }
606         for (i = 0; i < RTE_SERVICE_NUM_MAX; i++)
607                 rte_atomic32_set(&rte_services[i].num_mapped_cores, 0);
608
609         rte_smp_wmb();
610
611         return 0;
612 }
613
614 int32_t
615 rte_service_lcore_add(uint32_t lcore)
616 {
617         if (lcore >= RTE_MAX_LCORE)
618                 return -EINVAL;
619         if (lcore_states[lcore].is_service_core)
620                 return -EALREADY;
621
622         set_lcore_state(lcore, ROLE_SERVICE);
623
624         /* ensure that after adding a core the mask and state are defaults */
625         lcore_states[lcore].service_mask = 0;
626         lcore_states[lcore].runstate = RUNSTATE_STOPPED;
627
628         rte_smp_wmb();
629
630         return rte_eal_wait_lcore(lcore);
631 }
632
633 int32_t
634 rte_service_lcore_del(uint32_t lcore)
635 {
636         if (lcore >= RTE_MAX_LCORE)
637                 return -EINVAL;
638
639         struct core_state *cs = &lcore_states[lcore];
640         if (!cs->is_service_core)
641                 return -EINVAL;
642
643         if (cs->runstate != RUNSTATE_STOPPED)
644                 return -EBUSY;
645
646         set_lcore_state(lcore, ROLE_RTE);
647
648         rte_smp_wmb();
649         return 0;
650 }
651
652 int32_t
653 rte_service_lcore_start(uint32_t lcore)
654 {
655         if (lcore >= RTE_MAX_LCORE)
656                 return -EINVAL;
657
658         struct core_state *cs = &lcore_states[lcore];
659         if (!cs->is_service_core)
660                 return -EINVAL;
661
662         if (cs->runstate == RUNSTATE_RUNNING)
663                 return -EALREADY;
664
665         /* set core to run state first, and then launch otherwise it will
666          * return immediately as runstate keeps it in the service poll loop
667          */
668         lcore_states[lcore].runstate = RUNSTATE_RUNNING;
669
670         int ret = rte_eal_remote_launch(rte_service_runner_func, 0, lcore);
671         /* returns -EBUSY if the core is already launched, 0 on success */
672         return ret;
673 }
674
675 int32_t
676 rte_service_lcore_stop(uint32_t lcore)
677 {
678         if (lcore >= RTE_MAX_LCORE)
679                 return -EINVAL;
680
681         if (lcore_states[lcore].runstate == RUNSTATE_STOPPED)
682                 return -EALREADY;
683
684         uint32_t i;
685         uint64_t service_mask = lcore_states[lcore].service_mask;
686         for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
687                 int32_t enabled = service_mask & (UINT64_C(1) << i);
688                 int32_t service_running = rte_service_runstate_get(i);
689                 int32_t only_core = (1 ==
690                         rte_atomic32_read(&rte_services[i].num_mapped_cores));
691
692                 /* if the core is mapped, and the service is running, and this
693                  * is the only core that is mapped, the service would cease to
694                  * run if this core stopped, so fail instead.
695                  */
696                 if (enabled && service_running && only_core)
697                         return -EBUSY;
698         }
699
700         lcore_states[lcore].runstate = RUNSTATE_STOPPED;
701
702         return 0;
703 }
704
/* Read one statistics attribute of service @id into *attr_value.
 *
 * Returns 0 on success, -EINVAL for an invalid id, NULL attr_value, or
 * unknown attr_id.
 * NOTE(review): cycles_spent and calls are uint64_t but *attr_value is
 * uint32_t, so values above UINT32_MAX are truncated here — widening
 * would require an API change; confirm callers tolerate wrap-around.
 */
int32_t
rte_service_attr_get(uint32_t id, uint32_t attr_id, uint32_t *attr_value)
{
	struct rte_service_spec_impl *s;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);

	if (!attr_value)
		return -EINVAL;

	switch (attr_id) {
	case RTE_SERVICE_ATTR_CYCLES:
		*attr_value = s->cycles_spent;
		return 0;
	case RTE_SERVICE_ATTR_CALL_COUNT:
		*attr_value = s->calls;
		return 0;
	default:
		return -EINVAL;
	}
}
725
726 static void
727 rte_service_dump_one(FILE *f, struct rte_service_spec_impl *s,
728                      uint64_t all_cycles, uint32_t reset)
729 {
730         /* avoid divide by zero */
731         if (all_cycles == 0)
732                 all_cycles = 1;
733
734         int calls = 1;
735         if (s->calls != 0)
736                 calls = s->calls;
737
738         if (reset) {
739                 s->cycles_spent = 0;
740                 s->calls = 0;
741                 return;
742         }
743
744         fprintf(f, "  %s: stats %d\tcalls %"PRIu64"\tcycles %"
745                         PRIu64"\tavg: %"PRIu64"\n",
746                         s->spec.name, service_stats_enabled(s), s->calls,
747                         s->cycles_spent, s->cycles_spent / calls);
748 }
749
750 int32_t
751 rte_service_attr_reset_all(uint32_t id)
752 {
753         struct rte_service_spec_impl *s;
754         SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
755
756         int reset = 1;
757         rte_service_dump_one(NULL, s, 0, reset);
758         return 0;
759 }
760
761 static void
762 service_dump_calls_per_lcore(FILE *f, uint32_t lcore, uint32_t reset)
763 {
764         uint32_t i;
765         struct core_state *cs = &lcore_states[lcore];
766
767         fprintf(f, "%02d\t", lcore);
768         for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
769                 if (!service_valid(i))
770                         continue;
771                 fprintf(f, "%"PRIu64"\t", cs->calls_per_service[i]);
772                 if (reset)
773                         cs->calls_per_service[i] = 0;
774         }
775         fprintf(f, "\n");
776 }
777
778 int32_t rte_service_dump(FILE *f, uint32_t id)
779 {
780         uint32_t i;
781         int print_one = (id != UINT32_MAX);
782
783         uint64_t total_cycles = 0;
784
785         for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
786                 if (!service_valid(i))
787                         continue;
788                 total_cycles += rte_services[i].cycles_spent;
789         }
790
791         /* print only the specified service */
792         if (print_one) {
793                 struct rte_service_spec_impl *s;
794                 SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
795                 fprintf(f, "Service %s Summary\n", s->spec.name);
796                 uint32_t reset = 0;
797                 rte_service_dump_one(f, s, total_cycles, reset);
798                 return 0;
799         }
800
801         /* print all services, as UINT32_MAX was passed as id */
802         fprintf(f, "Services Summary\n");
803         for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
804                 if (!service_valid(i))
805                         continue;
806                 uint32_t reset = 0;
807                 rte_service_dump_one(f, &rte_services[i], total_cycles, reset);
808         }
809
810         fprintf(f, "Service Cores Summary\n");
811         for (i = 0; i < RTE_MAX_LCORE; i++) {
812                 if (lcore_config[i].core_role != ROLE_SERVICE)
813                         continue;
814
815                 uint32_t reset = 0;
816                 service_dump_calls_per_lcore(f, i, reset);
817         }
818
819         return 0;
820 }