service: fix possible mem leak on initialize
lib/librte_eal/common/rte_service.c (dpdk.git)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <stdio.h>
#include <unistd.h>
#include <inttypes.h>
#include <limits.h>
#include <string.h>
#include <dirent.h>

#include <rte_service.h>
#include "include/rte_service_component.h"

#include <rte_eal.h>
#include <rte_lcore.h>
#include <rte_common.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_atomic.h>
#include <rte_memory.h>
#include <rte_malloc.h>

#define RTE_SERVICE_NUM_MAX 64

#define SERVICE_F_REGISTERED    (1 << 0)
#define SERVICE_F_STATS_ENABLED (1 << 1)
#define SERVICE_F_START_CHECK   (1 << 2)

/* runstates for services and lcores, denoting whether they are active */
#define RUNSTATE_STOPPED 0
#define RUNSTATE_RUNNING 1

/* internal representation of a service */
struct rte_service_spec_impl {
        /* public part of the struct */
        struct rte_service_spec spec;

        /* atomic lock that, when set, indicates a service core is currently
         * running this service callback. When not set, a core may take the
         * lock and then run the service callback.
         */
        rte_atomic32_t execute_lock;

        /* API set/get-able variables */
        int8_t app_runstate;
        int8_t comp_runstate;
        uint8_t internal_flags;

        /* per-service statistics */
        rte_atomic32_t num_mapped_cores;
        uint64_t calls;
        uint64_t cycles_spent;
} __rte_cache_aligned;

/* the internal values of a service core */
struct core_state {
        /* bitmask of the service IDs mapped to run on this core */
        uint64_t service_mask;
        uint8_t runstate; /* running or stopped */
        uint8_t is_service_core; /* set if core is currently a service core */

        /* per-service call statistics */
        uint64_t calls_per_service[RTE_SERVICE_NUM_MAX];
} __rte_cache_aligned;

static uint32_t rte_service_count;
static struct rte_service_spec_impl *rte_services;
static struct core_state *lcore_states;
static uint32_t rte_service_library_initialized;

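/* Initialize the service library: allocate the service array and the
 * per-lcore state array, and add any lcores given the SERVICE role at EAL
 * init time as service cores. On allocation failure, both arrays are freed
 * so that a partial allocation cannot leak.
 */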
int32_t rte_service_init(void)
{
        if (rte_service_library_initialized) {
                printf("service library init() called, init flag %d\n",
                        rte_service_library_initialized);
                return -EALREADY;
        }

        rte_services = rte_calloc("rte_services", RTE_SERVICE_NUM_MAX,
                        sizeof(struct rte_service_spec_impl),
                        RTE_CACHE_LINE_SIZE);
        if (!rte_services) {
                printf("error allocating rte services array\n");
                goto fail_mem;
        }

        lcore_states = rte_calloc("rte_service_core_states", RTE_MAX_LCORE,
                        sizeof(struct core_state), RTE_CACHE_LINE_SIZE);
        if (!lcore_states) {
                printf("error allocating core states array\n");
                goto fail_mem;
        }

        int i;
        int count = 0;
        struct rte_config *cfg = rte_eal_get_configuration();
        for (i = 0; i < RTE_MAX_LCORE; i++) {
                if (lcore_config[i].core_role == ROLE_SERVICE) {
                        if ((unsigned int)i == cfg->master_lcore)
                                continue;
                        rte_service_lcore_add(i);
                        count++;
                }
        }

        rte_service_library_initialized = 1;
        return 0;
fail_mem:
        if (rte_services)
                rte_free(rte_services);
        if (lcore_states)
                rte_free(lcore_states);
        return -ENOMEM;
}

/* Returns 1 if the service is registered and has not been unregistered.
 * Returns 0 if the service was never registered, or has been unregistered.
 */
static inline int
service_valid(uint32_t id)
{
        return !!(rte_services[id].internal_flags & SERVICE_F_REGISTERED);
}

/* validate ID and retrieve service pointer, or return error value */
#define SERVICE_VALID_GET_OR_ERR_RET(id, service, retval) do {          \
        if (id >= RTE_SERVICE_NUM_MAX || !service_valid(id))            \
                return retval;                                          \
        service = &rte_services[id];                                    \
} while (0)

/* Returns 1 if statistics should be collected for the service.
 * Returns 0 if statistics should not be collected for the service.
 */
static inline int
service_stats_enabled(struct rte_service_spec_impl *impl)
{
        return !!(impl->internal_flags & SERVICE_F_STATS_ENABLED);
}

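/* returns 1 if the service declares the MT_SAFE capability, 0 otherwise */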
static inline int
service_mt_safe(struct rte_service_spec_impl *s)
{
        return !!(s->spec.capabilities & RTE_SERVICE_CAP_MT_SAFE);
}

int32_t rte_service_set_stats_enable(uint32_t id, int32_t enabled)
{
        struct rte_service_spec_impl *s;
        SERVICE_VALID_GET_OR_ERR_RET(id, s, 0);

        if (enabled)
                s->internal_flags |= SERVICE_F_STATS_ENABLED;
        else
                s->internal_flags &= ~(SERVICE_F_STATS_ENABLED);

        return 0;
}

int32_t rte_service_set_runstate_mapped_check(uint32_t id, int32_t enabled)
{
        struct rte_service_spec_impl *s;
        SERVICE_VALID_GET_OR_ERR_RET(id, s, 0);

        if (enabled)
                s->internal_flags |= SERVICE_F_START_CHECK;
        else
                s->internal_flags &= ~(SERVICE_F_START_CHECK);

        return 0;
}

uint32_t
rte_service_get_count(void)
{
        return rte_service_count;
}

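/* look up a registered service by name, writing its ID to *service_id */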
int32_t rte_service_get_by_name(const char *name, uint32_t *service_id)
{
        if (!service_id)
                return -EINVAL;

        int i;
        for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
                if (service_valid(i) &&
                                strcmp(name, rte_services[i].spec.name) == 0) {
                        *service_id = i;
                        return 0;
                }
        }

        return -ENODEV;
}

const char *
rte_service_get_name(uint32_t id)
{
        struct rte_service_spec_impl *s;
        SERVICE_VALID_GET_OR_ERR_RET(id, s, 0);
        return s->spec.name;
}

int32_t
rte_service_probe_capability(uint32_t id, uint32_t capability)
{
        struct rte_service_spec_impl *s;
        SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
        return !!(s->spec.capabilities & capability);
}

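/* register a service component: claim the first free slot in the service
 * array, copy the spec into it, and mark it registered with the start-check
 * flag enabled by default
 */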
int32_t
rte_service_component_register(const struct rte_service_spec *spec,
                               uint32_t *id_ptr)
{
        uint32_t i;
        int32_t free_slot = -1;

        if (spec->callback == NULL || strlen(spec->name) == 0)
                return -EINVAL;

        for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
                if (!service_valid(i)) {
                        free_slot = i;
                        break;
                }
        }

        if ((free_slot < 0) || (i == RTE_SERVICE_NUM_MAX))
                return -ENOSPC;

        struct rte_service_spec_impl *s = &rte_services[free_slot];
        s->spec = *spec;
        s->internal_flags |= SERVICE_F_REGISTERED | SERVICE_F_START_CHECK;

        rte_smp_wmb();
        rte_service_count++;

        if (id_ptr)
                *id_ptr = free_slot;

        return 0;
}

int32_t
rte_service_component_unregister(uint32_t id)
{
        uint32_t i;
        struct rte_service_spec_impl *s;
        SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);

        rte_service_count--;
        rte_smp_wmb();

        s->internal_flags &= ~(SERVICE_F_REGISTERED);

        /* clear the run-bit in all cores */
        for (i = 0; i < RTE_MAX_LCORE; i++)
                lcore_states[i].service_mask &= ~(UINT64_C(1) << id);

        memset(&rte_services[id], 0, sizeof(struct rte_service_spec_impl));

        return 0;
}

int32_t
rte_service_component_runstate_set(uint32_t id, uint32_t runstate)
{
        struct rte_service_spec_impl *s;
        SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);

        if (runstate)
                s->comp_runstate = RUNSTATE_RUNNING;
        else
                s->comp_runstate = RUNSTATE_STOPPED;

        rte_smp_wmb();
        return 0;
}

int32_t
rte_service_runstate_set(uint32_t id, uint32_t runstate)
{
        struct rte_service_spec_impl *s;
        SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);

        if (runstate)
                s->app_runstate = RUNSTATE_RUNNING;
        else
                s->app_runstate = RUNSTATE_STOPPED;

        rte_smp_wmb();
        return 0;
}

int32_t
rte_service_runstate_get(uint32_t id)
{
        struct rte_service_spec_impl *s;
        SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
        rte_smp_rmb();

        int check_disabled = !(s->internal_flags & SERVICE_F_START_CHECK);
        int lcore_mapped = (rte_atomic32_read(&s->num_mapped_cores) > 0);

        return (s->app_runstate == RUNSTATE_RUNNING) &&
                (s->comp_runstate == RUNSTATE_RUNNING) &&
                (check_disabled | lcore_mapped);
}

static inline void
rte_service_runner_do_callback(struct rte_service_spec_impl *s,
                               struct core_state *cs, uint32_t service_idx)
{
        void *userdata = s->spec.callback_userdata;

        if (service_stats_enabled(s)) {
                uint64_t start = rte_rdtsc();
                s->spec.callback(userdata);
                uint64_t end = rte_rdtsc();
                s->cycles_spent += end - start;
                cs->calls_per_service[service_idx]++;
                s->calls++;
        } else
                s->spec.callback(userdata);
}

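/* Run one iteration of service i on the calling core. Returns -ENOEXEC if
 * the service is not running or not present in service_mask, and -EBUSY if
 * an MT-unsafe service is already executing on another core.
 */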
static inline int32_t
service_run(uint32_t i, struct core_state *cs, uint64_t service_mask)
{
        if (!service_valid(i))
                return -EINVAL;
        struct rte_service_spec_impl *s = &rte_services[i];
        if (s->comp_runstate != RUNSTATE_RUNNING ||
                        s->app_runstate != RUNSTATE_RUNNING ||
                        !(service_mask & (UINT64_C(1) << i)))
                return -ENOEXEC;

        /* Check whether cmpset is required: if the service is MT safe, or
         * if at most one core is mapped, atomic ops are not required.
         */
        const int use_atomics = (service_mt_safe(s) == 0) &&
                                (rte_atomic32_read(&s->num_mapped_cores) > 1);
        if (use_atomics) {
                if (!rte_atomic32_cmpset((uint32_t *)&s->execute_lock, 0, 1))
                        return -EBUSY;

                rte_service_runner_do_callback(s, cs, i);
                rte_atomic32_clear(&s->execute_lock);
        } else
                rte_service_runner_do_callback(s, cs, i);

        return 0;
}

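/* Run one iteration of a service on the calling application lcore. When
 * serialize_mt_unsafe is set, this core is counted in num_mapped_cores so
 * that a concurrent run of an MT-unsafe service fails with -EBUSY rather
 * than executing the callback in parallel.
 */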
int32_t rte_service_run_iter_on_app_lcore(uint32_t id,
                uint32_t serialize_mt_unsafe)
{
        /* run service on calling core, using all-ones as the service mask */
        if (!service_valid(id))
                return -EINVAL;

        struct core_state *cs = &lcore_states[rte_lcore_id()];
        struct rte_service_spec_impl *s = &rte_services[id];

        /* Atomically add this core to the mapped cores first, then examine if
         * we can run the service. This avoids a race condition between
         * checking the value, and atomically adding to the mapped count.
         */
        if (serialize_mt_unsafe)
                rte_atomic32_inc(&s->num_mapped_cores);

        if (service_mt_safe(s) == 0 &&
                        rte_atomic32_read(&s->num_mapped_cores) > 1) {
                if (serialize_mt_unsafe)
                        rte_atomic32_dec(&s->num_mapped_cores);
                return -EBUSY;
        }

        int ret = service_run(id, cs, UINT64_MAX);

        if (serialize_mt_unsafe)
                rte_atomic32_dec(&s->num_mapped_cores);

        return ret;
}

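/* Main loop of a service core: poll every service mapped in service_mask
 * until the core's runstate is set back to stopped.
 */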
static int32_t
rte_service_runner_func(void *arg)
{
        RTE_SET_USED(arg);
        uint32_t i;
        const int lcore = rte_lcore_id();
        struct core_state *cs = &lcore_states[lcore];

        while (lcore_states[lcore].runstate == RUNSTATE_RUNNING) {
                const uint64_t service_mask = cs->service_mask;

                for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
                        /* return value ignored as no change to code flow */
                        service_run(i, cs, service_mask);
                }

                rte_smp_rmb();
        }

        lcore_config[lcore].state = WAIT;

        return 0;
}

int32_t
rte_service_lcore_count(void)
{
        int32_t count = 0;
        uint32_t i;
        for (i = 0; i < RTE_MAX_LCORE; i++)
                count += lcore_states[i].is_service_core;
        return count;
}

int32_t
rte_service_lcore_list(uint32_t array[], uint32_t n)
{
        uint32_t count = rte_service_lcore_count();
        if (count > n)
                return -ENOMEM;

        if (!array)
                return -EINVAL;

        uint32_t i;
        uint32_t idx = 0;
        for (i = 0; i < RTE_MAX_LCORE; i++) {
                struct core_state *cs = &lcore_states[i];
                if (cs->is_service_core) {
                        array[idx] = i;
                        idx++;
                }
        }

        return count;
}

int32_t
rte_service_lcore_count_services(uint32_t lcore)
{
        if (lcore >= RTE_MAX_LCORE)
                return -EINVAL;

        struct core_state *cs = &lcore_states[lcore];
        if (!cs->is_service_core)
                return -ENOTSUP;

        return __builtin_popcountll(cs->service_mask);
}

int32_t
rte_service_start_with_defaults(void)
{
        /* create a default mapping from cores to services, then start the
         * services to make them transparent to unaware applications.
         */
        uint32_t i;
        int ret;
        uint32_t count = rte_service_get_count();

        int32_t lcore_iter = 0;
        uint32_t ids[RTE_MAX_LCORE] = {0};
        int32_t lcore_count = rte_service_lcore_list(ids, RTE_MAX_LCORE);

        if (lcore_count == 0)
                return -ENOTSUP;

        for (i = 0; (int)i < lcore_count; i++)
                rte_service_lcore_start(ids[i]);

        for (i = 0; i < count; i++) {
                /* Map services to cores round-robin: each service is
                 * assigned a single core by default. Multiple services
                 * multiplex onto the available cores, giving a 1:1 mapping
                 * when there are as many service cores as services.
                 */
                ret = rte_service_map_lcore_set(i, ids[lcore_iter], 1);
                if (ret)
                        return -ENODEV;

                lcore_iter++;
                if (lcore_iter >= lcore_count)
                        lcore_iter = 0;

                ret = rte_service_runstate_set(i, 1);
                if (ret)
                        return -ENOEXEC;
        }

        return 0;
}

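/* Resolve the service backing 'service' and update or query its mapping to
 * an lcore: if 'set' is non-NULL it selects mapping or unmapping, and if
 * 'enabled' is non-NULL it receives the current mapping state.
 */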
static int32_t
service_update(struct rte_service_spec *service, uint32_t lcore,
                uint32_t *set, uint32_t *enabled)
{
        uint32_t i;
        int32_t sid = -1;

        for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
                if ((struct rte_service_spec *)&rte_services[i] == service &&
                                service_valid(i)) {
                        sid = i;
                        break;
                }
        }

        if (sid == -1 || lcore >= RTE_MAX_LCORE)
                return -EINVAL;

        if (!lcore_states[lcore].is_service_core)
                return -EINVAL;

        uint64_t sid_mask = UINT64_C(1) << sid;
        if (set) {
                uint64_t lcore_mapped = lcore_states[lcore].service_mask &
                        sid_mask;

                if (*set && !lcore_mapped) {
                        lcore_states[lcore].service_mask |= sid_mask;
                        rte_atomic32_inc(&rte_services[sid].num_mapped_cores);
                }
                if (!*set && lcore_mapped) {
                        lcore_states[lcore].service_mask &= ~(sid_mask);
                        rte_atomic32_dec(&rte_services[sid].num_mapped_cores);
                }
        }

        if (enabled)
                *enabled = !!(lcore_states[lcore].service_mask & (sid_mask));

        rte_smp_wmb();

        return 0;
}

int32_t
rte_service_map_lcore_set(uint32_t id, uint32_t lcore, uint32_t enabled)
{
        struct rte_service_spec_impl *s;
        SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
        uint32_t on = enabled > 0;
        return service_update(&s->spec, lcore, &on, 0);
}

int32_t
rte_service_map_lcore_get(uint32_t id, uint32_t lcore)
{
        struct rte_service_spec_impl *s;
        SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
        uint32_t enabled;
        int ret = service_update(&s->spec, lcore, 0, &enabled);
        if (ret == 0)
                return enabled;
        return ret;
}

static void
set_lcore_state(uint32_t lcore, int32_t state)
{
        /* mark core state in hugepage backed config */
        struct rte_config *cfg = rte_eal_get_configuration();
        cfg->lcore_role[lcore] = state;

        /* mark state in process local lcore_config */
        lcore_config[lcore].core_role = state;

        /* update per-lcore optimized state tracking */
        lcore_states[lcore].is_service_core = (state == ROLE_SERVICE);
}

int32_t rte_service_lcore_reset_all(void)
{
        /* loop over cores, reset all to mask 0 */
        uint32_t i;
        for (i = 0; i < RTE_MAX_LCORE; i++) {
                if (lcore_states[i].is_service_core) {
                        lcore_states[i].service_mask = 0;
                        set_lcore_state(i, ROLE_RTE);
                        lcore_states[i].runstate = RUNSTATE_STOPPED;
                }
        }
        for (i = 0; i < RTE_SERVICE_NUM_MAX; i++)
                rte_atomic32_set(&rte_services[i].num_mapped_cores, 0);

        rte_smp_wmb();

        return 0;
}

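/* Add an lcore to the pool of service cores, resetting its service mask and
 * runstate to defaults. The rte_eal_wait_lcore() call ensures the lcore has
 * finished any previous work and is back in WAIT state before use.
 */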
int32_t
rte_service_lcore_add(uint32_t lcore)
{
        if (lcore >= RTE_MAX_LCORE)
                return -EINVAL;
        if (lcore_states[lcore].is_service_core)
                return -EALREADY;

        set_lcore_state(lcore, ROLE_SERVICE);

        /* ensure that after adding a core the mask and state are defaults */
        lcore_states[lcore].service_mask = 0;
        lcore_states[lcore].runstate = RUNSTATE_STOPPED;

        rte_smp_wmb();

        return rte_eal_wait_lcore(lcore);
}

int32_t
rte_service_lcore_del(uint32_t lcore)
{
        if (lcore >= RTE_MAX_LCORE)
                return -EINVAL;

        struct core_state *cs = &lcore_states[lcore];
        if (!cs->is_service_core)
                return -EINVAL;

        if (cs->runstate != RUNSTATE_STOPPED)
                return -EBUSY;

        set_lcore_state(lcore, ROLE_RTE);

        rte_smp_wmb();
        return 0;
}

int32_t
rte_service_lcore_start(uint32_t lcore)
{
        if (lcore >= RTE_MAX_LCORE)
                return -EINVAL;

        struct core_state *cs = &lcore_states[lcore];
        if (!cs->is_service_core)
                return -EINVAL;

        if (cs->runstate == RUNSTATE_RUNNING)
                return -EALREADY;

        /* Set the core to the running state first and then launch it;
         * otherwise the runner function would return immediately, as the
         * runstate is what keeps it in the service poll loop.
         */
        lcore_states[lcore].runstate = RUNSTATE_RUNNING;

        int ret = rte_eal_remote_launch(rte_service_runner_func, 0, lcore);
        /* returns -EBUSY if the core is already launched, 0 on success */
        return ret;
}

int32_t
rte_service_lcore_stop(uint32_t lcore)
{
        if (lcore >= RTE_MAX_LCORE)
                return -EINVAL;

        if (lcore_states[lcore].runstate == RUNSTATE_STOPPED)
                return -EALREADY;

        uint32_t i;
        uint64_t service_mask = lcore_states[lcore].service_mask;
        for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
                int32_t enabled = service_mask & (UINT64_C(1) << i);
                int32_t service_running = rte_service_runstate_get(i);
                int32_t only_core = (1 ==
                        rte_atomic32_read(&rte_services[i].num_mapped_cores));

                /* if the core is mapped, and the service is running, and this
                 * is the only core that is mapped, the service would cease to
                 * run if this core stopped, so fail instead.
                 */
                if (enabled && service_running && only_core)
                        return -EBUSY;
        }

        lcore_states[lcore].runstate = RUNSTATE_STOPPED;

        return 0;
}

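/* Read a statistics attribute of a service. Note the 64-bit internal
 * counters are truncated to fit the 32-bit attr_value.
 */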
int32_t
rte_service_attr_get(uint32_t id, uint32_t attr_id, uint32_t *attr_value)
{
        struct rte_service_spec_impl *s;
        SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);

        if (!attr_value)
                return -EINVAL;

        switch (attr_id) {
        case RTE_SERVICE_ATTR_CYCLES:
                *attr_value = s->cycles_spent;
                return 0;
        case RTE_SERVICE_ATTR_CALL_COUNT:
                *attr_value = s->calls;
                return 0;
        default:
                return -EINVAL;
        }
}

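/* Print the statistics of one service or, when reset is set, clear them;
 * in the reset case f may be NULL, as in rte_service_attr_reset_all().
 */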
static void
rte_service_dump_one(FILE *f, struct rte_service_spec_impl *s,
                     uint64_t all_cycles, uint32_t reset)
{
        /* avoid divide by zero */
        if (all_cycles == 0)
                all_cycles = 1;

        int calls = 1;
        if (s->calls != 0)
                calls = s->calls;

        if (reset) {
                s->cycles_spent = 0;
                s->calls = 0;
                return;
        }

        fprintf(f, "  %s: stats %d\tcalls %"PRIu64"\tcycles %"
                        PRIu64"\tavg: %"PRIu64"\n",
                        s->spec.name, service_stats_enabled(s), s->calls,
                        s->cycles_spent, s->cycles_spent / calls);
}

int32_t
rte_service_attr_reset_all(uint32_t id)
{
        struct rte_service_spec_impl *s;
        SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);

        int reset = 1;
        rte_service_dump_one(NULL, s, 0, reset);
        return 0;
}

static void
service_dump_calls_per_lcore(FILE *f, uint32_t lcore, uint32_t reset)
{
        uint32_t i;
        struct core_state *cs = &lcore_states[lcore];

        fprintf(f, "%02d\t", lcore);
        for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
                if (!service_valid(i))
                        continue;
                fprintf(f, "%"PRIu64"\t", cs->calls_per_service[i]);
                if (reset)
                        cs->calls_per_service[i] = 0;
        }
        fprintf(f, "\n");
}

int32_t rte_service_dump(FILE *f, uint32_t id)
{
        uint32_t i;
        int print_one = (id != UINT32_MAX);

        uint64_t total_cycles = 0;

        for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
                if (!service_valid(i))
                        continue;
                total_cycles += rte_services[i].cycles_spent;
        }

        /* print only the specified service */
        if (print_one) {
                struct rte_service_spec_impl *s;
                SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
                fprintf(f, "Service %s Summary\n", s->spec.name);
                uint32_t reset = 0;
                rte_service_dump_one(f, s, total_cycles, reset);
                return 0;
        }

        /* print all services, as UINT32_MAX was passed as id */
        fprintf(f, "Services Summary\n");
        for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
                if (!service_valid(i))
                        continue;
                uint32_t reset = 0;
                rte_service_dump_one(f, &rte_services[i], total_cycles, reset);
        }

        fprintf(f, "Service Cores Summary\n");
        for (i = 0; i < RTE_MAX_LCORE; i++) {
                if (lcore_config[i].core_role != ROLE_SERVICE)
                        continue;

                uint32_t reset = 0;
                service_dump_calls_per_lcore(f, i, reset);
        }

        return 0;
}