service: add function to run on app lcore
[dpdk.git] / lib / librte_eal / common / rte_service.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2017 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <stdio.h>
35 #include <unistd.h>
36 #include <inttypes.h>
37 #include <limits.h>
38 #include <string.h>
39 #include <dirent.h>
40
41 #include <rte_service.h>
42 #include "include/rte_service_component.h"
43
44 #include <rte_eal.h>
45 #include <rte_lcore.h>
46 #include <rte_common.h>
47 #include <rte_debug.h>
48 #include <rte_cycles.h>
49 #include <rte_atomic.h>
50 #include <rte_memory.h>
51 #include <rte_malloc.h>
52
53 #define RTE_SERVICE_NUM_MAX 64
54
55 #define SERVICE_F_REGISTERED    (1 << 0)
56 #define SERVICE_F_STATS_ENABLED (1 << 1)
57
58 /* runstates for services and lcores, denoting if they are active or not */
59 #define RUNSTATE_STOPPED 0
60 #define RUNSTATE_RUNNING 1
61
62 /* internal representation of a service */
63 struct rte_service_spec_impl {
64         /* public part of the struct */
65         struct rte_service_spec spec;
66
67         /* atomic lock that when set indicates a service core is currently
68          * running this service callback. When not set, a core may take the
69          * lock and then run the service callback.
70          */
71         rte_atomic32_t execute_lock;
72
73         /* API set/get-able variables */
74         int8_t app_runstate;
75         int8_t comp_runstate;
76         uint8_t internal_flags;
77
78         /* per service statistics */
79         uint32_t num_mapped_cores;
80         uint64_t calls;
81         uint64_t cycles_spent;
82 } __rte_cache_aligned;
83
84 /* the internal values of a service core */
85 struct core_state {
86         /* map of service IDs that are run on this core */
87         uint64_t service_mask;
88         uint8_t runstate; /* running or stopped */
89         uint8_t is_service_core; /* set if core is currently a service core */
90
91         /* per-service call statistics for this core */
92         uint64_t calls_per_service[RTE_SERVICE_NUM_MAX];
93 } __rte_cache_aligned;
94
95 static uint32_t rte_service_count;
96 static struct rte_service_spec_impl *rte_services;
97 static struct core_state *lcore_states;
98 static uint32_t rte_service_library_initialized;
99
100 int32_t rte_service_init(void)
101 {
102         if (rte_service_library_initialized) {
103                 printf("service library init() called, init flag %d\n",
104                         rte_service_library_initialized);
105                 return -EALREADY;
106         }
107
108         rte_services = rte_calloc("rte_services", RTE_SERVICE_NUM_MAX,
109                         sizeof(struct rte_service_spec_impl),
110                         RTE_CACHE_LINE_SIZE);
111         if (!rte_services) {
112                 printf("error allocating rte services array\n");
113                 return -ENOMEM;
114         }
115
116         lcore_states = rte_calloc("rte_service_core_states", RTE_MAX_LCORE,
117                         sizeof(struct core_state), RTE_CACHE_LINE_SIZE);
118         if (!lcore_states) {
119                 printf("error allocating core states array\n");
120                 return -ENOMEM;
121         }
122
123         int i;
124         int count = 0;
125         struct rte_config *cfg = rte_eal_get_configuration();
126         for (i = 0; i < RTE_MAX_LCORE; i++) {
127                 if (lcore_config[i].core_role == ROLE_SERVICE) {
128                         if ((unsigned int)i == cfg->master_lcore)
129                                 continue;
130                         rte_service_lcore_add(i);
131                         count++;
132                 }
133         }
134
135         rte_service_library_initialized = 1;
136         return 0;
137 }
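/*
 * Note on the function above: rte_service_init() is invoked during EAL
 * initialization. It allocates the service and lcore state arrays, then
 * adopts any lcore the EAL has already flagged with ROLE_SERVICE (skipping
 * the master lcore). The adopted cores remain stopped until the application
 * starts them, e.g. via rte_service_lcore_start() or
 * rte_service_start_with_defaults() below.
 */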
138
139 /* Returns 1 if the service is registered and has not been unregistered.
140  * Returns 0 if the service was never registered, or has been unregistered.
141  */
142 static inline int
143 service_valid(uint32_t id)
144 {
145         return !!(rte_services[id].internal_flags & SERVICE_F_REGISTERED);
146 }
147
148 /* validate ID and retrieve service pointer, or return error value */
149 #define SERVICE_VALID_GET_OR_ERR_RET(id, service, retval) do {          \
150         if (id >= RTE_SERVICE_NUM_MAX || !service_valid(id))            \
151                 return retval;                                          \
152         service = &rte_services[id];                                    \
153 } while (0)
154
155 /* Returns 1 if statistics should be collected for the service.
156  * Returns 0 if statistics should not be collected for the service.
157  */
158 static inline int
159 service_stats_enabled(struct rte_service_spec_impl *impl)
160 {
161         return !!(impl->internal_flags & SERVICE_F_STATS_ENABLED);
162 }
163
164 static inline int
165 service_mt_safe(struct rte_service_spec_impl *s)
166 {
167         return !!(s->spec.capabilities & RTE_SERVICE_CAP_MT_SAFE);
168 }
169
170 int32_t rte_service_set_stats_enable(uint32_t id, int32_t enabled)
171 {
172         struct rte_service_spec_impl *s;
173         SERVICE_VALID_GET_OR_ERR_RET(id, s, 0);
174
175         if (enabled)
176                 s->internal_flags |= SERVICE_F_STATS_ENABLED;
177         else
178                 s->internal_flags &= ~(SERVICE_F_STATS_ENABLED);
179
180         return 0;
181 }
182
183 uint32_t
184 rte_service_get_count(void)
185 {
186         return rte_service_count;
187 }
188
189 int32_t rte_service_get_by_name(const char *name, uint32_t *service_id)
190 {
191         if (!service_id)
192                 return -EINVAL;
193
194         int i;
195         for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
196                 if (service_valid(i) &&
197                                 strcmp(name, rte_services[i].spec.name) == 0) {
198                         *service_id = i;
199                         return 0;
200                 }
201         }
202
203         return -ENODEV;
204 }
205
206 const char *
207 rte_service_get_name(uint32_t id)
208 {
209         struct rte_service_spec_impl *s;
210         SERVICE_VALID_GET_OR_ERR_RET(id, s, 0);
211         return s->spec.name;
212 }
213
214 int32_t
215 rte_service_probe_capability(uint32_t id, uint32_t capability)
216 {
217         struct rte_service_spec_impl *s;
218         SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
219         return !!(s->spec.capabilities & capability);
220 }
221
222 int32_t
223 rte_service_component_register(const struct rte_service_spec *spec,
224                                uint32_t *id_ptr)
225 {
226         uint32_t i;
227         int32_t free_slot = -1;
228
229         if (spec->callback == NULL || strlen(spec->name) == 0)
230                 return -EINVAL;
231
232         for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
233                 if (!service_valid(i)) {
234                         free_slot = i;
235                         break;
236                 }
237         }
238
239         if ((free_slot < 0) || (i == RTE_SERVICE_NUM_MAX))
240                 return -ENOSPC;
241
242         struct rte_service_spec_impl *s = &rte_services[free_slot];
243         s->spec = *spec;
244         s->internal_flags |= SERVICE_F_REGISTERED;
245
246         rte_smp_wmb();
247         rte_service_count++;
248
249         if (id_ptr)
250                 *id_ptr = free_slot;
251
252         return 0;
253 }
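/*
 * Usage sketch (illustrative only, not part of this file): a component such
 * as a PMD registers its background work as a service, then marks the
 * component runstate as running. The spec fields used are those declared in
 * rte_service_component.h; "example_service", example_cb and dev are
 * hypothetical names.
 *
 *     static int32_t example_cb(void *userdata) { ... return 0; }
 *
 *     struct rte_service_spec spec = { 0 };
 *     snprintf(spec.name, sizeof(spec.name), "example_service");
 *     spec.callback = example_cb;
 *     spec.callback_userdata = dev;
 *     spec.socket_id = rte_socket_id();
 *
 *     uint32_t id;
 *     if (rte_service_component_register(&spec, &id) == 0)
 *             rte_service_component_runstate_set(id, 1);
 */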
254
255 int32_t
256 rte_service_component_unregister(uint32_t id)
257 {
258         uint32_t i;
259         struct rte_service_spec_impl *s;
260         SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
261
262         rte_service_count--;
263         rte_smp_wmb();
264
265         s->internal_flags &= ~(SERVICE_F_REGISTERED);
266
267         /* clear this service's bit in every lcore's service mask */
268         for (i = 0; i < RTE_MAX_LCORE; i++)
269                 lcore_states[i].service_mask &= ~(UINT64_C(1) << id);
270
271         memset(&rte_services[id], 0, sizeof(struct rte_service_spec_impl));
272
273         return 0;
274 }
275
276 int32_t
277 rte_service_component_runstate_set(uint32_t id, uint32_t runstate)
278 {
279         struct rte_service_spec_impl *s;
280         SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
281
282         if (runstate)
283                 s->comp_runstate = RUNSTATE_RUNNING;
284         else
285                 s->comp_runstate = RUNSTATE_STOPPED;
286
287         rte_smp_wmb();
288         return 0;
289 }
290
291 int32_t
292 rte_service_runstate_set(uint32_t id, uint32_t runstate)
293 {
294         struct rte_service_spec_impl *s;
295         SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
296
297         if (runstate)
298                 s->app_runstate = RUNSTATE_RUNNING;
299         else
300                 s->app_runstate = RUNSTATE_STOPPED;
301
302         rte_smp_wmb();
303         return 0;
304 }
305
306 int32_t
307 rte_service_runstate_get(uint32_t id)
308 {
309         struct rte_service_spec_impl *s;
310         SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
311         rte_smp_rmb();
312         return (s->app_runstate == RUNSTATE_RUNNING) &&
313                 (s->comp_runstate == RUNSTATE_RUNNING) &&
314                 (s->num_mapped_cores > 0);
315 }
316
317 static inline void
318 rte_service_runner_do_callback(struct rte_service_spec_impl *s,
319                                struct core_state *cs, uint32_t service_idx)
320 {
321         void *userdata = s->spec.callback_userdata;
322
323         if (service_stats_enabled(s)) {
324                 uint64_t start = rte_rdtsc();
325                 s->spec.callback(userdata);
326                 uint64_t end = rte_rdtsc();
327                 s->cycles_spent += end - start;
328                 cs->calls_per_service[service_idx]++;
329                 s->calls++;
330         } else
331                 s->spec.callback(userdata);
332 }
333
334
335 static inline int32_t
336 service_run(uint32_t i, struct core_state *cs, uint64_t service_mask)
337 {
338         if (!service_valid(i))
339                 return -EINVAL;
340         struct rte_service_spec_impl *s = &rte_services[i];
341         if (s->comp_runstate != RUNSTATE_RUNNING ||
342                         s->app_runstate != RUNSTATE_RUNNING ||
343                         !(service_mask & (UINT64_C(1) << i)))
344                 return -ENOEXEC;
345
346         /* check if we need the cmpset: if the service is MT safe, or has
347          * <= 1 core mapped, atomic ops are not required.
348          */
349         const int use_atomics = (service_mt_safe(s) == 0) &&
350                                 (s->num_mapped_cores > 1);
351         if (use_atomics) {
352                 if (!rte_atomic32_cmpset((uint32_t *)&s->execute_lock, 0, 1))
353                         return -EBUSY;
354
355                 rte_service_runner_do_callback(s, cs, i);
356                 rte_atomic32_clear(&s->execute_lock);
357         } else
358                 rte_service_runner_do_callback(s, cs, i);
359
360         return 0;
361 }
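/*
 * To illustrate the locking decision above: a service that does not report
 * RTE_SERVICE_CAP_MT_SAFE but has two or more lcores mapped is serialized
 * through execute_lock, so a second concurrent caller backs off with -EBUSY
 * instead of running the callback in parallel. With at most one mapped core,
 * or an MT safe service, the cmpset is skipped entirely.
 */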
362
363 int32_t rte_service_run_iter_on_app_lcore(uint32_t id)
364 {
365         /* run service on calling core, using all-ones as the service mask */
366         struct core_state *cs = &lcore_states[rte_lcore_id()];
367         return service_run(id, cs, UINT64_MAX);
368 }
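/*
 * Usage sketch for the function above (illustrative only): an application
 * lcore can drive a service directly instead of dedicating a service core
 * to it. "example_service" and keep_running are hypothetical application
 * symbols; the component must already have set its runstate to running.
 *
 *     uint32_t id;
 *     if (rte_service_get_by_name("example_service", &id) == 0 &&
 *                     rte_service_runstate_set(id, 1) == 0) {
 *             while (keep_running)
 *                     rte_service_run_iter_on_app_lcore(id);
 *     }
 */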
369
370 static int32_t
371 rte_service_runner_func(void *arg)
372 {
373         RTE_SET_USED(arg);
374         uint32_t i;
375         const int lcore = rte_lcore_id();
376         struct core_state *cs = &lcore_states[lcore];
377
378         while (lcore_states[lcore].runstate == RUNSTATE_RUNNING) {
379                 const uint64_t service_mask = cs->service_mask;
380
381                 for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
382                         /* return value ignored as no change to code flow */
383                         service_run(i, cs, service_mask);
384                 }
385
386                 rte_smp_rmb();
387         }
388
389         lcore_config[lcore].state = WAIT;
390
391         return 0;
392 }
393
394 int32_t
395 rte_service_lcore_count(void)
396 {
397         int32_t count = 0;
398         uint32_t i;
399         for (i = 0; i < RTE_MAX_LCORE; i++)
400                 count += lcore_states[i].is_service_core;
401         return count;
402 }
403
404 int32_t
405 rte_service_lcore_list(uint32_t array[], uint32_t n)
406 {
407         uint32_t count = rte_service_lcore_count();
408         if (count > n)
409                 return -ENOMEM;
410
411         if (!array)
412                 return -EINVAL;
413
414         uint32_t i;
415         uint32_t idx = 0;
416         for (i = 0; i < RTE_MAX_LCORE; i++) {
417                 struct core_state *cs = &lcore_states[i];
418                 if (cs->is_service_core) {
419                         array[idx] = i;
420                         idx++;
421                 }
422         }
423
424         return count;
425 }
426
427 int32_t
428 rte_service_lcore_count_services(uint32_t lcore)
429 {
430         if (lcore >= RTE_MAX_LCORE)
431                 return -EINVAL;
432
433         struct core_state *cs = &lcore_states[lcore];
434         if (!cs->is_service_core)
435                 return -ENOTSUP;
436
437         return __builtin_popcountll(cs->service_mask);
438 }
439
440 int32_t
441 rte_service_start_with_defaults(void)
442 {
443         /* create a default mapping from cores to services, then start the
444          * services to make them transparent to unaware applications.
445          */
446         uint32_t i;
447         int ret;
448         uint32_t count = rte_service_get_count();
449
450         int32_t lcore_iter = 0;
451         uint32_t ids[RTE_MAX_LCORE] = {0};
452         int32_t lcore_count = rte_service_lcore_list(ids, RTE_MAX_LCORE);
453
454         if (lcore_count == 0)
455                 return -ENOTSUP;
456
457         for (i = 0; (int)i < lcore_count; i++)
458                 rte_service_lcore_start(ids[i]);
459
460         for (i = 0; i < count; i++) {
461                 /* do a 1:1 core mapping here, with each service getting
462                  * assigned a single core by default. Multiple services are
463                  * multiplexed onto a single core, or mapped 1:1 if there is
464                  * the same number of services as service cores.
465                  */
466                 ret = rte_service_map_lcore_set(i, ids[lcore_iter], 1);
467                 if (ret)
468                         return -ENODEV;
469
470                 lcore_iter++;
471                 if (lcore_iter >= lcore_count)
472                         lcore_iter = 0;
473
474                 ret = rte_service_runstate_set(i, 1);
475                 if (ret)
476                         return -ENOEXEC;
477         }
478
479         return 0;
480 }
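/*
 * Usage sketch (illustrative only): the common "service unaware" application
 * path after rte_eal_init(), letting the defaults map every registered
 * service across whatever service cores the EAL reserved:
 *
 *     if (rte_service_get_count() > 0 && rte_service_lcore_count() > 0 &&
 *                     rte_service_start_with_defaults() != 0)
 *             printf("warning: failed to start services with defaults\n");
 */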
481
482 static int32_t
483 service_update(struct rte_service_spec *service, uint32_t lcore,
484                 uint32_t *set, uint32_t *enabled)
485 {
486         uint32_t i;
487         int32_t sid = -1;
488
489         for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
490                 if ((struct rte_service_spec *)&rte_services[i] == service &&
491                                 service_valid(i)) {
492                         sid = i;
493                         break;
494                 }
495         }
496
497         if (sid == -1 || lcore >= RTE_MAX_LCORE)
498                 return -EINVAL;
499
500         if (!lcore_states[lcore].is_service_core)
501                 return -EINVAL;
502
503         uint64_t sid_mask = UINT64_C(1) << sid;
504         if (set) {
505                 if (*set) {
506                         lcore_states[lcore].service_mask |= sid_mask;
507                         rte_services[sid].num_mapped_cores++;
508                 } else {
509                         lcore_states[lcore].service_mask &= ~(sid_mask);
510                         rte_services[sid].num_mapped_cores--;
511                 }
512         }
513
514         if (enabled)
515                 *enabled = !!(lcore_states[lcore].service_mask & (sid_mask));
516
517         rte_smp_wmb();
518
519         return 0;
520 }
521
522 int32_t
523 rte_service_map_lcore_set(uint32_t id, uint32_t lcore, uint32_t enabled)
524 {
525         struct rte_service_spec_impl *s;
526         SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
527         uint32_t on = enabled > 0;
528         return service_update(&s->spec, lcore, &on, 0);
529 }
530
531 int32_t
532 rte_service_map_lcore_get(uint32_t id, uint32_t lcore)
533 {
534         struct rte_service_spec_impl *s;
535         SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
536         uint32_t enabled;
537         int ret = service_update(&s->spec, lcore, 0, &enabled);
538         if (ret == 0)
539                 return enabled;
540         return ret;
541 }
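/*
 * Usage sketch (illustrative only): explicit mapping for applications that
 * do not use the defaults. service_id and lcore_id are hypothetical values,
 * e.g. obtained from rte_service_get_by_name() and rte_service_lcore_list():
 *
 *     rte_service_map_lcore_set(service_id, lcore_id, 1);
 *     rte_service_runstate_set(service_id, 1);
 *     rte_service_lcore_start(lcore_id);
 *
 * and later, to query whether the mapping is still in place (1 or 0):
 *
 *     int enabled = rte_service_map_lcore_get(service_id, lcore_id);
 */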
542
543 int32_t rte_service_lcore_reset_all(void)
544 {
545         /* loop over cores, reset all to mask 0 */
546         uint32_t i;
547         for (i = 0; i < RTE_MAX_LCORE; i++) {
548                 lcore_states[i].service_mask = 0;
549                 lcore_states[i].is_service_core = 0;
550                 lcore_states[i].runstate = RUNSTATE_STOPPED;
551         }
552         for (i = 0; i < RTE_SERVICE_NUM_MAX; i++)
553                 rte_services[i].num_mapped_cores = 0;
554
555         rte_smp_wmb();
556
557         return 0;
558 }
559
560 static void
561 set_lcore_state(uint32_t lcore, int32_t state)
562 {
563         /* mark core state in hugepage backed config */
564         struct rte_config *cfg = rte_eal_get_configuration();
565         cfg->lcore_role[lcore] = state;
566
567         /* mark state in process local lcore_config */
568         lcore_config[lcore].core_role = state;
569
570         /* update per-lcore optimized state tracking */
571         lcore_states[lcore].is_service_core = (state == ROLE_SERVICE);
572 }
573
574 int32_t
575 rte_service_lcore_add(uint32_t lcore)
576 {
577         if (lcore >= RTE_MAX_LCORE)
578                 return -EINVAL;
579         if (lcore_states[lcore].is_service_core)
580                 return -EALREADY;
581
582         set_lcore_state(lcore, ROLE_SERVICE);
583
584         /* ensure that after adding a core the mask and state are defaults */
585         lcore_states[lcore].service_mask = 0;
586         lcore_states[lcore].runstate = RUNSTATE_STOPPED;
587
588         rte_smp_wmb();
589
590         return rte_eal_wait_lcore(lcore);
591 }
592
593 int32_t
594 rte_service_lcore_del(uint32_t lcore)
595 {
596         if (lcore >= RTE_MAX_LCORE)
597                 return -EINVAL;
598
599         struct core_state *cs = &lcore_states[lcore];
600         if (!cs->is_service_core)
601                 return -EINVAL;
602
603         if (cs->runstate != RUNSTATE_STOPPED)
604                 return -EBUSY;
605
606         set_lcore_state(lcore, ROLE_RTE);
607
608         rte_smp_wmb();
609         return 0;
610 }
611
612 int32_t
613 rte_service_lcore_start(uint32_t lcore)
614 {
615         if (lcore >= RTE_MAX_LCORE)
616                 return -EINVAL;
617
618         struct core_state *cs = &lcore_states[lcore];
619         if (!cs->is_service_core)
620                 return -EINVAL;
621
622         if (cs->runstate == RUNSTATE_RUNNING)
623                 return -EALREADY;
624
625         /* set the core to the running state first, then launch; otherwise
626          * it returns immediately, as the runstate keeps it in the poll loop
627          */
628         lcore_states[lcore].runstate = RUNSTATE_RUNNING;
629
630         int ret = rte_eal_remote_launch(rte_service_runner_func, 0, lcore);
631         /* returns -EBUSY if the core is already launched, 0 on success */
632         return ret;
633 }
634
635 int32_t
636 rte_service_lcore_stop(uint32_t lcore)
637 {
638         if (lcore >= RTE_MAX_LCORE)
639                 return -EINVAL;
640
641         if (lcore_states[lcore].runstate == RUNSTATE_STOPPED)
642                 return -EALREADY;
643
644         uint32_t i;
645         uint64_t service_mask = lcore_states[lcore].service_mask;
646         for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
647                 int32_t enabled = !!(service_mask & (UINT64_C(1) << i));
648                 int32_t service_running = rte_service_runstate_get(i);
649                 int32_t only_core = rte_services[i].num_mapped_cores == 1;
650
651                 /* if the core is mapped, and the service is running, and this
652                  * is the only core that is mapped, the service would cease to
653                  * run if this core stopped, so fail instead.
654                  */
655                 if (enabled && service_running && only_core)
656                         return -EBUSY;
657         }
658
659         lcore_states[lcore].runstate = RUNSTATE_STOPPED;
660
661         return 0;
662 }
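/*
 * Lifecycle sketch (illustrative only): an lcore is added as a service core,
 * started, and later stopped and removed once no running service depends on
 * it exclusively (rte_service_lcore_stop() returns -EBUSY in that case).
 * lcore_id is a hypothetical core already reserved by the EAL:
 *
 *     rte_service_lcore_add(lcore_id);
 *     rte_service_lcore_start(lcore_id);
 *     ...
 *     if (rte_service_lcore_stop(lcore_id) == 0)
 *             rte_service_lcore_del(lcore_id);
 */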
663
664 static void
665 rte_service_dump_one(FILE *f, struct rte_service_spec_impl *s,
666                      uint64_t all_cycles, uint32_t reset)
667 {
668         /* avoid divide by zero */
669         if (all_cycles == 0)
670                 all_cycles = 1;
671
672         int calls = 1;
673         if (s->calls != 0)
674                 calls = s->calls;
675
676         fprintf(f, "  %s: stats %d\tcalls %"PRIu64"\tcycles %"
677                         PRIu64"\tavg: %"PRIu64"\n",
678                         s->spec.name, service_stats_enabled(s), s->calls,
679                         s->cycles_spent, s->cycles_spent / calls);
680
681         if (reset) {
682                 s->cycles_spent = 0;
683                 s->calls = 0;
684         }
685 }
686
687 static void
688 service_dump_calls_per_lcore(FILE *f, uint32_t lcore, uint32_t reset)
689 {
690         uint32_t i;
691         struct core_state *cs = &lcore_states[lcore];
692
693         fprintf(f, "%02d\t", lcore);
694         for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
695                 if (!service_valid(i))
696                         continue;
697                 fprintf(f, "%"PRIu64"\t", cs->calls_per_service[i]);
698                 if (reset)
699                         cs->calls_per_service[i] = 0;
700         }
701         fprintf(f, "\n");
702 }
703
704 int32_t rte_service_dump(FILE *f, uint32_t id)
705 {
706         uint32_t i;
707         int print_one = (id != UINT32_MAX);
708
709         uint64_t total_cycles = 0;
710
711         for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
712                 if (!service_valid(i))
713                         continue;
714                 total_cycles += rte_services[i].cycles_spent;
715         }
716
717         /* print only the specified service */
718         if (print_one) {
719                 struct rte_service_spec_impl *s;
720                 SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
721                 fprintf(f, "Service %s Summary\n", s->spec.name);
722                 uint32_t reset = 0;
723                 rte_service_dump_one(f, s, total_cycles, reset);
724                 return 0;
725         }
726
727         /* print all services, as UINT32_MAX was passed as id */
728         fprintf(f, "Services Summary\n");
729         for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
730                 if (!service_valid(i))
731                         continue;
732                 uint32_t reset = 1;
733                 rte_service_dump_one(f, &rte_services[i], total_cycles, reset);
734         }
735
736         fprintf(f, "Service Cores Summary\n");
737         for (i = 0; i < RTE_MAX_LCORE; i++) {
738                 if (lcore_config[i].core_role != ROLE_SERVICE)
739                         continue;
740
741                 uint32_t reset = 1;
742                 service_dump_calls_per_lcore(f, i, reset);
743         }
744
745         return 0;
746 }
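/*
 * Usage sketch (illustrative only): dump statistics for all services and
 * service cores to stdout by passing UINT32_MAX, or pass a valid service id
 * to print a single service. Per-service call/cycle counters are only
 * gathered once enabled, and the dump-all path resets them after printing:
 *
 *     rte_service_set_stats_enable(id, 1);
 *     ...
 *     rte_service_dump(stdout, UINT32_MAX);
 */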