service: add attribute API
[dpdk.git] / lib / librte_eal / common / rte_service.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <stdio.h>
#include <unistd.h>
#include <inttypes.h>
#include <limits.h>
#include <string.h>
#include <dirent.h>

#include <rte_compat.h>
#include <rte_service.h>
#include "include/rte_service_component.h"

#include <rte_eal.h>
#include <rte_lcore.h>
#include <rte_common.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_atomic.h>
#include <rte_memory.h>
#include <rte_malloc.h>

#define RTE_SERVICE_NUM_MAX 64

#define SERVICE_F_REGISTERED    (1 << 0)
#define SERVICE_F_STATS_ENABLED (1 << 1)
#define SERVICE_F_START_CHECK   (1 << 2)

/* runstates for services and lcores, denoting if they are active or not */
#define RUNSTATE_STOPPED 0
#define RUNSTATE_RUNNING 1

/* internal representation of a service */
struct rte_service_spec_impl {
        /* public part of the struct */
        struct rte_service_spec spec;

        /* atomic lock that when set indicates a service core is currently
         * running this service callback. When not set, a core may take the
         * lock and then run the service callback.
         */
        rte_atomic32_t execute_lock;

        /* API set/get-able variables */
        int8_t app_runstate;
        int8_t comp_runstate;
        uint8_t internal_flags;

        /* per service statistics */
        rte_atomic32_t num_mapped_cores;
        uint64_t calls;
        uint64_t cycles_spent;
} __rte_cache_aligned;

/* the internal values of a service core */
struct core_state {
        /* bitmask of the service IDs mapped to run on this core */
        uint64_t service_mask;
        uint8_t runstate; /* running or stopped */
        uint8_t is_service_core; /* set if core is currently a service core */

        uint64_t loops;
        uint64_t calls_per_service[RTE_SERVICE_NUM_MAX];
} __rte_cache_aligned;

static uint32_t rte_service_count;
static struct rte_service_spec_impl *rte_services;
static struct core_state *lcore_states;
static uint32_t rte_service_library_initialized;

int32_t rte_service_init(void)
{
        if (rte_service_library_initialized) {
                printf("service library init() called, init flag %d\n",
                        rte_service_library_initialized);
                return -EALREADY;
        }

        rte_services = rte_calloc("rte_services", RTE_SERVICE_NUM_MAX,
                        sizeof(struct rte_service_spec_impl),
                        RTE_CACHE_LINE_SIZE);
        if (!rte_services) {
                printf("error allocating rte services array\n");
                goto fail_mem;
        }

        lcore_states = rte_calloc("rte_service_core_states", RTE_MAX_LCORE,
                        sizeof(struct core_state), RTE_CACHE_LINE_SIZE);
        if (!lcore_states) {
                printf("error allocating core states array\n");
                goto fail_mem;
        }

        int i;
        struct rte_config *cfg = rte_eal_get_configuration();
        for (i = 0; i < RTE_MAX_LCORE; i++) {
                /* every service-role core except the master becomes a
                 * service core by default
                 */
                if (lcore_config[i].core_role == ROLE_SERVICE) {
                        if ((unsigned int)i == cfg->master_lcore)
                                continue;
                        rte_service_lcore_add(i);
                }
        }

        rte_service_library_initialized = 1;
        return 0;
fail_mem:
        if (rte_services)
                rte_free(rte_services);
        if (lcore_states)
                rte_free(lcore_states);
        return -ENOMEM;
}
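
/* Usage sketch (illustrative only, compiled out behind a hypothetical
 * SERVICE_USAGE_EXAMPLES macro): rte_service_init() is invoked internally
 * during rte_eal_init(), so an application only needs a successful EAL
 * init before using the service APIs. The function name below is made up
 * for illustration.
 */
#ifdef SERVICE_USAGE_EXAMPLES
static int
example_eal_setup(int argc, char **argv)
{
        /* cores passed via the EAL '-s' service coremask option take the
         * ROLE_SERVICE role and are picked up by the init loop above
         */
        int ret = rte_eal_init(argc, argv);
        if (ret < 0)
                return ret; /* EAL (and hence service library) init failed */
        return 0;
}
#endif /* SERVICE_USAGE_EXAMPLES */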

void
rte_service_finalize(void)
{
        if (!rte_service_library_initialized)
                return;

        if (rte_services)
                rte_free(rte_services);

        if (lcore_states)
                rte_free(lcore_states);

        rte_service_library_initialized = 0;
}

/* returns 1 if service is registered and has not been unregistered
 * Returns 0 if service never registered, or has been unregistered
 */
static inline int
service_valid(uint32_t id)
{
        return !!(rte_services[id].internal_flags & SERVICE_F_REGISTERED);
}

/* validate ID and retrieve service pointer, or return error value */
#define SERVICE_VALID_GET_OR_ERR_RET(id, service, retval) do {          \
        if ((id) >= RTE_SERVICE_NUM_MAX || !service_valid(id))          \
                return retval;                                          \
        (service) = &rte_services[id];                                  \
} while (0)

/* returns 1 if statistics should be collected for service
 * Returns 0 if statistics should not be collected for service
 */
static inline int
service_stats_enabled(struct rte_service_spec_impl *impl)
{
        return !!(impl->internal_flags & SERVICE_F_STATS_ENABLED);
}

static inline int
service_mt_safe(struct rte_service_spec_impl *s)
{
        return !!(s->spec.capabilities & RTE_SERVICE_CAP_MT_SAFE);
}

int32_t
rte_service_set_stats_enable(uint32_t id, int32_t enabled)
{
        struct rte_service_spec_impl *s;
        /* return -EINVAL (not 0) so callers can detect an invalid id */
        SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);

        if (enabled)
                s->internal_flags |= SERVICE_F_STATS_ENABLED;
        else
                s->internal_flags &= ~(SERVICE_F_STATS_ENABLED);

        return 0;
}

int32_t
rte_service_set_runstate_mapped_check(uint32_t id, int32_t enabled)
{
        struct rte_service_spec_impl *s;
        SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);

        if (enabled)
                s->internal_flags |= SERVICE_F_START_CHECK;
        else
                s->internal_flags &= ~(SERVICE_F_START_CHECK);

        return 0;
}

uint32_t
rte_service_get_count(void)
{
        return rte_service_count;
}

int32_t
rte_service_get_by_name(const char *name, uint32_t *service_id)
{
        if (!service_id)
                return -EINVAL;

        int i;
        for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
                if (service_valid(i) &&
                                strcmp(name, rte_services[i].spec.name) == 0) {
                        *service_id = i;
                        return 0;
                }
        }

        return -ENODEV;
}
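
/* Usage sketch (illustrative only, not part of this file's build): looking
 * up a service id from its registered name. "example_service" and the
 * function name are hypothetical.
 */
#ifdef SERVICE_USAGE_EXAMPLES
static int
example_lookup_service(void)
{
        uint32_t id;
        int ret = rte_service_get_by_name("example_service", &id);
        if (ret == -ENODEV)
                printf("no service registered under that name\n");
        else if (ret == 0)
                printf("service '%s' has id %u\n",
                                rte_service_get_name(id), id);
        return ret;
}
#endif /* SERVICE_USAGE_EXAMPLES */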

const char *
rte_service_get_name(uint32_t id)
{
        struct rte_service_spec_impl *s;
        SERVICE_VALID_GET_OR_ERR_RET(id, s, NULL);
        return s->spec.name;
}

int32_t
rte_service_probe_capability(uint32_t id, uint32_t capability)
{
        struct rte_service_spec_impl *s;
        SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
        return !!(s->spec.capabilities & capability);
}

int32_t
rte_service_component_register(const struct rte_service_spec *spec,
                               uint32_t *id_ptr)
{
        uint32_t i;
        int32_t free_slot = -1;

        if (spec->callback == NULL || strlen(spec->name) == 0)
                return -EINVAL;

        for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
                if (!service_valid(i)) {
                        free_slot = i;
                        break;
                }
        }

        if ((free_slot < 0) || (i == RTE_SERVICE_NUM_MAX))
                return -ENOSPC;

        struct rte_service_spec_impl *s = &rte_services[free_slot];
        s->spec = *spec;
        s->internal_flags |= SERVICE_F_REGISTERED | SERVICE_F_START_CHECK;

        rte_smp_wmb();
        rte_service_count++;

        if (id_ptr)
                *id_ptr = free_slot;

        return 0;
}
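
/* Usage sketch (illustrative only, not part of this file's build): how a
 * component registers a service and marks it runnable. The callback,
 * counter and function names are hypothetical.
 */
#ifdef SERVICE_USAGE_EXAMPLES
/* a service callback has the rte_service_func signature and returns 0 on
 * success
 */
static int32_t
example_service_cb(void *userdata)
{
        uint64_t *counter = userdata;
        (*counter)++;
        return 0;
}

static int
example_register(void)
{
        static uint64_t counter;
        struct rte_service_spec spec;
        uint32_t id;

        memset(&spec, 0, sizeof(spec));
        snprintf(spec.name, sizeof(spec.name), "example_service");
        spec.callback = example_service_cb;
        spec.callback_userdata = &counter;
        /* set RTE_SERVICE_CAP_MT_SAFE if the callback tolerates concurrent
         * invocation; leaving 0 makes service_run() take the execute_lock
         */
        spec.capabilities = 0;

        int ret = rte_service_component_register(&spec, &id);
        if (ret)
                return ret; /* -EINVAL or -ENOSPC */
        /* the component declares itself ready to be run */
        return rte_service_component_runstate_set(id, 1);
}
#endif /* SERVICE_USAGE_EXAMPLES */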

int32_t
rte_service_component_unregister(uint32_t id)
{
        uint32_t i;
        struct rte_service_spec_impl *s;
        SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);

        rte_service_count--;
        rte_smp_wmb();

        s->internal_flags &= ~(SERVICE_F_REGISTERED);

        /* clear the run-bit in all cores */
        for (i = 0; i < RTE_MAX_LCORE; i++)
                lcore_states[i].service_mask &= ~(UINT64_C(1) << id);

        memset(&rte_services[id], 0, sizeof(struct rte_service_spec_impl));

        return 0;
}

int32_t
rte_service_component_runstate_set(uint32_t id, uint32_t runstate)
{
        struct rte_service_spec_impl *s;
        SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);

        if (runstate)
                s->comp_runstate = RUNSTATE_RUNNING;
        else
                s->comp_runstate = RUNSTATE_STOPPED;

        rte_smp_wmb();
        return 0;
}

int32_t
rte_service_runstate_set(uint32_t id, uint32_t runstate)
{
        struct rte_service_spec_impl *s;
        SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);

        if (runstate)
                s->app_runstate = RUNSTATE_RUNNING;
        else
                s->app_runstate = RUNSTATE_STOPPED;

        rte_smp_wmb();
        return 0;
}

int32_t
rte_service_runstate_get(uint32_t id)
{
        struct rte_service_spec_impl *s;
        SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
        rte_smp_rmb();

        int check_disabled = !(s->internal_flags & SERVICE_F_START_CHECK);
        int lcore_mapped = (rte_atomic32_read(&s->num_mapped_cores) > 0);

        return (s->app_runstate == RUNSTATE_RUNNING) &&
                (s->comp_runstate == RUNSTATE_RUNNING) &&
                (check_disabled | lcore_mapped);
}
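
/* Usage sketch (illustrative only, not part of this file's build): both the
 * component runstate and the application runstate must be set before
 * rte_service_runstate_get() reports the service as runnable, plus a core
 * mapping unless the start check was disabled. The function name is
 * hypothetical.
 */
#ifdef SERVICE_USAGE_EXAMPLES
static int
example_enable(uint32_t id)
{
        int ret = rte_service_runstate_set(id, 1);
        if (ret)
                return ret;
        /* returns 1 only once the component is also running and a core is
         * mapped (or the mapped-lcore check is disabled)
         */
        return rte_service_runstate_get(id);
}
#endif /* SERVICE_USAGE_EXAMPLES */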

static inline void
rte_service_runner_do_callback(struct rte_service_spec_impl *s,
                               struct core_state *cs, uint32_t service_idx)
{
        void *userdata = s->spec.callback_userdata;

        if (service_stats_enabled(s)) {
                uint64_t start = rte_rdtsc();
                s->spec.callback(userdata);
                uint64_t end = rte_rdtsc();
                s->cycles_spent += end - start;
                cs->calls_per_service[service_idx]++;
                s->calls++;
        } else
                s->spec.callback(userdata);
}

static inline int32_t
service_run(uint32_t i, struct core_state *cs, uint64_t service_mask)
{
        if (!service_valid(i))
                return -EINVAL;
        struct rte_service_spec_impl *s = &rte_services[i];
        if (s->comp_runstate != RUNSTATE_RUNNING ||
                        s->app_runstate != RUNSTATE_RUNNING ||
                        !(service_mask & (UINT64_C(1) << i)))
                return -ENOEXEC;

        /* check if we need the cmpset: when the service is MT safe, or has
         * at most one core mapped, atomic ops are not required.
         */
        const int use_atomics = (service_mt_safe(s) == 0) &&
                                (rte_atomic32_read(&s->num_mapped_cores) > 1);
        if (use_atomics) {
                if (!rte_atomic32_cmpset((uint32_t *)&s->execute_lock, 0, 1))
                        return -EBUSY;

                rte_service_runner_do_callback(s, cs, i);
                rte_atomic32_clear(&s->execute_lock);
        } else
                rte_service_runner_do_callback(s, cs, i);

        return 0;
}

int32_t rte_service_run_iter_on_app_lcore(uint32_t id,
                uint32_t serialize_mt_unsafe)
{
        /* run service on calling core, using all-ones as the service mask */
        if (!service_valid(id))
                return -EINVAL;

        struct core_state *cs = &lcore_states[rte_lcore_id()];
        struct rte_service_spec_impl *s = &rte_services[id];

        /* Atomically add this core to the mapped cores first, then examine if
         * we can run the service. This avoids a race condition between
         * checking the value, and atomically adding to the mapped count.
         */
        if (serialize_mt_unsafe)
                rte_atomic32_inc(&s->num_mapped_cores);

        if (service_mt_safe(s) == 0 &&
                        rte_atomic32_read(&s->num_mapped_cores) > 1) {
                if (serialize_mt_unsafe)
                        rte_atomic32_dec(&s->num_mapped_cores);
                return -EBUSY;
        }

        int ret = service_run(id, cs, UINT64_MAX);

        if (serialize_mt_unsafe)
                rte_atomic32_dec(&s->num_mapped_cores);

        return ret;
}
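
/* Usage sketch (illustrative only, not part of this file's build): running
 * one iteration of a service on an application lcore instead of a dedicated
 * service core. The function name is hypothetical.
 */
#ifdef SERVICE_USAGE_EXAMPLES
static int
example_run_on_app_lcore(uint32_t id)
{
        /* pass 1 as serialize_mt_unsafe to fail with -EBUSY rather than
         * run an MT-unsafe service concurrently with another core
         */
        int ret = rte_service_run_iter_on_app_lcore(id, 1);
        if (ret == -EBUSY)
                printf("another core is executing this service\n");
        return ret;
}
#endif /* SERVICE_USAGE_EXAMPLES */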

static int32_t
rte_service_runner_func(void *arg)
{
        RTE_SET_USED(arg);
        uint32_t i;
        const int lcore = rte_lcore_id();
        struct core_state *cs = &lcore_states[lcore];

        while (lcore_states[lcore].runstate == RUNSTATE_RUNNING) {
                const uint64_t service_mask = cs->service_mask;

                for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
                        /* return value ignored as no change to code flow */
                        service_run(i, cs, service_mask);
                }

                cs->loops++;

                rte_smp_rmb();
        }

        lcore_config[lcore].state = WAIT;

        return 0;
}

int32_t
rte_service_lcore_count(void)
{
        int32_t count = 0;
        uint32_t i;
        for (i = 0; i < RTE_MAX_LCORE; i++)
                count += lcore_states[i].is_service_core;
        return count;
}

int32_t
rte_service_lcore_list(uint32_t array[], uint32_t n)
{
        uint32_t count = rte_service_lcore_count();
        if (count > n)
                return -ENOMEM;

        if (!array)
                return -EINVAL;

        uint32_t i;
        uint32_t idx = 0;
        for (i = 0; i < RTE_MAX_LCORE; i++) {
                struct core_state *cs = &lcore_states[i];
                if (cs->is_service_core) {
                        array[idx] = i;
                        idx++;
                }
        }

        return count;
}

int32_t
rte_service_lcore_count_services(uint32_t lcore)
{
        if (lcore >= RTE_MAX_LCORE)
                return -EINVAL;

        struct core_state *cs = &lcore_states[lcore];
        if (!cs->is_service_core)
                return -ENOTSUP;

        return __builtin_popcountll(cs->service_mask);
}

int32_t
rte_service_start_with_defaults(void)
{
        /* create a default mapping from cores to services, then start the
         * services to make them transparent to unaware applications.
         */
        uint32_t i;
        int ret;
        uint32_t count = rte_service_get_count();

        int32_t lcore_iter = 0;
        uint32_t ids[RTE_MAX_LCORE] = {0};
        int32_t lcore_count = rte_service_lcore_list(ids, RTE_MAX_LCORE);

        if (lcore_count == 0)
                return -ENOTSUP;

        for (i = 0; (int)i < lcore_count; i++)
                rte_service_lcore_start(ids[i]);

        for (i = 0; i < count; i++) {
                /* do 1:1 core mapping here, with each service getting
                 * assigned a single core by default. If there are more
                 * services than service cores, multiple services are
                 * multiplexed onto the same core; with equal numbers the
                 * mapping stays 1:1.
                 */
                ret = rte_service_map_lcore_set(i, ids[lcore_iter], 1);
                if (ret)
                        return -ENODEV;

                lcore_iter++;
                if (lcore_iter >= lcore_count)
                        lcore_iter = 0;

                ret = rte_service_runstate_set(i, 1);
                if (ret)
                        return -ENOEXEC;
        }

        return 0;
}
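
/* Usage sketch (illustrative only, not part of this file's build): the
 * one-call default startup an unaware application can use after EAL init.
 * The function name is hypothetical.
 */
#ifdef SERVICE_USAGE_EXAMPLES
static int
example_default_start(void)
{
        /* maps every registered service across the available service cores
         * and starts both the cores and the services
         */
        int ret = rte_service_start_with_defaults();
        if (ret == -ENOTSUP)
                printf("no service cores: pass '-s <coremask>' to the EAL\n");
        return ret;
}
#endif /* SERVICE_USAGE_EXAMPLES */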

static int32_t
service_update(struct rte_service_spec *service, uint32_t lcore,
                uint32_t *set, uint32_t *enabled)
{
        uint32_t i;
        int32_t sid = -1;

        for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
                if ((struct rte_service_spec *)&rte_services[i] == service &&
                                service_valid(i)) {
                        sid = i;
                        break;
                }
        }

        if (sid == -1 || lcore >= RTE_MAX_LCORE)
                return -EINVAL;

        if (!lcore_states[lcore].is_service_core)
                return -EINVAL;

        uint64_t sid_mask = UINT64_C(1) << sid;
        if (set) {
                uint64_t lcore_mapped = lcore_states[lcore].service_mask &
                        sid_mask;

                if (*set && !lcore_mapped) {
                        lcore_states[lcore].service_mask |= sid_mask;
                        rte_atomic32_inc(&rte_services[sid].num_mapped_cores);
                }
                if (!*set && lcore_mapped) {
                        lcore_states[lcore].service_mask &= ~(sid_mask);
                        rte_atomic32_dec(&rte_services[sid].num_mapped_cores);
                }
        }

        if (enabled)
                *enabled = !!(lcore_states[lcore].service_mask & (sid_mask));

        rte_smp_wmb();

        return 0;
}

int32_t
rte_service_map_lcore_set(uint32_t id, uint32_t lcore, uint32_t enabled)
{
        struct rte_service_spec_impl *s;
        SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
        uint32_t on = enabled > 0;
        return service_update(&s->spec, lcore, &on, NULL);
}

int32_t
rte_service_map_lcore_get(uint32_t id, uint32_t lcore)
{
        struct rte_service_spec_impl *s;
        SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
        uint32_t enabled;
        int ret = service_update(&s->spec, lcore, NULL, &enabled);
        if (ret == 0)
                return enabled;
        return ret;
}
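
/* Usage sketch (illustrative only, not part of this file's build): manual
 * core-to-service mapping instead of the defaults helper above. The
 * function name is hypothetical.
 */
#ifdef SERVICE_USAGE_EXAMPLES
static int
example_manual_mapping(uint32_t id, uint32_t lcore)
{
        /* turn the lcore into a service core, map the service, start both */
        int ret = rte_service_lcore_add(lcore);
        if (ret && ret != -EALREADY)
                return ret;
        ret = rte_service_map_lcore_set(id, lcore, 1);
        if (ret)
                return ret;
        ret = rte_service_runstate_set(id, 1);
        if (ret)
                return ret;
        ret = rte_service_lcore_start(lcore);
        /* sanity check: the mapping should now read back as enabled */
        if (rte_service_map_lcore_get(id, lcore) != 1)
                return -EINVAL;
        return ret;
}
#endif /* SERVICE_USAGE_EXAMPLES */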

static void
set_lcore_state(uint32_t lcore, int32_t state)
{
        /* mark core state in hugepage backed config */
        struct rte_config *cfg = rte_eal_get_configuration();
        cfg->lcore_role[lcore] = state;

        /* mark state in process local lcore_config */
        lcore_config[lcore].core_role = state;

        /* update per-lcore optimized state tracking */
        lcore_states[lcore].is_service_core = (state == ROLE_SERVICE);
}

int32_t
rte_service_lcore_reset_all(void)
{
        /* loop over cores, reset all to mask 0 */
        uint32_t i;
        for (i = 0; i < RTE_MAX_LCORE; i++) {
                if (lcore_states[i].is_service_core) {
                        lcore_states[i].service_mask = 0;
                        set_lcore_state(i, ROLE_RTE);
                        lcore_states[i].runstate = RUNSTATE_STOPPED;
                }
        }
        for (i = 0; i < RTE_SERVICE_NUM_MAX; i++)
                rte_atomic32_set(&rte_services[i].num_mapped_cores, 0);

        rte_smp_wmb();

        return 0;
}

int32_t
rte_service_lcore_add(uint32_t lcore)
{
        if (lcore >= RTE_MAX_LCORE)
                return -EINVAL;
        if (lcore_states[lcore].is_service_core)
                return -EALREADY;

        set_lcore_state(lcore, ROLE_SERVICE);

        /* ensure that after adding a core the mask and state are defaults */
        lcore_states[lcore].service_mask = 0;
        lcore_states[lcore].runstate = RUNSTATE_STOPPED;

        rte_smp_wmb();

        return rte_eal_wait_lcore(lcore);
}

int32_t
rte_service_lcore_del(uint32_t lcore)
{
        if (lcore >= RTE_MAX_LCORE)
                return -EINVAL;

        struct core_state *cs = &lcore_states[lcore];
        if (!cs->is_service_core)
                return -EINVAL;

        if (cs->runstate != RUNSTATE_STOPPED)
                return -EBUSY;

        set_lcore_state(lcore, ROLE_RTE);

        rte_smp_wmb();
        return 0;
}

int32_t
rte_service_lcore_start(uint32_t lcore)
{
        if (lcore >= RTE_MAX_LCORE)
                return -EINVAL;

        struct core_state *cs = &lcore_states[lcore];
        if (!cs->is_service_core)
                return -EINVAL;

        if (cs->runstate == RUNSTATE_RUNNING)
                return -EALREADY;

        /* set the core to run state first, and then launch; otherwise the
         * launched function would return immediately, as the runstate is
         * what keeps it in the service poll loop
         */
        lcore_states[lcore].runstate = RUNSTATE_RUNNING;

        int ret = rte_eal_remote_launch(rte_service_runner_func, NULL, lcore);
        /* returns -EBUSY if the core is already launched, 0 on success */
        return ret;
}

int32_t
rte_service_lcore_stop(uint32_t lcore)
{
        if (lcore >= RTE_MAX_LCORE)
                return -EINVAL;

        if (lcore_states[lcore].runstate == RUNSTATE_STOPPED)
                return -EALREADY;

        uint32_t i;
        uint64_t service_mask = lcore_states[lcore].service_mask;
        for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
                /* use !! so the test also works for service ids >= 32,
                 * whose mask bits would be truncated by the int32_t cast
                 */
                int32_t enabled = !!(service_mask & (UINT64_C(1) << i));
                int32_t service_running = rte_service_runstate_get(i);
                int32_t only_core = (1 ==
                        rte_atomic32_read(&rte_services[i].num_mapped_cores));

                /* if the core is mapped, and the service is running, and this
                 * is the only core that is mapped, the service would cease to
                 * run if this core stopped, so fail instead.
                 */
                if (enabled && service_running && only_core)
                        return -EBUSY;
        }

        lcore_states[lcore].runstate = RUNSTATE_STOPPED;

        return 0;
}
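
/* Usage sketch (illustrative only, not part of this file's build): graceful
 * teardown of a service core. The function name is hypothetical.
 */
#ifdef SERVICE_USAGE_EXAMPLES
static int
example_teardown(uint32_t id, uint32_t lcore)
{
        /* unmap first so the stop call cannot fail with -EBUSY for being
         * the only core running the service, then stop and release the core
         */
        int ret = rte_service_map_lcore_set(id, lcore, 0);
        if (ret)
                return ret;
        ret = rte_service_lcore_stop(lcore);
        if (ret && ret != -EALREADY)
                return ret;
        return rte_service_lcore_del(lcore);
}
#endif /* SERVICE_USAGE_EXAMPLES */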

int32_t
rte_service_attr_get(uint32_t id, uint32_t attr_id, uint32_t *attr_value)
{
        struct rte_service_spec_impl *s;
        SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);

        if (!attr_value)
                return -EINVAL;

        switch (attr_id) {
        case RTE_SERVICE_ATTR_CYCLES:
                /* note: stats are accumulated as 64-bit values, so the
                 * 32-bit attr_value wraps once a counter exceeds 2^32
                 */
                *attr_value = s->cycles_spent;
                return 0;
        case RTE_SERVICE_ATTR_CALL_COUNT:
                *attr_value = s->calls;
                return 0;
        default:
                return -EINVAL;
        }
}

int32_t __rte_experimental
rte_service_lcore_attr_get(uint32_t lcore, uint32_t attr_id,
                           uint64_t *attr_value)
{
        struct core_state *cs;

        if (lcore >= RTE_MAX_LCORE || !attr_value)
                return -EINVAL;

        cs = &lcore_states[lcore];
        if (!cs->is_service_core)
                return -ENOTSUP;

        switch (attr_id) {
        case RTE_SERVICE_LCORE_ATTR_LOOPS:
                *attr_value = cs->loops;
                return 0;
        default:
                return -EINVAL;
        }
}

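/* Usage sketch (illustrative only, not part of this file's build): reading
 * the per-service and per-lcore attributes, including the loops attribute
 * added by this patch. The function name is hypothetical; per-service stats
 * require rte_service_set_stats_enable() to have been called.
 */
#ifdef SERVICE_USAGE_EXAMPLES
static void
example_read_attributes(uint32_t id, uint32_t lcore)
{
        uint32_t calls = 0, cycles = 0;
        uint64_t loops = 0;

        if (rte_service_attr_get(id, RTE_SERVICE_ATTR_CALL_COUNT,
                                &calls) == 0 &&
                        rte_service_attr_get(id, RTE_SERVICE_ATTR_CYCLES,
                                &cycles) == 0)
                printf("service %u: %u calls, %u cycles\n", id, calls, cycles);

        /* the lcore attribute: iterations of the service poll loop */
        if (rte_service_lcore_attr_get(lcore, RTE_SERVICE_LCORE_ATTR_LOOPS,
                                &loops) == 0)
                printf("lcore %u: %" PRIu64 " loops\n", lcore, loops);
}
#endif /* SERVICE_USAGE_EXAMPLES */
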
static void
rte_service_dump_one(FILE *f, struct rte_service_spec_impl *s,
                     uint64_t all_cycles, uint32_t reset)
{
        /* avoid divide by zero */
        if (all_cycles == 0)
                all_cycles = 1;

        int calls = 1;
        if (s->calls != 0)
                calls = s->calls;

        if (reset) {
                s->cycles_spent = 0;
                s->calls = 0;
                return;
        }

        fprintf(f, "  %s: stats %d\tcalls %"PRIu64"\tcycles %"
                        PRIu64"\tavg: %"PRIu64"\n",
                        s->spec.name, service_stats_enabled(s), s->calls,
                        s->cycles_spent, s->cycles_spent / calls);
}

int32_t
rte_service_attr_reset_all(uint32_t id)
{
        struct rte_service_spec_impl *s;
        SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);

        int reset = 1;
        rte_service_dump_one(NULL, s, 0, reset);
        return 0;
}

int32_t __rte_experimental
rte_service_lcore_attr_reset_all(uint32_t lcore)
{
        struct core_state *cs;

        if (lcore >= RTE_MAX_LCORE)
                return -EINVAL;

        cs = &lcore_states[lcore];
        if (!cs->is_service_core)
                return -ENOTSUP;

        cs->loops = 0;

        return 0;
}

static void
service_dump_calls_per_lcore(FILE *f, uint32_t lcore, uint32_t reset)
{
        uint32_t i;
        struct core_state *cs = &lcore_states[lcore];

        /* unsigned conversion to match the uint32_t lcore id */
        fprintf(f, "%02"PRIu32"\t", lcore);
        for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
                if (!service_valid(i))
                        continue;
                fprintf(f, "%"PRIu64"\t", cs->calls_per_service[i]);
                if (reset)
                        cs->calls_per_service[i] = 0;
        }
        fprintf(f, "\n");
}

int32_t
rte_service_dump(FILE *f, uint32_t id)
{
        uint32_t i;
        int print_one = (id != UINT32_MAX);

        uint64_t total_cycles = 0;

        for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
                if (!service_valid(i))
                        continue;
                total_cycles += rte_services[i].cycles_spent;
        }

        /* print only the specified service */
        if (print_one) {
                struct rte_service_spec_impl *s;
                SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
                fprintf(f, "Service %s Summary\n", s->spec.name);
                uint32_t reset = 0;
                rte_service_dump_one(f, s, total_cycles, reset);
                return 0;
        }

        /* print all services, as UINT32_MAX was passed as id */
        fprintf(f, "Services Summary\n");
        for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
                if (!service_valid(i))
                        continue;
                uint32_t reset = 0;
                rte_service_dump_one(f, &rte_services[i], total_cycles, reset);
        }

        fprintf(f, "Service Cores Summary\n");
        for (i = 0; i < RTE_MAX_LCORE; i++) {
                if (lcore_config[i].core_role != ROLE_SERVICE)
                        continue;

                uint32_t reset = 0;
                service_dump_calls_per_lcore(f, i, reset);
        }

        return 0;
}
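
/* Usage sketch (illustrative only, not part of this file's build): dumping
 * service statistics to stdout. The function name is hypothetical.
 */
#ifdef SERVICE_USAGE_EXAMPLES
static void
example_dump_stats(void)
{
        /* UINT32_MAX dumps every registered service plus the per-core calls
         * table; passing a valid id prints a single service summary instead
         */
        rte_service_dump(stdout, UINT32_MAX);
}
#endif /* SERVICE_USAGE_EXAMPLES */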