eal: make lcore config private
[dpdk.git] lib/librte_eal/common/rte_service.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <stdio.h>
#include <unistd.h>
#include <inttypes.h>
#include <limits.h>
#include <string.h>

#include <rte_compat.h>
#include <rte_service.h>
#include "include/rte_service_component.h"

#include <rte_eal.h>
#include <rte_lcore.h>
#include <rte_common.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_atomic.h>
#include <rte_memory.h>
#include <rte_malloc.h>

#include "eal_private.h"

#define RTE_SERVICE_NUM_MAX 64

#define SERVICE_F_REGISTERED    (1 << 0)
#define SERVICE_F_STATS_ENABLED (1 << 1)
#define SERVICE_F_START_CHECK   (1 << 2)

/* runstates for services and lcores, denoting if they are active or not */
#define RUNSTATE_STOPPED 0
#define RUNSTATE_RUNNING 1

/* internal representation of a service */
struct rte_service_spec_impl {
        /* public part of the struct */
        struct rte_service_spec spec;

        /* atomic lock that, when set, indicates a service core is currently
         * running this service callback. When not set, a core may take the
         * lock and then run the service callback.
         */
        rte_atomic32_t execute_lock;

        /* API set/get-able variables */
        int8_t app_runstate;
        int8_t comp_runstate;
        uint8_t internal_flags;

        /* per service statistics */
        rte_atomic32_t num_mapped_cores;
        uint64_t calls;
        uint64_t cycles_spent;
} __rte_cache_aligned;
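
/* Illustrative sketch (not part of this file): the execute_lock gives
 * multi-thread-unsafe services run-to-completion exclusion. A core that
 * fails the compare-and-set reports -EBUSY for this iteration instead of
 * running the callback; see service_run() below for the real code.
 *
 *      if (rte_atomic32_cmpset((uint32_t *)&s->execute_lock, 0, 1)) {
 *              s->spec.callback(s->spec.callback_userdata);
 *              rte_atomic32_clear(&s->execute_lock);
 *      }
 */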

/* the internal values of a service core */
struct core_state {
        /* bitmap of the service IDs mapped to (and run on) this core */
        uint64_t service_mask;
        uint8_t runstate; /* running or stopped */
        uint8_t is_service_core; /* set if core is currently a service core */
        uint8_t service_active_on_lcore[RTE_SERVICE_NUM_MAX];
        uint64_t loops;
        uint64_t calls_per_service[RTE_SERVICE_NUM_MAX];
} __rte_cache_aligned;

static uint32_t rte_service_count;
static struct rte_service_spec_impl *rte_services;
static struct core_state *lcore_states;
static uint32_t rte_service_library_initialized;

int32_t
rte_service_init(void)
{
        if (rte_service_library_initialized) {
                RTE_LOG(NOTICE, EAL,
                        "service library init() called, init flag %d\n",
                        rte_service_library_initialized);
                return -EALREADY;
        }

        rte_services = rte_calloc("rte_services", RTE_SERVICE_NUM_MAX,
                        sizeof(struct rte_service_spec_impl),
                        RTE_CACHE_LINE_SIZE);
        if (!rte_services) {
                RTE_LOG(ERR, EAL, "error allocating rte services array\n");
                goto fail_mem;
        }

        lcore_states = rte_calloc("rte_service_core_states", RTE_MAX_LCORE,
                        sizeof(struct core_state), RTE_CACHE_LINE_SIZE);
        if (!lcore_states) {
                RTE_LOG(ERR, EAL, "error allocating core states array\n");
                goto fail_mem;
        }

        int i;
        int count = 0;
        struct rte_config *cfg = rte_eal_get_configuration();
        for (i = 0; i < RTE_MAX_LCORE; i++) {
                if (lcore_config[i].core_role == ROLE_SERVICE) {
                        if ((unsigned int)i == cfg->master_lcore)
                                continue;
                        rte_service_lcore_add(i);
                        count++;
                }
        }

        rte_service_library_initialized = 1;
        return 0;
fail_mem:
        rte_free(rte_services);
        rte_free(lcore_states);
        return -ENOMEM;
}
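
/* Usage note (illustrative, not part of this file): lcores are given
 * ROLE_SERVICE before rte_service_init() runs, typically via the EAL
 * service coremask option on the command line, e.g. (assuming lcores
 * 2 and 3 exist on the machine):
 *
 *      ./app -l 0-3 -s 0xc
 *
 * The init loop above then picks lcores 2 and 3 up as service cores.
 */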

void
rte_service_finalize(void)
{
        if (!rte_service_library_initialized)
                return;

        rte_free(rte_services);
        rte_free(lcore_states);

        rte_service_library_initialized = 0;
}

/* Returns 1 if service is registered and has not been unregistered.
 * Returns 0 if service never registered, or has been unregistered.
 */
static inline int
service_valid(uint32_t id)
{
        return !!(rte_services[id].internal_flags & SERVICE_F_REGISTERED);
}

/* validate ID and retrieve service pointer, or return error value */
#define SERVICE_VALID_GET_OR_ERR_RET(id, service, retval) do {          \
        if (id >= RTE_SERVICE_NUM_MAX || !service_valid(id))            \
                return retval;                                          \
        service = &rte_services[id];                                    \
} while (0)

/* Returns 1 if statistics should be collected for service.
 * Returns 0 if statistics should not be collected for service.
 */
static inline int
service_stats_enabled(struct rte_service_spec_impl *impl)
{
        return !!(impl->internal_flags & SERVICE_F_STATS_ENABLED);
}

static inline int
service_mt_safe(struct rte_service_spec_impl *s)
{
        return !!(s->spec.capabilities & RTE_SERVICE_CAP_MT_SAFE);
}

int32_t
rte_service_set_stats_enable(uint32_t id, int32_t enabled)
{
        struct rte_service_spec_impl *s;
        SERVICE_VALID_GET_OR_ERR_RET(id, s, 0);

        if (enabled)
                s->internal_flags |= SERVICE_F_STATS_ENABLED;
        else
                s->internal_flags &= ~(SERVICE_F_STATS_ENABLED);

        return 0;
}

int32_t
rte_service_set_runstate_mapped_check(uint32_t id, int32_t enabled)
{
        struct rte_service_spec_impl *s;
        SERVICE_VALID_GET_OR_ERR_RET(id, s, 0);

        if (enabled)
                s->internal_flags |= SERVICE_F_START_CHECK;
        else
                s->internal_flags &= ~(SERVICE_F_START_CHECK);

        return 0;
}

uint32_t
rte_service_get_count(void)
{
        return rte_service_count;
}

int32_t
rte_service_get_by_name(const char *name, uint32_t *service_id)
{
        if (!service_id)
                return -EINVAL;

        int i;
        for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
                if (service_valid(i) &&
                                strcmp(name, rte_services[i].spec.name) == 0) {
                        *service_id = i;
                        return 0;
                }
        }

        return -ENODEV;
}

const char *
rte_service_get_name(uint32_t id)
{
        struct rte_service_spec_impl *s;
        SERVICE_VALID_GET_OR_ERR_RET(id, s, 0);
        return s->spec.name;
}

int32_t
rte_service_probe_capability(uint32_t id, uint32_t capability)
{
        struct rte_service_spec_impl *s;
        SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
        return !!(s->spec.capabilities & capability);
}

int32_t
rte_service_component_register(const struct rte_service_spec *spec,
                               uint32_t *id_ptr)
{
        uint32_t i;
        int32_t free_slot = -1;

        if (spec->callback == NULL || strlen(spec->name) == 0)
                return -EINVAL;

        for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
                if (!service_valid(i)) {
                        free_slot = i;
                        break;
                }
        }

        if ((free_slot < 0) || (i == RTE_SERVICE_NUM_MAX))
                return -ENOSPC;

        struct rte_service_spec_impl *s = &rte_services[free_slot];
        s->spec = *spec;
        s->internal_flags |= SERVICE_F_REGISTERED | SERVICE_F_START_CHECK;

        rte_smp_wmb();
        rte_service_count++;

        if (id_ptr)
                *id_ptr = free_slot;

        return 0;
}
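
/* Usage sketch (illustrative): how a component might fill a spec and
 * register it. The callback, userdata and names here are assumptions
 * for the example, not part of this file.
 *
 *      static int32_t
 *      my_service_cb(void *userdata)
 *      {
 *              uint64_t *iterations = userdata;
 *              (*iterations)++;        // one unit of work per call
 *              return 0;
 *      }
 *
 *      static uint64_t my_iterations;
 *      struct rte_service_spec spec = {
 *              .name = "my_service",
 *              .callback = my_service_cb,
 *              .callback_userdata = &my_iterations,
 *      };
 *      uint32_t sid;
 *      int ret = rte_service_component_register(&spec, &sid);
 *      if (ret == 0)
 *              rte_service_component_runstate_set(sid, 1);
 */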

int32_t
rte_service_component_unregister(uint32_t id)
{
        uint32_t i;
        struct rte_service_spec_impl *s;
        SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);

        rte_service_count--;
        rte_smp_wmb();

        s->internal_flags &= ~(SERVICE_F_REGISTERED);

        /* clear the run-bit in all cores */
        for (i = 0; i < RTE_MAX_LCORE; i++)
                lcore_states[i].service_mask &= ~(UINT64_C(1) << id);

        memset(&rte_services[id], 0, sizeof(struct rte_service_spec_impl));

        return 0;
}

int32_t
rte_service_component_runstate_set(uint32_t id, uint32_t runstate)
{
        struct rte_service_spec_impl *s;
        SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);

        if (runstate)
                s->comp_runstate = RUNSTATE_RUNNING;
        else
                s->comp_runstate = RUNSTATE_STOPPED;

        rte_smp_wmb();
        return 0;
}

int32_t
rte_service_runstate_set(uint32_t id, uint32_t runstate)
{
        struct rte_service_spec_impl *s;
        SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);

        if (runstate)
                s->app_runstate = RUNSTATE_RUNNING;
        else
                s->app_runstate = RUNSTATE_STOPPED;

        rte_smp_wmb();
        return 0;
}

int32_t
rte_service_runstate_get(uint32_t id)
{
        struct rte_service_spec_impl *s;
        SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
        rte_smp_rmb();

        int check_disabled = !(s->internal_flags & SERVICE_F_START_CHECK);
        int lcore_mapped = (rte_atomic32_read(&s->num_mapped_cores) > 0);

        return (s->app_runstate == RUNSTATE_RUNNING) &&
                (s->comp_runstate == RUNSTATE_RUNNING) &&
                (check_disabled | lcore_mapped);
}

static inline void
rte_service_runner_do_callback(struct rte_service_spec_impl *s,
                               struct core_state *cs, uint32_t service_idx)
{
        void *userdata = s->spec.callback_userdata;

        if (service_stats_enabled(s)) {
                uint64_t start = rte_rdtsc();
                s->spec.callback(userdata);
                uint64_t end = rte_rdtsc();
                s->cycles_spent += end - start;
                cs->calls_per_service[service_idx]++;
                s->calls++;
        } else
                s->spec.callback(userdata);
}

static inline int32_t
service_run(uint32_t i, struct core_state *cs, uint64_t service_mask)
{
        if (!service_valid(i))
                return -EINVAL;
        struct rte_service_spec_impl *s = &rte_services[i];
        if (s->comp_runstate != RUNSTATE_RUNNING ||
                        s->app_runstate != RUNSTATE_RUNNING ||
                        !(service_mask & (UINT64_C(1) << i))) {
                cs->service_active_on_lcore[i] = 0;
                return -ENOEXEC;
        }

        cs->service_active_on_lcore[i] = 1;

        /* Check if a cmpset is required: if the service is MT safe, or has
         * at most one core mapped, atomic ops are not needed to serialize
         * the callback.
         */
        const int use_atomics = (service_mt_safe(s) == 0) &&
                                (rte_atomic32_read(&s->num_mapped_cores) > 1);
        if (use_atomics) {
                if (!rte_atomic32_cmpset((uint32_t *)&s->execute_lock, 0, 1))
                        return -EBUSY;

                rte_service_runner_do_callback(s, cs, i);
                rte_atomic32_clear(&s->execute_lock);
        } else
                rte_service_runner_do_callback(s, cs, i);

        return 0;
}

int32_t
rte_service_may_be_active(uint32_t id)
{
        uint32_t ids[RTE_MAX_LCORE] = {0};
        int32_t lcore_count = rte_service_lcore_list(ids, RTE_MAX_LCORE);
        int i;

        if (id >= RTE_SERVICE_NUM_MAX || !service_valid(id))
                return -EINVAL;

        for (i = 0; i < lcore_count; i++) {
                /* index by the lcore id stored in ids[], not by the
                 * position in the returned list
                 */
                if (lcore_states[ids[i]].service_active_on_lcore[id])
                        return 1;
        }

        return 0;
}

int32_t
rte_service_run_iter_on_app_lcore(uint32_t id, uint32_t serialize_mt_unsafe)
{
        /* run service on calling core, using all-ones as the service mask */
        if (id >= RTE_SERVICE_NUM_MAX || !service_valid(id))
                return -EINVAL;

        struct core_state *cs = &lcore_states[rte_lcore_id()];
        struct rte_service_spec_impl *s = &rte_services[id];

        /* Atomically add this core to the mapped cores first, then examine if
         * we can run the service. This avoids a race condition between
         * checking the value, and atomically adding to the mapped count.
         */
        if (serialize_mt_unsafe)
                rte_atomic32_inc(&s->num_mapped_cores);

        if (service_mt_safe(s) == 0 &&
                        rte_atomic32_read(&s->num_mapped_cores) > 1) {
                if (serialize_mt_unsafe)
                        rte_atomic32_dec(&s->num_mapped_cores);
                return -EBUSY;
        }

        int ret = service_run(id, cs, UINT64_MAX);

        if (serialize_mt_unsafe)
                rte_atomic32_dec(&s->num_mapped_cores);

        return ret;
}
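
/* Usage sketch (illustrative): an application lcore polling a service
 * directly instead of dedicating a service core. "sid" and "keep_running"
 * are assumed application names; sid holds a valid service id, e.g. from
 * rte_service_get_by_name().
 *
 *      while (keep_running) {
 *              int ret = rte_service_run_iter_on_app_lcore(sid, 1);
 *              if (ret == -EBUSY)
 *                      continue;       // another core holds the service
 *      }
 */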

static int32_t
rte_service_runner_func(void *arg)
{
        RTE_SET_USED(arg);
        uint32_t i;
        const int lcore = rte_lcore_id();
        struct core_state *cs = &lcore_states[lcore];

        while (lcore_states[lcore].runstate == RUNSTATE_RUNNING) {
                const uint64_t service_mask = cs->service_mask;

                for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
                        /* return value ignored as no change to code flow */
                        service_run(i, cs, service_mask);
                }

                cs->loops++;

                rte_smp_rmb();
        }

        lcore_config[lcore].state = WAIT;

        return 0;
}

int32_t
rte_service_lcore_count(void)
{
        int32_t count = 0;
        uint32_t i;
        for (i = 0; i < RTE_MAX_LCORE; i++)
                count += lcore_states[i].is_service_core;
        return count;
}

int32_t
rte_service_lcore_list(uint32_t array[], uint32_t n)
{
        uint32_t count = rte_service_lcore_count();
        if (count > n)
                return -ENOMEM;

        if (!array)
                return -EINVAL;

        uint32_t i;
        uint32_t idx = 0;
        for (i = 0; i < RTE_MAX_LCORE; i++) {
                struct core_state *cs = &lcore_states[i];
                if (cs->is_service_core) {
                        array[idx] = i;
                        idx++;
                }
        }

        return count;
}

int32_t
rte_service_lcore_count_services(uint32_t lcore)
{
        if (lcore >= RTE_MAX_LCORE)
                return -EINVAL;

        struct core_state *cs = &lcore_states[lcore];
        if (!cs->is_service_core)
                return -ENOTSUP;

        return __builtin_popcountll(cs->service_mask);
}

int32_t
rte_service_start_with_defaults(void)
{
        /* Create a default mapping from cores to services, then start the
         * services to make them transparent to unaware applications.
         */
        uint32_t i;
        int ret;
        uint32_t count = rte_service_get_count();

        int32_t lcore_iter = 0;
        uint32_t ids[RTE_MAX_LCORE] = {0};
        int32_t lcore_count = rte_service_lcore_list(ids, RTE_MAX_LCORE);

        if (lcore_count == 0)
                return -ENOTSUP;

        for (i = 0; (int)i < lcore_count; i++)
                rte_service_lcore_start(ids[i]);

        for (i = 0; i < count; i++) {
                /* Map services to cores round-robin: each service gets a
                 * single core by default. If there are more services than
                 * service cores, multiple services multiplex on one core;
                 * with equal numbers the mapping is 1:1.
                 */
                ret = rte_service_map_lcore_set(i, ids[lcore_iter], 1);
                if (ret)
                        return -ENODEV;

                lcore_iter++;
                if (lcore_iter >= lcore_count)
                        lcore_iter = 0;

                ret = rte_service_runstate_set(i, 1);
                if (ret)
                        return -ENOEXEC;
        }

        return 0;
}
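
/* Usage sketch (illustrative): a service-unaware application typically
 * calls this once after EAL init, so services registered by components
 * run on whatever service cores were passed on the command line:
 *
 *      if (rte_eal_init(argc, argv) < 0)
 *              rte_exit(EXIT_FAILURE, "EAL init failed\n");
 *      if (rte_service_start_with_defaults() != 0)
 *              printf("warning: no service cores, services not started\n");
 */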

static int32_t
service_update(struct rte_service_spec *service, uint32_t lcore,
                uint32_t *set, uint32_t *enabled)
{
        uint32_t i;
        int32_t sid = -1;

        for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
                if ((struct rte_service_spec *)&rte_services[i] == service &&
                                service_valid(i)) {
                        sid = i;
                        break;
                }
        }

        if (sid == -1 || lcore >= RTE_MAX_LCORE)
                return -EINVAL;

        if (!lcore_states[lcore].is_service_core)
                return -EINVAL;

        uint64_t sid_mask = UINT64_C(1) << sid;
        if (set) {
                uint64_t lcore_mapped = lcore_states[lcore].service_mask &
                        sid_mask;

                if (*set && !lcore_mapped) {
                        lcore_states[lcore].service_mask |= sid_mask;
                        rte_atomic32_inc(&rte_services[sid].num_mapped_cores);
                }
                if (!*set && lcore_mapped) {
                        lcore_states[lcore].service_mask &= ~(sid_mask);
                        rte_atomic32_dec(&rte_services[sid].num_mapped_cores);
                }
        }

        if (enabled)
                *enabled = !!(lcore_states[lcore].service_mask & (sid_mask));

        rte_smp_wmb();

        return 0;
}

int32_t
rte_service_map_lcore_set(uint32_t id, uint32_t lcore, uint32_t enabled)
{
        struct rte_service_spec_impl *s;
        SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
        uint32_t on = enabled > 0;
        return service_update(&s->spec, lcore, &on, 0);
}

int32_t
rte_service_map_lcore_get(uint32_t id, uint32_t lcore)
{
        struct rte_service_spec_impl *s;
        SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
        uint32_t enabled;
        int ret = service_update(&s->spec, lcore, 0, &enabled);
        if (ret == 0)
                return enabled;
        return ret;
}

static void
set_lcore_state(uint32_t lcore, int32_t state)
{
        /* mark core state in hugepage backed config */
        struct rte_config *cfg = rte_eal_get_configuration();
        cfg->lcore_role[lcore] = state;

        /* mark state in process local lcore_config */
        lcore_config[lcore].core_role = state;

        /* update per-lcore optimized state tracking */
        lcore_states[lcore].is_service_core = (state == ROLE_SERVICE);
}

int32_t
rte_service_lcore_reset_all(void)
{
        /* loop over cores, reset all to mask 0 */
        uint32_t i;
        for (i = 0; i < RTE_MAX_LCORE; i++) {
                if (lcore_states[i].is_service_core) {
                        lcore_states[i].service_mask = 0;
                        set_lcore_state(i, ROLE_RTE);
                        lcore_states[i].runstate = RUNSTATE_STOPPED;
                }
        }
        for (i = 0; i < RTE_SERVICE_NUM_MAX; i++)
                rte_atomic32_set(&rte_services[i].num_mapped_cores, 0);

        rte_smp_wmb();

        return 0;
}

int32_t
rte_service_lcore_add(uint32_t lcore)
{
        if (lcore >= RTE_MAX_LCORE)
                return -EINVAL;
        if (lcore_states[lcore].is_service_core)
                return -EALREADY;

        set_lcore_state(lcore, ROLE_SERVICE);

        /* ensure that after adding a core the mask and state are defaults */
        lcore_states[lcore].service_mask = 0;
        lcore_states[lcore].runstate = RUNSTATE_STOPPED;

        rte_smp_wmb();

        return rte_eal_wait_lcore(lcore);
}

int32_t
rte_service_lcore_del(uint32_t lcore)
{
        if (lcore >= RTE_MAX_LCORE)
                return -EINVAL;

        struct core_state *cs = &lcore_states[lcore];
        if (!cs->is_service_core)
                return -EINVAL;

        if (cs->runstate != RUNSTATE_STOPPED)
                return -EBUSY;

        set_lcore_state(lcore, ROLE_RTE);

        rte_smp_wmb();
        return 0;
}

int32_t
rte_service_lcore_start(uint32_t lcore)
{
        if (lcore >= RTE_MAX_LCORE)
                return -EINVAL;

        struct core_state *cs = &lcore_states[lcore];
        if (!cs->is_service_core)
                return -EINVAL;

        if (cs->runstate == RUNSTATE_RUNNING)
                return -EALREADY;

        /* Set the core to the running state before launching it; otherwise
         * the launched function would observe RUNSTATE_STOPPED and return
         * immediately, as the runstate is what keeps it in the service
         * poll loop.
         */
        lcore_states[lcore].runstate = RUNSTATE_RUNNING;

        int ret = rte_eal_remote_launch(rte_service_runner_func, 0, lcore);
        /* returns -EBUSY if the core is already launched, 0 on success */
        return ret;
}

int32_t
rte_service_lcore_stop(uint32_t lcore)
{
        if (lcore >= RTE_MAX_LCORE)
                return -EINVAL;

        if (lcore_states[lcore].runstate == RUNSTATE_STOPPED)
                return -EALREADY;

        uint32_t i;
        uint64_t service_mask = lcore_states[lcore].service_mask;
        for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
                /* double-negate so service ids >= 32 are not truncated
                 * away by the conversion to int32_t
                 */
                int32_t enabled = !!(service_mask & (UINT64_C(1) << i));
                int32_t service_running = rte_service_runstate_get(i);
                int32_t only_core = (1 ==
                        rte_atomic32_read(&rte_services[i].num_mapped_cores));

                /* if the core is mapped, and the service is running, and this
                 * is the only core that is mapped, the service would cease to
                 * run if this core stopped, so fail instead.
                 */
                if (enabled && service_running && only_core)
                        return -EBUSY;
        }

        lcore_states[lcore].runstate = RUNSTATE_STOPPED;

        return 0;
}
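
/* Usage sketch (illustrative): the lcore lifecycle as driven by an
 * application, assuming a valid service id "sid" and a spare lcore 3:
 *
 *      rte_service_lcore_add(3);
 *      rte_service_map_lcore_set(sid, 3, 1);
 *      rte_service_runstate_set(sid, 1);
 *      rte_service_lcore_start(3);
 *
 *      // ... later: unmap first so stop does not return -EBUSY ...
 *      rte_service_map_lcore_set(sid, 3, 0);
 *      rte_service_lcore_stop(3);
 *      rte_service_lcore_del(3);
 */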

int32_t
rte_service_attr_get(uint32_t id, uint32_t attr_id, uint64_t *attr_value)
{
        struct rte_service_spec_impl *s;
        SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);

        if (!attr_value)
                return -EINVAL;

        switch (attr_id) {
        case RTE_SERVICE_ATTR_CYCLES:
                *attr_value = s->cycles_spent;
                return 0;
        case RTE_SERVICE_ATTR_CALL_COUNT:
                *attr_value = s->calls;
                return 0;
        default:
                return -EINVAL;
        }
}
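
/* Usage sketch (illustrative): reading the statistics exposed above,
 * assuming they were enabled with rte_service_set_stats_enable(id, 1):
 *
 *      uint64_t calls = 0, cycles = 0;
 *      rte_service_attr_get(id, RTE_SERVICE_ATTR_CALL_COUNT, &calls);
 *      rte_service_attr_get(id, RTE_SERVICE_ATTR_CYCLES, &cycles);
 *      printf("avg cycles/call: %"PRIu64"\n", calls ? cycles / calls : 0);
 */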

int32_t
rte_service_lcore_attr_get(uint32_t lcore, uint32_t attr_id,
                           uint64_t *attr_value)
{
        struct core_state *cs;

        if (lcore >= RTE_MAX_LCORE || !attr_value)
                return -EINVAL;

        cs = &lcore_states[lcore];
        if (!cs->is_service_core)
                return -ENOTSUP;

        switch (attr_id) {
        case RTE_SERVICE_LCORE_ATTR_LOOPS:
                *attr_value = cs->loops;
                return 0;
        default:
                return -EINVAL;
        }
}

static void
rte_service_dump_one(FILE *f, struct rte_service_spec_impl *s,
                     uint64_t all_cycles, uint32_t reset)
{
        /* avoid divide by zero */
        if (all_cycles == 0)
                all_cycles = 1;

        int calls = 1;
        if (s->calls != 0)
                calls = s->calls;

        if (reset) {
                s->cycles_spent = 0;
                s->calls = 0;
                return;
        }

        if (f == NULL)
                return;

        fprintf(f, "  %s: stats %d\tcalls %"PRIu64"\tcycles %"
                        PRIu64"\tavg: %"PRIu64"\n",
                        s->spec.name, service_stats_enabled(s), s->calls,
                        s->cycles_spent, s->cycles_spent / calls);
}

int32_t
rte_service_attr_reset_all(uint32_t id)
{
        struct rte_service_spec_impl *s;
        SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);

        int reset = 1;
        rte_service_dump_one(NULL, s, 0, reset);
        return 0;
}

int32_t
rte_service_lcore_attr_reset_all(uint32_t lcore)
{
        struct core_state *cs;

        if (lcore >= RTE_MAX_LCORE)
                return -EINVAL;

        cs = &lcore_states[lcore];
        if (!cs->is_service_core)
                return -ENOTSUP;

        cs->loops = 0;

        return 0;
}

static void
service_dump_calls_per_lcore(FILE *f, uint32_t lcore, uint32_t reset)
{
        uint32_t i;
        struct core_state *cs = &lcore_states[lcore];

        fprintf(f, "%02d\t", lcore);
        for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
                if (!service_valid(i))
                        continue;
                fprintf(f, "%"PRIu64"\t", cs->calls_per_service[i]);
                if (reset)
                        cs->calls_per_service[i] = 0;
        }
        fprintf(f, "\n");
}

int32_t
rte_service_dump(FILE *f, uint32_t id)
{
        uint32_t i;
        int print_one = (id != UINT32_MAX);

        uint64_t total_cycles = 0;

        for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
                if (!service_valid(i))
                        continue;
                total_cycles += rte_services[i].cycles_spent;
        }

        /* print only the specified service */
        if (print_one) {
                struct rte_service_spec_impl *s;
                SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
                fprintf(f, "Service %s Summary\n", s->spec.name);
                uint32_t reset = 0;
                rte_service_dump_one(f, s, total_cycles, reset);
                return 0;
        }

        /* print all services, as UINT32_MAX was passed as id */
        fprintf(f, "Services Summary\n");
        for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
                if (!service_valid(i))
                        continue;
                uint32_t reset = 0;
                rte_service_dump_one(f, &rte_services[i], total_cycles, reset);
        }

        fprintf(f, "Service Cores Summary\n");
        for (i = 0; i < RTE_MAX_LCORE; i++) {
                if (lcore_config[i].core_role != ROLE_SERVICE)
                        continue;

                uint32_t reset = 0;
                service_dump_calls_per_lcore(f, i, reset);
        }

        return 0;
}