eal: move common header files
[dpdk.git] / lib / librte_eal / common / rte_service.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2017 Intel Corporation
3  */
4
5 #include <stdio.h>
6 #include <unistd.h>
7 #include <inttypes.h>
8 #include <limits.h>
9 #include <string.h>
10
11 #include <rte_compat.h>
12 #include <rte_service.h>
13 #include <rte_service_component.h>
14
15 #include <rte_eal.h>
16 #include <rte_lcore.h>
17 #include <rte_common.h>
18 #include <rte_debug.h>
19 #include <rte_cycles.h>
20 #include <rte_atomic.h>
21 #include <rte_memory.h>
22 #include <rte_malloc.h>
23
24 #include "eal_private.h"
25
26 #define RTE_SERVICE_NUM_MAX 64
27
28 #define SERVICE_F_REGISTERED    (1 << 0)
29 #define SERVICE_F_STATS_ENABLED (1 << 1)
30 #define SERVICE_F_START_CHECK   (1 << 2)
31
32 /* runstates for services and lcores, denoting if they are active or not */
33 #define RUNSTATE_STOPPED 0
34 #define RUNSTATE_RUNNING 1
35
/* internal representation of a service */
struct rte_service_spec_impl {
	/* public part of the struct */
	struct rte_service_spec spec;

	/* atomic lock that when set indicates a service core is currently
	 * running this service callback. When not set, a core may take the
	 * lock and then run the service callback.
	 */
	rte_atomic32_t execute_lock;

	/* API set/get-able variables */
	int8_t app_runstate;	/* RUNSTATE_*: set via rte_service_runstate_set() */
	int8_t comp_runstate;	/* RUNSTATE_*: set via rte_service_component_runstate_set() */
	uint8_t internal_flags;	/* SERVICE_F_* bits (registered/stats/start-check) */

	/* per service statistics */
	rte_atomic32_t num_mapped_cores;	/* count of lcores this service is mapped to */
	uint64_t calls;		/* callback invocations (only counted when stats enabled) */
	uint64_t cycles_spent;	/* total TSC cycles inside the callback (stats enabled) */
} __rte_cache_aligned;
57
/* the internal values of a service core */
struct core_state {
	/* map of services IDs are run on this core */
	uint64_t service_mask;
	uint8_t runstate; /* running or stopped */
	uint8_t is_service_core; /* set if core is currently a service core */
	/* per-service flag: non-zero while this core is executing (or last
	 * attempted to execute) the service; cleared by service_run() when
	 * the service is not runnable on this core */
	uint8_t service_active_on_lcore[RTE_SERVICE_NUM_MAX];
	uint64_t loops; /* iterations of the service poll loop on this core */
	uint64_t calls_per_service[RTE_SERVICE_NUM_MAX]; /* per-service call counts */
} __rte_cache_aligned;
68
/* number of currently registered services (see component_register) */
static uint32_t rte_service_count;
/* array of RTE_SERVICE_NUM_MAX slots, allocated by rte_service_init() */
static struct rte_service_spec_impl *rte_services;
/* array of RTE_MAX_LCORE per-core states, allocated by rte_service_init() */
static struct core_state *lcore_states;
/* guard flag: non-zero once rte_service_init() has completed */
static uint32_t rte_service_library_initialized;
73
74 int32_t
75 rte_service_init(void)
76 {
77         if (rte_service_library_initialized) {
78                 RTE_LOG(NOTICE, EAL,
79                         "service library init() called, init flag %d\n",
80                         rte_service_library_initialized);
81                 return -EALREADY;
82         }
83
84         rte_services = rte_calloc("rte_services", RTE_SERVICE_NUM_MAX,
85                         sizeof(struct rte_service_spec_impl),
86                         RTE_CACHE_LINE_SIZE);
87         if (!rte_services) {
88                 RTE_LOG(ERR, EAL, "error allocating rte services array\n");
89                 goto fail_mem;
90         }
91
92         lcore_states = rte_calloc("rte_service_core_states", RTE_MAX_LCORE,
93                         sizeof(struct core_state), RTE_CACHE_LINE_SIZE);
94         if (!lcore_states) {
95                 RTE_LOG(ERR, EAL, "error allocating core states array\n");
96                 goto fail_mem;
97         }
98
99         int i;
100         int count = 0;
101         struct rte_config *cfg = rte_eal_get_configuration();
102         for (i = 0; i < RTE_MAX_LCORE; i++) {
103                 if (lcore_config[i].core_role == ROLE_SERVICE) {
104                         if ((unsigned int)i == cfg->master_lcore)
105                                 continue;
106                         rte_service_lcore_add(i);
107                         count++;
108                 }
109         }
110
111         rte_service_library_initialized = 1;
112         return 0;
113 fail_mem:
114         rte_free(rte_services);
115         rte_free(lcore_states);
116         return -ENOMEM;
117 }
118
119 void
120 rte_service_finalize(void)
121 {
122         if (!rte_service_library_initialized)
123                 return;
124
125         rte_service_lcore_reset_all();
126         rte_eal_mp_wait_lcore();
127
128         rte_free(rte_services);
129         rte_free(lcore_states);
130
131         rte_service_library_initialized = 0;
132 }
133
134 /* returns 1 if service is registered and has not been unregistered
135  * Returns 0 if service never registered, or has been unregistered
136  */
137 static inline int
138 service_valid(uint32_t id)
139 {
140         return !!(rte_services[id].internal_flags & SERVICE_F_REGISTERED);
141 }
142
143 static struct rte_service_spec_impl *
144 service_get(uint32_t id)
145 {
146         return &rte_services[id];
147 }
148
/* validate ID and retrieve service pointer, or return error value.
 * Arguments are parenthesized so expansion stays correct when callers
 * pass expressions rather than plain identifiers.
 */
#define SERVICE_VALID_GET_OR_ERR_RET(id, service, retval) do {          \
	if ((id) >= RTE_SERVICE_NUM_MAX || !service_valid(id))          \
		return (retval);                                        \
	(service) = &rte_services[(id)];                                \
} while (0)
155
156 /* returns 1 if statistics should be collected for service
157  * Returns 0 if statistics should not be collected for service
158  */
159 static inline int
160 service_stats_enabled(struct rte_service_spec_impl *impl)
161 {
162         return !!(impl->internal_flags & SERVICE_F_STATS_ENABLED);
163 }
164
165 static inline int
166 service_mt_safe(struct rte_service_spec_impl *s)
167 {
168         return !!(s->spec.capabilities & RTE_SERVICE_CAP_MT_SAFE);
169 }
170
171 int32_t
172 rte_service_set_stats_enable(uint32_t id, int32_t enabled)
173 {
174         struct rte_service_spec_impl *s;
175         SERVICE_VALID_GET_OR_ERR_RET(id, s, 0);
176
177         if (enabled)
178                 s->internal_flags |= SERVICE_F_STATS_ENABLED;
179         else
180                 s->internal_flags &= ~(SERVICE_F_STATS_ENABLED);
181
182         return 0;
183 }
184
185 int32_t
186 rte_service_set_runstate_mapped_check(uint32_t id, int32_t enabled)
187 {
188         struct rte_service_spec_impl *s;
189         SERVICE_VALID_GET_OR_ERR_RET(id, s, 0);
190
191         if (enabled)
192                 s->internal_flags |= SERVICE_F_START_CHECK;
193         else
194                 s->internal_flags &= ~(SERVICE_F_START_CHECK);
195
196         return 0;
197 }
198
199 uint32_t
200 rte_service_get_count(void)
201 {
202         return rte_service_count;
203 }
204
205 int32_t
206 rte_service_get_by_name(const char *name, uint32_t *service_id)
207 {
208         if (!service_id)
209                 return -EINVAL;
210
211         int i;
212         for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
213                 if (service_valid(i) &&
214                                 strcmp(name, rte_services[i].spec.name) == 0) {
215                         *service_id = i;
216                         return 0;
217                 }
218         }
219
220         return -ENODEV;
221 }
222
223 const char *
224 rte_service_get_name(uint32_t id)
225 {
226         struct rte_service_spec_impl *s;
227         SERVICE_VALID_GET_OR_ERR_RET(id, s, 0);
228         return s->spec.name;
229 }
230
231 int32_t
232 rte_service_probe_capability(uint32_t id, uint32_t capability)
233 {
234         struct rte_service_spec_impl *s;
235         SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
236         return !!(s->spec.capabilities & capability);
237 }
238
239 int32_t
240 rte_service_component_register(const struct rte_service_spec *spec,
241                                uint32_t *id_ptr)
242 {
243         uint32_t i;
244         int32_t free_slot = -1;
245
246         if (spec->callback == NULL || strlen(spec->name) == 0)
247                 return -EINVAL;
248
249         for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
250                 if (!service_valid(i)) {
251                         free_slot = i;
252                         break;
253                 }
254         }
255
256         if ((free_slot < 0) || (i == RTE_SERVICE_NUM_MAX))
257                 return -ENOSPC;
258
259         struct rte_service_spec_impl *s = &rte_services[free_slot];
260         s->spec = *spec;
261         s->internal_flags |= SERVICE_F_REGISTERED | SERVICE_F_START_CHECK;
262
263         rte_smp_wmb();
264         rte_service_count++;
265
266         if (id_ptr)
267                 *id_ptr = free_slot;
268
269         return 0;
270 }
271
/* Unregister service @id: remove it from the public count, clear its
 * registered flag, unmap it from every lcore and zero the slot for
 * reuse. Returns 0 on success, -EINVAL for an invalid id.
 */
int32_t
rte_service_component_unregister(uint32_t id)
{
	uint32_t i;
	struct rte_service_spec_impl *s;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);

	/* drop the public count ahead of the write barrier, before the
	 * slot itself is torn down below */
	rte_service_count--;
	rte_smp_wmb();

	s->internal_flags &= ~(SERVICE_F_REGISTERED);

	/* clear the run-bit in all cores */
	for (i = 0; i < RTE_MAX_LCORE; i++)
		lcore_states[i].service_mask &= ~(UINT64_C(1) << id);

	/* zero the whole slot so a future register starts clean */
	memset(&rte_services[id], 0, sizeof(struct rte_service_spec_impl));

	return 0;
}
292
293 int32_t
294 rte_service_component_runstate_set(uint32_t id, uint32_t runstate)
295 {
296         struct rte_service_spec_impl *s;
297         SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
298
299         if (runstate)
300                 s->comp_runstate = RUNSTATE_RUNNING;
301         else
302                 s->comp_runstate = RUNSTATE_STOPPED;
303
304         rte_smp_wmb();
305         return 0;
306 }
307
308 int32_t
309 rte_service_runstate_set(uint32_t id, uint32_t runstate)
310 {
311         struct rte_service_spec_impl *s;
312         SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
313
314         if (runstate)
315                 s->app_runstate = RUNSTATE_RUNNING;
316         else
317                 s->app_runstate = RUNSTATE_STOPPED;
318
319         rte_smp_wmb();
320         return 0;
321 }
322
/* Return 1 when service @id is considered running: both the
 * application and component runstates are RUNNING, and either the
 * start-check is disabled or at least one lcore is mapped.
 * Returns 0 when not running, -EINVAL for an invalid id.
 */
int32_t
rte_service_runstate_get(uint32_t id)
{
	struct rte_service_spec_impl *s;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
	/* pairs with the wmb in the runstate setters */
	rte_smp_rmb();

	/* with START_CHECK cleared the service counts as runnable even
	 * with no lcore mapped to it */
	int check_disabled = !(s->internal_flags & SERVICE_F_START_CHECK);
	int lcore_mapped = (rte_atomic32_read(&s->num_mapped_cores) > 0);

	return (s->app_runstate == RUNSTATE_RUNNING) &&
		(s->comp_runstate == RUNSTATE_RUNNING) &&
		(check_disabled | lcore_mapped);
}
337
338 static inline void
339 rte_service_runner_do_callback(struct rte_service_spec_impl *s,
340                                struct core_state *cs, uint32_t service_idx)
341 {
342         void *userdata = s->spec.callback_userdata;
343
344         if (service_stats_enabled(s)) {
345                 uint64_t start = rte_rdtsc();
346                 s->spec.callback(userdata);
347                 uint64_t end = rte_rdtsc();
348                 s->cycles_spent += end - start;
349                 cs->calls_per_service[service_idx]++;
350                 s->calls++;
351         } else
352                 s->spec.callback(userdata);
353 }
354
355
/* Expects the service 's' is valid.
 * Run service index @i once on core @cs if it is runnable and present
 * in @service_mask. Returns 0 on success, -EINVAL for NULL @s,
 * -ENOEXEC when the service is not runnable on this core, -EBUSY when
 * another core holds the execute lock of an MT-unsafe service.
 */
static int32_t
service_run(uint32_t i, struct core_state *cs, uint64_t service_mask,
	    struct rte_service_spec_impl *s)
{
	if (!s)
		return -EINVAL;

	/* not fully started or not mapped to this core: clear the active
	 * flag and bail out */
	if (s->comp_runstate != RUNSTATE_RUNNING ||
			s->app_runstate != RUNSTATE_RUNNING ||
			!(service_mask & (UINT64_C(1) << i))) {
		cs->service_active_on_lcore[i] = 0;
		return -ENOEXEC;
	}

	cs->service_active_on_lcore[i] = 1;

	/* check do we need cmpset, if MT safe or <= 1 core
	 * mapped, atomic ops are not required.
	 */
	const int use_atomics = (service_mt_safe(s) == 0) &&
				(rte_atomic32_read(&s->num_mapped_cores) > 1);
	if (use_atomics) {
		/* another core is inside the callback right now */
		if (!rte_atomic32_cmpset((uint32_t *)&s->execute_lock, 0, 1))
			return -EBUSY;

		rte_service_runner_do_callback(s, cs, i);
		rte_atomic32_clear(&s->execute_lock);
	} else
		rte_service_runner_do_callback(s, cs, i);

	return 0;
}
389
390 int32_t
391 rte_service_may_be_active(uint32_t id)
392 {
393         uint32_t ids[RTE_MAX_LCORE] = {0};
394         int32_t lcore_count = rte_service_lcore_list(ids, RTE_MAX_LCORE);
395         int i;
396
397         if (id >= RTE_SERVICE_NUM_MAX || !service_valid(id))
398                 return -EINVAL;
399
400         for (i = 0; i < lcore_count; i++) {
401                 if (lcore_states[i].service_active_on_lcore[id])
402                         return 1;
403         }
404
405         return 0;
406 }
407
/* Run service @id once on the calling application lcore. When
 * @serialize_mt_unsafe is set, the caller is temporarily counted as a
 * mapped core so an MT-unsafe service cannot run concurrently
 * elsewhere; -EBUSY is returned in that contended case. Other return
 * values come from service_run(); -EINVAL for an invalid id.
 */
int32_t
rte_service_run_iter_on_app_lcore(uint32_t id, uint32_t serialize_mt_unsafe)
{
	struct core_state *cs = &lcore_states[rte_lcore_id()];
	struct rte_service_spec_impl *s;

	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);

	/* Atomically add this core to the mapped cores first, then examine if
	 * we can run the service. This avoids a race condition between
	 * checking the value, and atomically adding to the mapped count.
	 */
	if (serialize_mt_unsafe)
		rte_atomic32_inc(&s->num_mapped_cores);

	if (service_mt_safe(s) == 0 &&
			rte_atomic32_read(&s->num_mapped_cores) > 1) {
		if (serialize_mt_unsafe)
			rte_atomic32_dec(&s->num_mapped_cores);
		return -EBUSY;
	}

	/* UINT64_MAX mask: run regardless of this core's service_mask */
	int ret = service_run(id, cs, UINT64_MAX, s);

	if (serialize_mt_unsafe)
		rte_atomic32_dec(&s->num_mapped_cores);

	return ret;
}
437
/* Poll loop executed on every service lcore: repeatedly attempt each
 * registered service allowed by this core's service_mask until the
 * core's runstate is set to RUNSTATE_STOPPED by another core
 * (lcore_stop / reset_all). Marks the lcore WAIT on exit so the EAL
 * can relaunch it.
 */
static int32_t
rte_service_runner_func(void *arg)
{
	RTE_SET_USED(arg);
	uint32_t i;
	const int lcore = rte_lcore_id();
	struct core_state *cs = &lcore_states[lcore];

	/* runstate is written by other cores; re-read every iteration */
	while (lcore_states[lcore].runstate == RUNSTATE_RUNNING) {
		/* snapshot the mask once per loop iteration */
		const uint64_t service_mask = cs->service_mask;

		for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
			if (!service_valid(i))
				continue;
			/* return value ignored as no change to code flow */
			service_run(i, cs, service_mask, service_get(i));
		}

		cs->loops++;

		rte_smp_rmb();
	}

	/* hand the lcore back to the EAL as joinable */
	lcore_config[lcore].state = WAIT;

	return 0;
}
465
466 int32_t
467 rte_service_lcore_count(void)
468 {
469         int32_t count = 0;
470         uint32_t i;
471         for (i = 0; i < RTE_MAX_LCORE; i++)
472                 count += lcore_states[i].is_service_core;
473         return count;
474 }
475
476 int32_t
477 rte_service_lcore_list(uint32_t array[], uint32_t n)
478 {
479         uint32_t count = rte_service_lcore_count();
480         if (count > n)
481                 return -ENOMEM;
482
483         if (!array)
484                 return -EINVAL;
485
486         uint32_t i;
487         uint32_t idx = 0;
488         for (i = 0; i < RTE_MAX_LCORE; i++) {
489                 struct core_state *cs = &lcore_states[i];
490                 if (cs->is_service_core) {
491                         array[idx] = i;
492                         idx++;
493                 }
494         }
495
496         return count;
497 }
498
499 int32_t
500 rte_service_lcore_count_services(uint32_t lcore)
501 {
502         if (lcore >= RTE_MAX_LCORE)
503                 return -EINVAL;
504
505         struct core_state *cs = &lcore_states[lcore];
506         if (!cs->is_service_core)
507                 return -ENOTSUP;
508
509         return __builtin_popcountll(cs->service_mask);
510 }
511
512 int32_t
513 rte_service_start_with_defaults(void)
514 {
515         /* create a default mapping from cores to services, then start the
516          * services to make them transparent to unaware applications.
517          */
518         uint32_t i;
519         int ret;
520         uint32_t count = rte_service_get_count();
521
522         int32_t lcore_iter = 0;
523         uint32_t ids[RTE_MAX_LCORE] = {0};
524         int32_t lcore_count = rte_service_lcore_list(ids, RTE_MAX_LCORE);
525
526         if (lcore_count == 0)
527                 return -ENOTSUP;
528
529         for (i = 0; (int)i < lcore_count; i++)
530                 rte_service_lcore_start(ids[i]);
531
532         for (i = 0; i < count; i++) {
533                 /* do 1:1 core mapping here, with each service getting
534                  * assigned a single core by default. Adding multiple services
535                  * should multiplex to a single core, or 1:1 if there are the
536                  * same amount of services as service-cores
537                  */
538                 ret = rte_service_map_lcore_set(i, ids[lcore_iter], 1);
539                 if (ret)
540                         return -ENODEV;
541
542                 lcore_iter++;
543                 if (lcore_iter >= lcore_count)
544                         lcore_iter = 0;
545
546                 ret = rte_service_runstate_set(i, 1);
547                 if (ret)
548                         return -ENOEXEC;
549         }
550
551         return 0;
552 }
553
/* Update and/or query the mapping of a service onto @lcore.
 * @service is matched by address against the rte_services array (the
 * spec is the first member of rte_service_spec_impl, so the addresses
 * coincide). When @set is non-NULL, *set selects map (non-zero) or
 * unmap (zero); when @enabled is non-NULL it receives the resulting
 * mapping state. Returns 0 on success, -EINVAL for an unknown
 * service, out-of-range lcore, or an lcore that is not a service core.
 */
static int32_t
service_update(struct rte_service_spec *service, uint32_t lcore,
		uint32_t *set, uint32_t *enabled)
{
	uint32_t i;
	int32_t sid = -1;

	/* resolve the service id by pointer identity */
	for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
		if ((struct rte_service_spec *)&rte_services[i] == service &&
				service_valid(i)) {
			sid = i;
			break;
		}
	}

	if (sid == -1 || lcore >= RTE_MAX_LCORE)
		return -EINVAL;

	if (!lcore_states[lcore].is_service_core)
		return -EINVAL;

	uint64_t sid_mask = UINT64_C(1) << sid;
	if (set) {
		uint64_t lcore_mapped = lcore_states[lcore].service_mask &
			sid_mask;

		/* only adjust num_mapped_cores on an actual transition */
		if (*set && !lcore_mapped) {
			lcore_states[lcore].service_mask |= sid_mask;
			rte_atomic32_inc(&rte_services[sid].num_mapped_cores);
		}
		if (!*set && lcore_mapped) {
			lcore_states[lcore].service_mask &= ~(sid_mask);
			rte_atomic32_dec(&rte_services[sid].num_mapped_cores);
		}
	}

	if (enabled)
		*enabled = !!(lcore_states[lcore].service_mask & (sid_mask));

	/* publish the updated mapping to other cores */
	rte_smp_wmb();

	return 0;
}
597
598 int32_t
599 rte_service_map_lcore_set(uint32_t id, uint32_t lcore, uint32_t enabled)
600 {
601         struct rte_service_spec_impl *s;
602         SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
603         uint32_t on = enabled > 0;
604         return service_update(&s->spec, lcore, &on, 0);
605 }
606
607 int32_t
608 rte_service_map_lcore_get(uint32_t id, uint32_t lcore)
609 {
610         struct rte_service_spec_impl *s;
611         SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
612         uint32_t enabled;
613         int ret = service_update(&s->spec, lcore, 0, &enabled);
614         if (ret == 0)
615                 return enabled;
616         return ret;
617 }
618
/* Record the role of @lcore in all three tracking locations: the
 * shared (hugepage backed) EAL config, this process's lcore_config,
 * and the service library's own per-lcore flag.
 */
static void
set_lcore_state(uint32_t lcore, int32_t state)
{
	/* mark core state in hugepage backed config */
	struct rte_config *cfg = rte_eal_get_configuration();
	cfg->lcore_role[lcore] = state;

	/* mark state in process local lcore_config */
	lcore_config[lcore].core_role = state;

	/* update per-lcore optimized state tracking */
	lcore_states[lcore].is_service_core = (state == ROLE_SERVICE);
}
632
/* Return every service core to a plain RTE lcore: clear its service
 * mask, stop its poll loop, and zero every service's mapped-core
 * count. Always returns 0.
 */
int32_t
rte_service_lcore_reset_all(void)
{
	/* loop over cores, reset all to mask 0 */
	uint32_t i;
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		if (lcore_states[i].is_service_core) {
			lcore_states[i].service_mask = 0;
			set_lcore_state(i, ROLE_RTE);
			/* stops the runner poll loop on that core */
			lcore_states[i].runstate = RUNSTATE_STOPPED;
		}
	}
	for (i = 0; i < RTE_SERVICE_NUM_MAX; i++)
		rte_atomic32_set(&rte_services[i].num_mapped_cores, 0);

	/* publish the cleared state to other cores */
	rte_smp_wmb();

	return 0;
}
652
/* Promote @lcore to a service core with an empty service mask and a
 * stopped runstate. Returns -EINVAL for an out-of-range lcore,
 * -EALREADY if it is already a service core, otherwise the result of
 * rte_eal_wait_lcore(lcore).
 */
int32_t
rte_service_lcore_add(uint32_t lcore)
{
	if (lcore >= RTE_MAX_LCORE)
		return -EINVAL;
	if (lcore_states[lcore].is_service_core)
		return -EALREADY;

	set_lcore_state(lcore, ROLE_SERVICE);

	/* ensure that after adding a core the mask and state are defaults */
	lcore_states[lcore].service_mask = 0;
	lcore_states[lcore].runstate = RUNSTATE_STOPPED;

	/* publish the new core state before waiting on the lcore */
	rte_smp_wmb();

	return rte_eal_wait_lcore(lcore);
}
671
672 int32_t
673 rte_service_lcore_del(uint32_t lcore)
674 {
675         if (lcore >= RTE_MAX_LCORE)
676                 return -EINVAL;
677
678         struct core_state *cs = &lcore_states[lcore];
679         if (!cs->is_service_core)
680                 return -EINVAL;
681
682         if (cs->runstate != RUNSTATE_STOPPED)
683                 return -EBUSY;
684
685         set_lcore_state(lcore, ROLE_RTE);
686
687         rte_smp_wmb();
688         return 0;
689 }
690
/* Launch the service runner on @lcore. Returns 0 on success, -EINVAL
 * for a bad or non-service lcore, -EALREADY when already running, or
 * the rte_eal_remote_launch() error (-EBUSY if the core is busy).
 */
int32_t
rte_service_lcore_start(uint32_t lcore)
{
	if (lcore >= RTE_MAX_LCORE)
		return -EINVAL;

	struct core_state *cs = &lcore_states[lcore];
	if (!cs->is_service_core)
		return -EINVAL;

	if (cs->runstate == RUNSTATE_RUNNING)
		return -EALREADY;

	/* set core to run state first, and then launch otherwise it will
	 * return immediately as runstate keeps it in the service poll loop
	 */
	lcore_states[lcore].runstate = RUNSTATE_RUNNING;

	int ret = rte_eal_remote_launch(rte_service_runner_func, 0, lcore);
	/* returns -EBUSY if the core is already launched, 0 on success */
	return ret;
}
713
714 int32_t
715 rte_service_lcore_stop(uint32_t lcore)
716 {
717         if (lcore >= RTE_MAX_LCORE)
718                 return -EINVAL;
719
720         if (lcore_states[lcore].runstate == RUNSTATE_STOPPED)
721                 return -EALREADY;
722
723         uint32_t i;
724         uint64_t service_mask = lcore_states[lcore].service_mask;
725         for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
726                 int32_t enabled = service_mask & (UINT64_C(1) << i);
727                 int32_t service_running = rte_service_runstate_get(i);
728                 int32_t only_core = (1 ==
729                         rte_atomic32_read(&rte_services[i].num_mapped_cores));
730
731                 /* if the core is mapped, and the service is running, and this
732                  * is the only core that is mapped, the service would cease to
733                  * run if this core stopped, so fail instead.
734                  */
735                 if (enabled && service_running && only_core)
736                         return -EBUSY;
737         }
738
739         lcore_states[lcore].runstate = RUNSTATE_STOPPED;
740
741         return 0;
742 }
743
744 int32_t
745 rte_service_attr_get(uint32_t id, uint32_t attr_id, uint64_t *attr_value)
746 {
747         struct rte_service_spec_impl *s;
748         SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
749
750         if (!attr_value)
751                 return -EINVAL;
752
753         switch (attr_id) {
754         case RTE_SERVICE_ATTR_CYCLES:
755                 *attr_value = s->cycles_spent;
756                 return 0;
757         case RTE_SERVICE_ATTR_CALL_COUNT:
758                 *attr_value = s->calls;
759                 return 0;
760         default:
761                 return -EINVAL;
762         }
763 }
764
765 int32_t
766 rte_service_lcore_attr_get(uint32_t lcore, uint32_t attr_id,
767                            uint64_t *attr_value)
768 {
769         struct core_state *cs;
770
771         if (lcore >= RTE_MAX_LCORE || !attr_value)
772                 return -EINVAL;
773
774         cs = &lcore_states[lcore];
775         if (!cs->is_service_core)
776                 return -ENOTSUP;
777
778         switch (attr_id) {
779         case RTE_SERVICE_LCORE_ATTR_LOOPS:
780                 *attr_value = cs->loops;
781                 return 0;
782         default:
783                 return -EINVAL;
784         }
785 }
786
787 static void
788 rte_service_dump_one(FILE *f, struct rte_service_spec_impl *s,
789                      uint64_t all_cycles, uint32_t reset)
790 {
791         /* avoid divide by zero */
792         if (all_cycles == 0)
793                 all_cycles = 1;
794
795         int calls = 1;
796         if (s->calls != 0)
797                 calls = s->calls;
798
799         if (reset) {
800                 s->cycles_spent = 0;
801                 s->calls = 0;
802                 return;
803         }
804
805         if (f == NULL)
806                 return;
807
808         fprintf(f, "  %s: stats %d\tcalls %"PRIu64"\tcycles %"
809                         PRIu64"\tavg: %"PRIu64"\n",
810                         s->spec.name, service_stats_enabled(s), s->calls,
811                         s->cycles_spent, s->cycles_spent / calls);
812 }
813
814 int32_t
815 rte_service_attr_reset_all(uint32_t id)
816 {
817         struct rte_service_spec_impl *s;
818         SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
819
820         int reset = 1;
821         rte_service_dump_one(NULL, s, 0, reset);
822         return 0;
823 }
824
825 int32_t
826 rte_service_lcore_attr_reset_all(uint32_t lcore)
827 {
828         struct core_state *cs;
829
830         if (lcore >= RTE_MAX_LCORE)
831                 return -EINVAL;
832
833         cs = &lcore_states[lcore];
834         if (!cs->is_service_core)
835                 return -ENOTSUP;
836
837         cs->loops = 0;
838
839         return 0;
840 }
841
842 static void
843 service_dump_calls_per_lcore(FILE *f, uint32_t lcore, uint32_t reset)
844 {
845         uint32_t i;
846         struct core_state *cs = &lcore_states[lcore];
847
848         fprintf(f, "%02d\t", lcore);
849         for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
850                 if (!service_valid(i))
851                         continue;
852                 fprintf(f, "%"PRIu64"\t", cs->calls_per_service[i]);
853                 if (reset)
854                         cs->calls_per_service[i] = 0;
855         }
856         fprintf(f, "\n");
857 }
858
859 int32_t
860 rte_service_dump(FILE *f, uint32_t id)
861 {
862         uint32_t i;
863         int print_one = (id != UINT32_MAX);
864
865         uint64_t total_cycles = 0;
866
867         for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
868                 if (!service_valid(i))
869                         continue;
870                 total_cycles += rte_services[i].cycles_spent;
871         }
872
873         /* print only the specified service */
874         if (print_one) {
875                 struct rte_service_spec_impl *s;
876                 SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
877                 fprintf(f, "Service %s Summary\n", s->spec.name);
878                 uint32_t reset = 0;
879                 rte_service_dump_one(f, s, total_cycles, reset);
880                 return 0;
881         }
882
883         /* print all services, as UINT32_MAX was passed as id */
884         fprintf(f, "Services Summary\n");
885         for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
886                 if (!service_valid(i))
887                         continue;
888                 uint32_t reset = 0;
889                 rte_service_dump_one(f, &rte_services[i], total_cycles, reset);
890         }
891
892         fprintf(f, "Service Cores Summary\n");
893         for (i = 0; i < RTE_MAX_LCORE; i++) {
894                 if (lcore_config[i].core_role != ROLE_SERVICE)
895                         continue;
896
897                 uint32_t reset = 0;
898                 service_dump_calls_per_lcore(f, i, reset);
899         }
900
901         return 0;
902 }