service: avoid false sharing on core state
[dpdk.git] lib/librte_eal/common/rte_service.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <stdio.h>
#include <unistd.h>
#include <inttypes.h>
#include <limits.h>
#include <string.h>

#include <rte_compat.h>
#include <rte_service.h>
#include "include/rte_service_component.h"

#include <rte_eal.h>
#include <rte_lcore.h>
#include <rte_common.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_atomic.h>
#include <rte_memory.h>
#include <rte_malloc.h>

#define RTE_SERVICE_NUM_MAX 64

#define SERVICE_F_REGISTERED    (1 << 0)
#define SERVICE_F_STATS_ENABLED (1 << 1)
#define SERVICE_F_START_CHECK   (1 << 2)

/* runstates for services and lcores, denoting if they are active or not */
#define RUNSTATE_STOPPED 0
#define RUNSTATE_RUNNING 1

/* internal representation of a service */
struct rte_service_spec_impl {
        /* public part of the struct */
        struct rte_service_spec spec;

        /* atomic lock that when set indicates a service core is currently
         * running this service callback. When not set, a core may take the
         * lock and then run the service callback.
         */
        rte_atomic32_t execute_lock;

        /* API set/get-able variables */
        int8_t app_runstate;
        int8_t comp_runstate;
        uint8_t internal_flags;

        /* per service statistics */
        rte_atomic32_t num_mapped_cores;
        uint64_t calls;
        uint64_t cycles_spent;
} __rte_cache_aligned;

/* the internal values of a service core */
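/* The struct is cache-line aligned to avoid false sharing: each lcore
 * updates its own state from the service poll loop, and without the
 * alignment those writes could invalidate the cache line holding a
 * neighbouring lcore's state.
 */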
struct core_state {
        /* bitmap of the service IDs mapped to run on this core */
        uint64_t service_mask;
        uint8_t runstate; /* running or stopped */
        uint8_t is_service_core; /* set if core is currently a service core */
        uint8_t service_active_on_lcore[RTE_SERVICE_NUM_MAX];
        uint64_t loops;
        uint64_t calls_per_service[RTE_SERVICE_NUM_MAX];
} __rte_cache_aligned;

static uint32_t rte_service_count;
static struct rte_service_spec_impl *rte_services;
static struct core_state *lcore_states;
static uint32_t rte_service_library_initialized;

int32_t
rte_service_init(void)
{
        if (rte_service_library_initialized) {
                RTE_LOG(NOTICE, EAL,
                        "service library init() called, init flag %d\n",
                        rte_service_library_initialized);
                return -EALREADY;
        }

        rte_services = rte_calloc("rte_services", RTE_SERVICE_NUM_MAX,
                        sizeof(struct rte_service_spec_impl),
                        RTE_CACHE_LINE_SIZE);
        if (!rte_services) {
                RTE_LOG(ERR, EAL, "error allocating rte services array\n");
                goto fail_mem;
        }

        lcore_states = rte_calloc("rte_service_core_states", RTE_MAX_LCORE,
                        sizeof(struct core_state), RTE_CACHE_LINE_SIZE);
        if (!lcore_states) {
                RTE_LOG(ERR, EAL, "error allocating core states array\n");
                goto fail_mem;
        }

        int i;
        int count = 0;
        struct rte_config *cfg = rte_eal_get_configuration();
        for (i = 0; i < RTE_MAX_LCORE; i++) {
                if (lcore_config[i].core_role == ROLE_SERVICE) {
                        if ((unsigned int)i == cfg->master_lcore)
                                continue;
                        rte_service_lcore_add(i);
                        count++;
                }
        }

        rte_service_library_initialized = 1;
        return 0;
fail_mem:
        rte_free(rte_services);
        rte_free(lcore_states);
        return -ENOMEM;
}

void
rte_service_finalize(void)
{
        if (!rte_service_library_initialized)
                return;

        rte_free(rte_services);
        rte_free(lcore_states);

        rte_service_library_initialized = 0;
}

/* returns 1 if service is registered and has not been unregistered
 * Returns 0 if service never registered, or has been unregistered
 */
static inline int
service_valid(uint32_t id)
{
        return !!(rte_services[id].internal_flags & SERVICE_F_REGISTERED);
}

/* validate ID and retrieve service pointer, or return error value */
#define SERVICE_VALID_GET_OR_ERR_RET(id, service, retval) do {          \
        if (id >= RTE_SERVICE_NUM_MAX || !service_valid(id))            \
                return retval;                                          \
        service = &rte_services[id];                                    \
} while (0)

/* returns 1 if statistics should be collected for service
 * Returns 0 if statistics should not be collected for service
 */
static inline int
service_stats_enabled(struct rte_service_spec_impl *impl)
{
        return !!(impl->internal_flags & SERVICE_F_STATS_ENABLED);
}

static inline int
service_mt_safe(struct rte_service_spec_impl *s)
{
        return !!(s->spec.capabilities & RTE_SERVICE_CAP_MT_SAFE);
}

int32_t
rte_service_set_stats_enable(uint32_t id, int32_t enabled)
{
        struct rte_service_spec_impl *s;
        SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);

        if (enabled)
                s->internal_flags |= SERVICE_F_STATS_ENABLED;
        else
                s->internal_flags &= ~(SERVICE_F_STATS_ENABLED);

        return 0;
}

int32_t
rte_service_set_runstate_mapped_check(uint32_t id, int32_t enabled)
{
        struct rte_service_spec_impl *s;
        SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);

        if (enabled)
                s->internal_flags |= SERVICE_F_START_CHECK;
        else
                s->internal_flags &= ~(SERVICE_F_START_CHECK);

        return 0;
}

uint32_t
rte_service_get_count(void)
{
        return rte_service_count;
}

int32_t
rte_service_get_by_name(const char *name, uint32_t *service_id)
{
        if (!service_id)
                return -EINVAL;

        int i;
        for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
                if (service_valid(i) &&
                                strcmp(name, rte_services[i].spec.name) == 0) {
                        *service_id = i;
                        return 0;
                }
        }

        return -ENODEV;
}

const char *
rte_service_get_name(uint32_t id)
{
        struct rte_service_spec_impl *s;
        SERVICE_VALID_GET_OR_ERR_RET(id, s, 0);
        return s->spec.name;
}

int32_t
rte_service_probe_capability(uint32_t id, uint32_t capability)
{
        struct rte_service_spec_impl *s;
        SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
        return !!(s->spec.capabilities & capability);
}

int32_t
rte_service_component_register(const struct rte_service_spec *spec,
                               uint32_t *id_ptr)
{
        uint32_t i;
        int32_t free_slot = -1;

        if (spec->callback == NULL || strlen(spec->name) == 0)
                return -EINVAL;

        for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
                if (!service_valid(i)) {
                        free_slot = i;
                        break;
                }
        }

        if ((free_slot < 0) || (i == RTE_SERVICE_NUM_MAX))
                return -ENOSPC;

        struct rte_service_spec_impl *s = &rte_services[free_slot];
        s->spec = *spec;
        s->internal_flags |= SERVICE_F_REGISTERED | SERVICE_F_START_CHECK;

        rte_smp_wmb();
        rte_service_count++;

        if (id_ptr)
                *id_ptr = free_slot;

        return 0;
}
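
/* Usage sketch (illustrative only; the component name and callback below
 * are hypothetical, not part of this library):
 *
 *     static int32_t my_service_cb(void *userdata) { return 0; }
 *
 *     struct rte_service_spec spec = {
 *             .name = "my_service",
 *             .callback = my_service_cb,
 *             .callback_userdata = NULL,
 *     };
 *     uint32_t id;
 *     int ret = rte_service_component_register(&spec, &id);
 */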

int32_t
rte_service_component_unregister(uint32_t id)
{
        uint32_t i;
        struct rte_service_spec_impl *s;
        SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);

        rte_service_count--;
        rte_smp_wmb();

        s->internal_flags &= ~(SERVICE_F_REGISTERED);

        /* clear the run-bit in all cores */
        for (i = 0; i < RTE_MAX_LCORE; i++)
                lcore_states[i].service_mask &= ~(UINT64_C(1) << id);

        memset(&rte_services[id], 0, sizeof(struct rte_service_spec_impl));

        return 0;
}

int32_t
rte_service_component_runstate_set(uint32_t id, uint32_t runstate)
{
        struct rte_service_spec_impl *s;
        SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);

        if (runstate)
                s->comp_runstate = RUNSTATE_RUNNING;
        else
                s->comp_runstate = RUNSTATE_STOPPED;

        rte_smp_wmb();
        return 0;
}

int32_t
rte_service_runstate_set(uint32_t id, uint32_t runstate)
{
        struct rte_service_spec_impl *s;
        SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);

        if (runstate)
                s->app_runstate = RUNSTATE_RUNNING;
        else
                s->app_runstate = RUNSTATE_STOPPED;

        rte_smp_wmb();
        return 0;
}

int32_t
rte_service_runstate_get(uint32_t id)
{
        struct rte_service_spec_impl *s;
        SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
        rte_smp_rmb();

        int check_disabled = !(s->internal_flags & SERVICE_F_START_CHECK);
        int lcore_mapped = (rte_atomic32_read(&s->num_mapped_cores) > 0);

        return (s->app_runstate == RUNSTATE_RUNNING) &&
                (s->comp_runstate == RUNSTATE_RUNNING) &&
                (check_disabled | lcore_mapped);
}
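
/* Illustrative note: a service only reports as running when both the
 * component and the application runstates are set, e.g.:
 *
 *     rte_service_component_runstate_set(id, 1);
 *     rte_service_runstate_set(id, 1);
 *     // rte_service_runstate_get(id) now returns 1, provided an lcore
 *     // is mapped or the start-check was disabled for this service.
 */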

static inline void
rte_service_runner_do_callback(struct rte_service_spec_impl *s,
                               struct core_state *cs, uint32_t service_idx)
{
        void *userdata = s->spec.callback_userdata;

        if (service_stats_enabled(s)) {
                uint64_t start = rte_rdtsc();
                s->spec.callback(userdata);
                uint64_t end = rte_rdtsc();
                s->cycles_spent += end - start;
                cs->calls_per_service[service_idx]++;
                s->calls++;
        } else
                s->spec.callback(userdata);
}


static inline int32_t
service_run(uint32_t i, struct core_state *cs, uint64_t service_mask)
{
        if (!service_valid(i))
                return -EINVAL;
        struct rte_service_spec_impl *s = &rte_services[i];
        if (s->comp_runstate != RUNSTATE_RUNNING ||
                        s->app_runstate != RUNSTATE_RUNNING ||
                        !(service_mask & (UINT64_C(1) << i))) {
                cs->service_active_on_lcore[i] = 0;
                return -ENOEXEC;
        }

        cs->service_active_on_lcore[i] = 1;

        /* The execute lock is only needed when the service is not MT safe
         * and more than one core is mapped to it; otherwise the callback
         * can run without any atomic synchronization.
         */
        const int use_atomics = (service_mt_safe(s) == 0) &&
                                (rte_atomic32_read(&s->num_mapped_cores) > 1);
        if (use_atomics) {
                if (!rte_atomic32_cmpset((uint32_t *)&s->execute_lock, 0, 1))
                        return -EBUSY;

                rte_service_runner_do_callback(s, cs, i);
                rte_atomic32_clear(&s->execute_lock);
        } else
                rte_service_runner_do_callback(s, cs, i);

        return 0;
}

int32_t
rte_service_may_be_active(uint32_t id)
{
        uint32_t ids[RTE_MAX_LCORE] = {0};
        int32_t lcore_count = rte_service_lcore_list(ids, RTE_MAX_LCORE);
        int i;

        if (!service_valid(id))
                return -EINVAL;

        for (i = 0; i < lcore_count; i++) {
                /* index by the returned lcore id, not the list position */
                if (lcore_states[ids[i]].service_active_on_lcore[id])
                        return 1;
        }

        return 0;
}

int32_t
rte_service_run_iter_on_app_lcore(uint32_t id, uint32_t serialize_mt_unsafe)
{
        /* run service on calling core, using all-ones as the service mask */
        if (!service_valid(id))
                return -EINVAL;

        struct core_state *cs = &lcore_states[rte_lcore_id()];
        struct rte_service_spec_impl *s = &rte_services[id];

        /* Atomically add this core to the mapped cores first, then examine if
         * we can run the service. This avoids a race condition between
         * checking the value, and atomically adding to the mapped count.
         */
        if (serialize_mt_unsafe)
                rte_atomic32_inc(&s->num_mapped_cores);

        if (service_mt_safe(s) == 0 &&
                        rte_atomic32_read(&s->num_mapped_cores) > 1) {
                if (serialize_mt_unsafe)
                        rte_atomic32_dec(&s->num_mapped_cores);
                return -EBUSY;
        }

        int ret = service_run(id, cs, UINT64_MAX);

        if (serialize_mt_unsafe)
                rte_atomic32_dec(&s->num_mapped_cores);

        return ret;
}
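
/* Usage sketch (illustrative): run one iteration of a service on the
 * calling application lcore, serializing against MT-unsafe services:
 *
 *     int ret = rte_service_run_iter_on_app_lcore(id, 1);
 *     if (ret == -EBUSY)
 *             ; // the service is running on another core; retry later
 */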

static int32_t
rte_service_runner_func(void *arg)
{
        RTE_SET_USED(arg);
        uint32_t i;
        const int lcore = rte_lcore_id();
        struct core_state *cs = &lcore_states[lcore];

        while (lcore_states[lcore].runstate == RUNSTATE_RUNNING) {
                const uint64_t service_mask = cs->service_mask;

                for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
                        /* return value ignored as no change to code flow */
                        service_run(i, cs, service_mask);
                }

                cs->loops++;

                rte_smp_rmb();
        }

        lcore_config[lcore].state = WAIT;

        return 0;
}

int32_t
rte_service_lcore_count(void)
{
        int32_t count = 0;
        uint32_t i;
        for (i = 0; i < RTE_MAX_LCORE; i++)
                count += lcore_states[i].is_service_core;
        return count;
}

int32_t
rte_service_lcore_list(uint32_t array[], uint32_t n)
{
        uint32_t count = rte_service_lcore_count();
        if (count > n)
                return -ENOMEM;

        if (!array)
                return -EINVAL;

        uint32_t i;
        uint32_t idx = 0;
        for (i = 0; i < RTE_MAX_LCORE; i++) {
                struct core_state *cs = &lcore_states[i];
                if (cs->is_service_core) {
                        array[idx] = i;
                        idx++;
                }
        }

        return count;
}

int32_t
rte_service_lcore_count_services(uint32_t lcore)
{
        if (lcore >= RTE_MAX_LCORE)
                return -EINVAL;

        struct core_state *cs = &lcore_states[lcore];
        if (!cs->is_service_core)
                return -ENOTSUP;

        return __builtin_popcountll(cs->service_mask);
}

int32_t
rte_service_start_with_defaults(void)
{
        /* create a default mapping from cores to services, then start the
         * services to make them transparent to unaware applications.
         */
        uint32_t i;
        int ret;
        uint32_t count = rte_service_get_count();

        int32_t lcore_iter = 0;
        uint32_t ids[RTE_MAX_LCORE] = {0};
        int32_t lcore_count = rte_service_lcore_list(ids, RTE_MAX_LCORE);

        if (lcore_count == 0)
                return -ENOTSUP;

        for (i = 0; (int)i < lcore_count; i++)
                rte_service_lcore_start(ids[i]);

        for (i = 0; i < count; i++) {
                /* Do a 1:1 core mapping here, with each service assigned a
                 * single core by default. If there are more services than
                 * service cores, multiple services multiplex onto a single
                 * core; with equal numbers of each, the mapping stays 1:1.
                 */
                ret = rte_service_map_lcore_set(i, ids[lcore_iter], 1);
                if (ret)
                        return -ENODEV;

                lcore_iter++;
                if (lcore_iter >= lcore_count)
                        lcore_iter = 0;

                ret = rte_service_runstate_set(i, 1);
                if (ret)
                        return -ENOEXEC;
        }

        return 0;
}
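
/* Usage sketch (illustrative; assumes the EAL was started with service
 * cores, e.g. via the -s service coremask option):
 *
 *     if (rte_service_start_with_defaults() != 0)
 *             rte_exit(EXIT_FAILURE, "cannot start service cores\n");
 */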

static int32_t
service_update(struct rte_service_spec *service, uint32_t lcore,
                uint32_t *set, uint32_t *enabled)
{
        uint32_t i;
        int32_t sid = -1;

        for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
                if ((struct rte_service_spec *)&rte_services[i] == service &&
                                service_valid(i)) {
                        sid = i;
                        break;
                }
        }

        if (sid == -1 || lcore >= RTE_MAX_LCORE)
                return -EINVAL;

        if (!lcore_states[lcore].is_service_core)
                return -EINVAL;

        uint64_t sid_mask = UINT64_C(1) << sid;
        if (set) {
                uint64_t lcore_mapped = lcore_states[lcore].service_mask &
                        sid_mask;

                if (*set && !lcore_mapped) {
                        lcore_states[lcore].service_mask |= sid_mask;
                        rte_atomic32_inc(&rte_services[sid].num_mapped_cores);
                }
                if (!*set && lcore_mapped) {
                        lcore_states[lcore].service_mask &= ~(sid_mask);
                        rte_atomic32_dec(&rte_services[sid].num_mapped_cores);
                }
        }

        if (enabled)
                *enabled = !!(lcore_states[lcore].service_mask & (sid_mask));

        rte_smp_wmb();

        return 0;
}

int32_t
rte_service_map_lcore_set(uint32_t id, uint32_t lcore, uint32_t enabled)
{
        struct rte_service_spec_impl *s;
        SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
        uint32_t on = enabled > 0;
        return service_update(&s->spec, lcore, &on, NULL);
}

int32_t
rte_service_map_lcore_get(uint32_t id, uint32_t lcore)
{
        struct rte_service_spec_impl *s;
        SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
        uint32_t enabled;
        int ret = service_update(&s->spec, lcore, NULL, &enabled);
        if (ret == 0)
                return enabled;
        return ret;
}

static void
set_lcore_state(uint32_t lcore, int32_t state)
{
        /* mark core state in hugepage backed config */
        struct rte_config *cfg = rte_eal_get_configuration();
        cfg->lcore_role[lcore] = state;

        /* mark state in process local lcore_config */
        lcore_config[lcore].core_role = state;

        /* update per-lcore optimized state tracking */
        lcore_states[lcore].is_service_core = (state == ROLE_SERVICE);
}

int32_t
rte_service_lcore_reset_all(void)
{
        /* loop over cores, reset all to mask 0 */
        uint32_t i;
        for (i = 0; i < RTE_MAX_LCORE; i++) {
                if (lcore_states[i].is_service_core) {
                        lcore_states[i].service_mask = 0;
                        set_lcore_state(i, ROLE_RTE);
                        lcore_states[i].runstate = RUNSTATE_STOPPED;
                }
        }
        for (i = 0; i < RTE_SERVICE_NUM_MAX; i++)
                rte_atomic32_set(&rte_services[i].num_mapped_cores, 0);

        rte_smp_wmb();

        return 0;
}

int32_t
rte_service_lcore_add(uint32_t lcore)
{
        if (lcore >= RTE_MAX_LCORE)
                return -EINVAL;
        if (lcore_states[lcore].is_service_core)
                return -EALREADY;

        set_lcore_state(lcore, ROLE_SERVICE);

        /* ensure that after adding a core the mask and state are defaults */
        lcore_states[lcore].service_mask = 0;
        lcore_states[lcore].runstate = RUNSTATE_STOPPED;

        rte_smp_wmb();

        return rte_eal_wait_lcore(lcore);
}

int32_t
rte_service_lcore_del(uint32_t lcore)
{
        if (lcore >= RTE_MAX_LCORE)
                return -EINVAL;

        struct core_state *cs = &lcore_states[lcore];
        if (!cs->is_service_core)
                return -EINVAL;

        if (cs->runstate != RUNSTATE_STOPPED)
                return -EBUSY;

        set_lcore_state(lcore, ROLE_RTE);

        rte_smp_wmb();
        return 0;
}

int32_t
rte_service_lcore_start(uint32_t lcore)
{
        if (lcore >= RTE_MAX_LCORE)
                return -EINVAL;

        struct core_state *cs = &lcore_states[lcore];
        if (!cs->is_service_core)
                return -EINVAL;

        if (cs->runstate == RUNSTATE_RUNNING)
                return -EALREADY;

        /* Set the core to the run state before launching; otherwise the
         * runner function would return immediately, as it is the runstate
         * that keeps it in the service poll loop.
         */
        lcore_states[lcore].runstate = RUNSTATE_RUNNING;

        int ret = rte_eal_remote_launch(rte_service_runner_func, NULL, lcore);
        /* returns -EBUSY if the core is already launched, 0 on success */
        return ret;
}

int32_t
rte_service_lcore_stop(uint32_t lcore)
{
        if (lcore >= RTE_MAX_LCORE)
                return -EINVAL;

        if (lcore_states[lcore].runstate == RUNSTATE_STOPPED)
                return -EALREADY;

        uint32_t i;
        uint64_t service_mask = lcore_states[lcore].service_mask;
        for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
                int32_t enabled = service_mask & (UINT64_C(1) << i);
                int32_t service_running = rte_service_runstate_get(i);
                int32_t only_core = (1 ==
                        rte_atomic32_read(&rte_services[i].num_mapped_cores));

                /* if the core is mapped, and the service is running, and this
                 * is the only core that is mapped, the service would cease to
                 * run if this core stopped, so fail instead.
                 */
                if (enabled && service_running && only_core)
                        return -EBUSY;
        }

        lcore_states[lcore].runstate = RUNSTATE_STOPPED;

        return 0;
}
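
/* Typical service lcore lifecycle (illustrative):
 *
 *     rte_service_lcore_add(lcore);             // make lcore a service core
 *     rte_service_map_lcore_set(id, lcore, 1);  // map a service to it
 *     rte_service_lcore_start(lcore);           // launch the poll loop
 *     ...
 *     rte_service_lcore_stop(lcore);            // -EBUSY if this lcore is
 *                                               // the last one running `id`
 *     rte_service_lcore_del(lcore);
 */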

int32_t
rte_service_attr_get(uint32_t id, uint32_t attr_id, uint64_t *attr_value)
{
        struct rte_service_spec_impl *s;
        SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);

        if (!attr_value)
                return -EINVAL;

        switch (attr_id) {
        case RTE_SERVICE_ATTR_CYCLES:
                *attr_value = s->cycles_spent;
                return 0;
        case RTE_SERVICE_ATTR_CALL_COUNT:
                *attr_value = s->calls;
                return 0;
        default:
                return -EINVAL;
        }
}
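
/* Usage sketch (illustrative): read the call count of a service:
 *
 *     uint64_t calls;
 *     if (rte_service_attr_get(id, RTE_SERVICE_ATTR_CALL_COUNT, &calls) == 0)
 *             printf("service calls: %" PRIu64 "\n", calls);
 */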

int32_t
rte_service_lcore_attr_get(uint32_t lcore, uint32_t attr_id,
                           uint64_t *attr_value)
{
        struct core_state *cs;

        if (lcore >= RTE_MAX_LCORE || !attr_value)
                return -EINVAL;

        cs = &lcore_states[lcore];
        if (!cs->is_service_core)
                return -ENOTSUP;

        switch (attr_id) {
        case RTE_SERVICE_LCORE_ATTR_LOOPS:
                *attr_value = cs->loops;
                return 0;
        default:
                return -EINVAL;
        }
}

static void
rte_service_dump_one(FILE *f, struct rte_service_spec_impl *s,
                     uint64_t all_cycles, uint32_t reset)
{
        /* avoid divide by zero */
        if (all_cycles == 0)
                all_cycles = 1;

        int calls = 1;
        if (s->calls != 0)
                calls = s->calls;

        if (reset) {
                s->cycles_spent = 0;
                s->calls = 0;
                return;
        }

        if (f == NULL)
                return;

        fprintf(f, "  %s: stats %d\tcalls %"PRIu64"\tcycles %"
                        PRIu64"\tavg: %"PRIu64"\n",
                        s->spec.name, service_stats_enabled(s), s->calls,
                        s->cycles_spent, s->cycles_spent / calls);
}

int32_t
rte_service_attr_reset_all(uint32_t id)
{
        struct rte_service_spec_impl *s;
        SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);

        int reset = 1;
        rte_service_dump_one(NULL, s, 0, reset);
        return 0;
}

int32_t
rte_service_lcore_attr_reset_all(uint32_t lcore)
{
        struct core_state *cs;

        if (lcore >= RTE_MAX_LCORE)
                return -EINVAL;

        cs = &lcore_states[lcore];
        if (!cs->is_service_core)
                return -ENOTSUP;

        cs->loops = 0;

        return 0;
}

static void
service_dump_calls_per_lcore(FILE *f, uint32_t lcore, uint32_t reset)
{
        uint32_t i;
        struct core_state *cs = &lcore_states[lcore];

        fprintf(f, "%02"PRIu32"\t", lcore);
        for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
                if (!service_valid(i))
                        continue;
                fprintf(f, "%"PRIu64"\t", cs->calls_per_service[i]);
                if (reset)
                        cs->calls_per_service[i] = 0;
        }
        fprintf(f, "\n");
}

int32_t
rte_service_dump(FILE *f, uint32_t id)
{
        uint32_t i;
        int print_one = (id != UINT32_MAX);

        uint64_t total_cycles = 0;

        for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
                if (!service_valid(i))
                        continue;
                total_cycles += rte_services[i].cycles_spent;
        }

        /* print only the specified service */
        if (print_one) {
                struct rte_service_spec_impl *s;
                SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
                fprintf(f, "Service %s Summary\n", s->spec.name);
                uint32_t reset = 0;
                rte_service_dump_one(f, s, total_cycles, reset);
                return 0;
        }

        /* print all services, as UINT32_MAX was passed as id */
        fprintf(f, "Services Summary\n");
        for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
                if (!service_valid(i))
                        continue;
                uint32_t reset = 0;
                rte_service_dump_one(f, &rte_services[i], total_cycles, reset);
        }

        fprintf(f, "Service Cores Summary\n");
        for (i = 0; i < RTE_MAX_LCORE; i++) {
                if (lcore_config[i].core_role != ROLE_SERVICE)
                        continue;

                uint32_t reset = 0;
                service_dump_calls_per_lcore(f, i, reset);
        }

        return 0;
}