/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <stdio.h>
#include <unistd.h>
#include <inttypes.h>
#include <limits.h>
#include <string.h>

#include <rte_compat.h>
#include <rte_service.h>
#include <rte_service_component.h>

#include <rte_eal.h>
#include <rte_lcore.h>
#include <rte_common.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_atomic.h>
#include <rte_memory.h>
#include <rte_malloc.h>
#include <rte_spinlock.h>

#include "eal_private.h"

#define RTE_SERVICE_NUM_MAX 64

#define SERVICE_F_REGISTERED    (1 << 0)
#define SERVICE_F_STATS_ENABLED (1 << 1)
#define SERVICE_F_START_CHECK   (1 << 2)

/* runstates for services and lcores, denoting if they are active or not */
#define RUNSTATE_STOPPED 0
#define RUNSTATE_RUNNING 1

/* internal representation of a service */
struct rte_service_spec_impl {
        /* public part of the struct */
        struct rte_service_spec spec;

        /* spin lock that when set indicates a service core is currently
         * running this service callback. When not set, a core may take the
         * lock and then run the service callback.
         */
        rte_spinlock_t execute_lock;

        /* API set/get-able variables */
        int8_t app_runstate;
        int8_t comp_runstate;
        uint8_t internal_flags;

        /* per service statistics */
        /* Indicates how many cores the service is mapped to run on.
         * It does not indicate the number of cores the service is running
         * on currently.
         */
        uint32_t num_mapped_cores;
        uint64_t calls;
        uint64_t cycles_spent;
} __rte_cache_aligned;

/* the internal values of a service core */
struct core_state {
        /* bitmask of the service IDs mapped to run on this core */
        uint64_t service_mask;
        uint8_t runstate; /* running or stopped */
        uint8_t is_service_core; /* set if core is currently a service core */
        uint8_t service_active_on_lcore[RTE_SERVICE_NUM_MAX];
        uint64_t loops;
        uint64_t calls_per_service[RTE_SERVICE_NUM_MAX];
} __rte_cache_aligned;

static uint32_t rte_service_count;
static struct rte_service_spec_impl *rte_services;
static struct core_state *lcore_states;
static uint32_t rte_service_library_initialized;
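/* Allocate the service and lcore state arrays, and register as service
 * cores any lcores that were given the SERVICE role at EAL init time
 * (the master lcore is skipped).
 */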
int32_t
rte_service_init(void)
{
        if (rte_service_library_initialized) {
                RTE_LOG(NOTICE, EAL,
                        "service library init() called, init flag %d\n",
                        rte_service_library_initialized);
                return -EALREADY;
        }

        rte_services = rte_calloc("rte_services", RTE_SERVICE_NUM_MAX,
                        sizeof(struct rte_service_spec_impl),
                        RTE_CACHE_LINE_SIZE);
        if (!rte_services) {
                RTE_LOG(ERR, EAL, "error allocating rte services array\n");
                goto fail_mem;
        }

        lcore_states = rte_calloc("rte_service_core_states", RTE_MAX_LCORE,
                        sizeof(struct core_state), RTE_CACHE_LINE_SIZE);
        if (!lcore_states) {
                RTE_LOG(ERR, EAL, "error allocating core states array\n");
                goto fail_mem;
        }

        int i;
        int count = 0;
        struct rte_config *cfg = rte_eal_get_configuration();
        for (i = 0; i < RTE_MAX_LCORE; i++) {
                if (lcore_config[i].core_role == ROLE_SERVICE) {
                        if ((unsigned int)i == cfg->master_lcore)
                                continue;
                        rte_service_lcore_add(i);
                        count++;
                }
        }

        rte_service_library_initialized = 1;
        return 0;
fail_mem:
        rte_free(rte_services);
        rte_free(lcore_states);
        return -ENOMEM;
}

void
rte_service_finalize(void)
{
        if (!rte_service_library_initialized)
                return;

        rte_service_lcore_reset_all();
        rte_eal_mp_wait_lcore();

        rte_free(rte_services);
        rte_free(lcore_states);

        rte_service_library_initialized = 0;
}

/* Returns 1 if the service is registered and has not been unregistered.
 * Returns 0 if the service was never registered, or has been unregistered.
 */
static inline int
service_valid(uint32_t id)
{
        return !!(rte_services[id].internal_flags & SERVICE_F_REGISTERED);
}
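/* Return the internal representation of a service. The caller is expected
 * to have validated the ID, e.g. with service_valid().
 */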
static struct rte_service_spec_impl *
service_get(uint32_t id)
{
        return &rte_services[id];
}

/* validate ID and retrieve service pointer, or return error value */
#define SERVICE_VALID_GET_OR_ERR_RET(id, service, retval) do {          \
        if (id >= RTE_SERVICE_NUM_MAX || !service_valid(id))            \
                return retval;                                          \
        service = &rte_services[id];                                    \
} while (0)

/* Returns 1 if statistics should be collected for the service.
 * Returns 0 if statistics should not be collected.
 */
static inline int
service_stats_enabled(struct rte_service_spec_impl *impl)
{
        return !!(impl->internal_flags & SERVICE_F_STATS_ENABLED);
}

static inline int
service_mt_safe(struct rte_service_spec_impl *s)
{
        return !!(s->spec.capabilities & RTE_SERVICE_CAP_MT_SAFE);
}

int32_t
rte_service_set_stats_enable(uint32_t id, int32_t enabled)
{
        struct rte_service_spec_impl *s;
        SERVICE_VALID_GET_OR_ERR_RET(id, s, 0);

        if (enabled)
                s->internal_flags |= SERVICE_F_STATS_ENABLED;
        else
                s->internal_flags &= ~(SERVICE_F_STATS_ENABLED);

        return 0;
}

int32_t
rte_service_set_runstate_mapped_check(uint32_t id, int32_t enabled)
{
        struct rte_service_spec_impl *s;
        SERVICE_VALID_GET_OR_ERR_RET(id, s, 0);

        if (enabled)
                s->internal_flags |= SERVICE_F_START_CHECK;
        else
                s->internal_flags &= ~(SERVICE_F_START_CHECK);

        return 0;
}

uint32_t
rte_service_get_count(void)
{
        return rte_service_count;
}

int32_t
rte_service_get_by_name(const char *name, uint32_t *service_id)
{
        if (!service_id)
                return -EINVAL;

        int i;
        for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
                if (service_valid(i) &&
                                strcmp(name, rte_services[i].spec.name) == 0) {
                        *service_id = i;
                        return 0;
                }
        }

        return -ENODEV;
}

const char *
rte_service_get_name(uint32_t id)
{
        struct rte_service_spec_impl *s;
        SERVICE_VALID_GET_OR_ERR_RET(id, s, 0);
        return s->spec.name;
}

int32_t
rte_service_probe_capability(uint32_t id, uint32_t capability)
{
        struct rte_service_spec_impl *s;
        SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
        return !!(s->spec.capabilities & capability);
}
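/* Register a service component: find a free slot in the services array,
 * copy the spec into it and mark it registered, with the start-check
 * enabled by default. A minimal usage sketch from a component's point of
 * view, assuming a hypothetical poll callback my_poll():
 *
 *      struct rte_service_spec spec = {
 *              .name = "my_service",
 *              .callback = my_poll,
 *      };
 *      uint32_t id;
 *      int ret = rte_service_component_register(&spec, &id);
 */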
int32_t
rte_service_component_register(const struct rte_service_spec *spec,
                               uint32_t *id_ptr)
{
        uint32_t i;
        int32_t free_slot = -1;

        if (spec->callback == NULL || strlen(spec->name) == 0)
                return -EINVAL;

        for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
                if (!service_valid(i)) {
                        free_slot = i;
                        break;
                }
        }

        if ((free_slot < 0) || (i == RTE_SERVICE_NUM_MAX))
                return -ENOSPC;

        struct rte_service_spec_impl *s = &rte_services[free_slot];
        s->spec = *spec;
        s->internal_flags |= SERVICE_F_REGISTERED | SERVICE_F_START_CHECK;

        rte_service_count++;

        if (id_ptr)
                *id_ptr = free_slot;

        return 0;
}

int32_t
rte_service_component_unregister(uint32_t id)
{
        uint32_t i;
        struct rte_service_spec_impl *s;
        SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);

        rte_service_count--;

        s->internal_flags &= ~(SERVICE_F_REGISTERED);

        /* clear the run-bit in all cores */
        for (i = 0; i < RTE_MAX_LCORE; i++)
                lcore_states[i].service_mask &= ~(UINT64_C(1) << id);

        memset(&rte_services[id], 0, sizeof(struct rte_service_spec_impl));

        return 0;
}

int32_t
rte_service_component_runstate_set(uint32_t id, uint32_t runstate)
{
        struct rte_service_spec_impl *s;
        SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);

        /* comp_runstate acts as the guard variable. Use store-release
         * memory order. This synchronizes with load-acquire in
         * service_run() and rte_service_runstate_get().
         */
        if (runstate)
                __atomic_store_n(&s->comp_runstate, RUNSTATE_RUNNING,
                        __ATOMIC_RELEASE);
        else
                __atomic_store_n(&s->comp_runstate, RUNSTATE_STOPPED,
                        __ATOMIC_RELEASE);

        return 0;
}

int32_t
rte_service_runstate_set(uint32_t id, uint32_t runstate)
{
        struct rte_service_spec_impl *s;
        SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);

        /* app_runstate acts as the guard variable. Use store-release
         * memory order. This synchronizes with load-acquire in
         * service_run() and rte_service_runstate_get().
         */
        if (runstate)
                __atomic_store_n(&s->app_runstate, RUNSTATE_RUNNING,
                        __ATOMIC_RELEASE);
        else
                __atomic_store_n(&s->app_runstate, RUNSTATE_STOPPED,
                        __ATOMIC_RELEASE);

        return 0;
}

int32_t
rte_service_runstate_get(uint32_t id)
{
        struct rte_service_spec_impl *s;
        SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);

        /* comp_runstate and app_runstate act as the guard variables.
         * Use load-acquire memory order. This synchronizes with
         * store-release in service state set functions.
         */
        if (__atomic_load_n(&s->comp_runstate, __ATOMIC_ACQUIRE) ==
                        RUNSTATE_RUNNING &&
            __atomic_load_n(&s->app_runstate, __ATOMIC_ACQUIRE) ==
                        RUNSTATE_RUNNING) {
                int check_disabled = !(s->internal_flags &
                        SERVICE_F_START_CHECK);
                int lcore_mapped = (__atomic_load_n(&s->num_mapped_cores,
                        __ATOMIC_RELAXED) > 0);

                return (check_disabled | lcore_mapped);
        } else
                return 0;
}
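/* Invoke the service callback, accounting for calls and cycles spent when
 * statistics collection is enabled for this service.
 */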
static inline void
service_runner_do_callback(struct rte_service_spec_impl *s,
                           struct core_state *cs, uint32_t service_idx)
{
        void *userdata = s->spec.callback_userdata;

        if (service_stats_enabled(s)) {
                uint64_t start = rte_rdtsc();
                s->spec.callback(userdata);
                uint64_t end = rte_rdtsc();
                s->cycles_spent += end - start;
                cs->calls_per_service[service_idx]++;
                s->calls++;
        } else
                s->spec.callback(userdata);
}

/* Expects that service 's' is valid. */
static int32_t
service_run(uint32_t i, struct core_state *cs, uint64_t service_mask,
            struct rte_service_spec_impl *s, uint32_t serialize_mt_unsafe)
{
        if (!s)
                return -EINVAL;

        /* comp_runstate and app_runstate act as the guard variables.
         * Use load-acquire memory order. This synchronizes with
         * store-release in service state set functions.
         */
        if (__atomic_load_n(&s->comp_runstate, __ATOMIC_ACQUIRE) !=
                        RUNSTATE_RUNNING ||
            __atomic_load_n(&s->app_runstate, __ATOMIC_ACQUIRE) !=
                        RUNSTATE_RUNNING ||
            !(service_mask & (UINT64_C(1) << i))) {
                cs->service_active_on_lcore[i] = 0;
                return -ENOEXEC;
        }

        cs->service_active_on_lcore[i] = 1;

        if ((service_mt_safe(s) == 0) && (serialize_mt_unsafe == 1)) {
                if (!rte_spinlock_trylock(&s->execute_lock))
                        return -EBUSY;

                service_runner_do_callback(s, cs, i);
                rte_spinlock_unlock(&s->execute_lock);
        } else
                service_runner_do_callback(s, cs, i);

        return 0;
}
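/* Report whether the service is flagged active on any service core, i.e.
 * a callback for it may still be executing there.
 */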
int32_t
rte_service_may_be_active(uint32_t id)
{
        uint32_t ids[RTE_MAX_LCORE] = {0};
        int32_t lcore_count = rte_service_lcore_list(ids, RTE_MAX_LCORE);
        int i;

        if (id >= RTE_SERVICE_NUM_MAX || !service_valid(id))
                return -EINVAL;

        for (i = 0; i < lcore_count; i++) {
                if (lcore_states[ids[i]].service_active_on_lcore[id])
                        return 1;
        }

        return 0;
}

int32_t
rte_service_run_iter_on_app_lcore(uint32_t id, uint32_t serialize_mt_unsafe)
{
        struct core_state *cs = &lcore_states[rte_lcore_id()];
        struct rte_service_spec_impl *s;

        SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);

        /* Increment num_mapped_cores to reflect that this core is now
         * mapped and capable of running the service.
         */
        __atomic_add_fetch(&s->num_mapped_cores, 1, __ATOMIC_RELAXED);

        int ret = service_run(id, cs, UINT64_MAX, s, serialize_mt_unsafe);

        __atomic_sub_fetch(&s->num_mapped_cores, 1, __ATOMIC_RELAXED);

        return ret;
}
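/* Main loop of a service lcore: poll every registered service mapped to
 * this core until the core's runstate is set to stopped, then flag the
 * lcore as WAIT so it can be waited on and re-launched.
 */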
static int32_t
service_runner_func(void *arg)
{
        RTE_SET_USED(arg);
        uint32_t i;
        const int lcore = rte_lcore_id();
        struct core_state *cs = &lcore_states[lcore];

        /* runstate acts as the guard variable. Use load-acquire
         * memory order here to synchronize with store-release
         * in runstate update functions.
         */
        while (__atomic_load_n(&cs->runstate, __ATOMIC_ACQUIRE) ==
                        RUNSTATE_RUNNING) {
                const uint64_t service_mask = cs->service_mask;

                for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
                        if (!service_valid(i))
                                continue;
                        /* return value ignored as no change to code flow */
                        service_run(i, cs, service_mask, service_get(i), 1);
                }

                cs->loops++;
        }

        lcore_config[lcore].state = WAIT;

        return 0;
}

int32_t
rte_service_lcore_count(void)
{
        int32_t count = 0;
        uint32_t i;
        for (i = 0; i < RTE_MAX_LCORE; i++)
                count += lcore_states[i].is_service_core;
        return count;
}

int32_t
rte_service_lcore_list(uint32_t array[], uint32_t n)
{
        uint32_t count = rte_service_lcore_count();
        if (count > n)
                return -ENOMEM;

        if (!array)
                return -EINVAL;

        uint32_t i;
        uint32_t idx = 0;
        for (i = 0; i < RTE_MAX_LCORE; i++) {
                struct core_state *cs = &lcore_states[i];
                if (cs->is_service_core) {
                        array[idx] = i;
                        idx++;
                }
        }

        return count;
}

int32_t
rte_service_lcore_count_services(uint32_t lcore)
{
        if (lcore >= RTE_MAX_LCORE)
                return -EINVAL;

        struct core_state *cs = &lcore_states[lcore];
        if (!cs->is_service_core)
                return -ENOTSUP;

        return __builtin_popcountll(cs->service_mask);
}

int32_t
rte_service_start_with_defaults(void)
{
        /* create a default mapping from cores to services, then start the
         * services to make them transparent to unaware applications.
         */
        uint32_t i;
        int ret;
        uint32_t count = rte_service_get_count();

        int32_t lcore_iter = 0;
        uint32_t ids[RTE_MAX_LCORE] = {0};
        int32_t lcore_count = rte_service_lcore_list(ids, RTE_MAX_LCORE);

        if (lcore_count == 0)
                return -ENOTSUP;

        for (i = 0; (int)i < lcore_count; i++)
                rte_service_lcore_start(ids[i]);

        for (i = 0; i < count; i++) {
                /* Do a default 1:1 mapping here: each service is assigned
                 * to a single core, round-robin over the available service
                 * cores. If there are more services than cores, several
                 * services share a core; if the counts match, the mapping
                 * stays 1:1.
                 */
                ret = rte_service_map_lcore_set(i, ids[lcore_iter], 1);
                if (ret)
                        return -ENODEV;

                lcore_iter++;
                if (lcore_iter >= lcore_count)
                        lcore_iter = 0;

                ret = rte_service_runstate_set(i, 1);
                if (ret)
                        return -ENOEXEC;
        }

        return 0;
}
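/* Helper for the mapping APIs: optionally update ('set') and/or report
 * ('enabled') whether service 'sid' is mapped to 'lcore', keeping
 * num_mapped_cores consistent with the mapping.
 */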
static int32_t
service_update(uint32_t sid, uint32_t lcore, uint32_t *set, uint32_t *enabled)
{
        /* validate ID, or return error value */
        if (sid >= RTE_SERVICE_NUM_MAX || !service_valid(sid) ||
            lcore >= RTE_MAX_LCORE || !lcore_states[lcore].is_service_core)
                return -EINVAL;

        uint64_t sid_mask = UINT64_C(1) << sid;
        if (set) {
                uint64_t lcore_mapped = lcore_states[lcore].service_mask &
                        sid_mask;

                if (*set && !lcore_mapped) {
                        lcore_states[lcore].service_mask |= sid_mask;
                        __atomic_add_fetch(&rte_services[sid].num_mapped_cores,
                                1, __ATOMIC_RELAXED);
                }
                if (!*set && lcore_mapped) {
                        lcore_states[lcore].service_mask &= ~(sid_mask);
                        __atomic_sub_fetch(&rte_services[sid].num_mapped_cores,
                                1, __ATOMIC_RELAXED);
                }
        }

        if (enabled)
                *enabled = !!(lcore_states[lcore].service_mask & (sid_mask));

        return 0;
}

int32_t
rte_service_map_lcore_set(uint32_t id, uint32_t lcore, uint32_t enabled)
{
        uint32_t on = enabled > 0;
        return service_update(id, lcore, &on, 0);
}

int32_t
rte_service_map_lcore_get(uint32_t id, uint32_t lcore)
{
        uint32_t enabled;
        int ret = service_update(id, lcore, 0, &enabled);
        if (ret == 0)
                return enabled;
        return ret;
}

static void
set_lcore_state(uint32_t lcore, int32_t state)
{
        /* mark core state in hugepage backed config */
        struct rte_config *cfg = rte_eal_get_configuration();
        cfg->lcore_role[lcore] = state;

        /* mark state in process local lcore_config */
        lcore_config[lcore].core_role = state;

        /* update per-lcore optimized state tracking */
        lcore_states[lcore].is_service_core = (state == ROLE_SERVICE);
}

int32_t
rte_service_lcore_reset_all(void)
{
        /* loop over cores, reset all to mask 0 */
        uint32_t i;
        for (i = 0; i < RTE_MAX_LCORE; i++) {
                if (lcore_states[i].is_service_core) {
                        lcore_states[i].service_mask = 0;
                        set_lcore_state(i, ROLE_RTE);
                        /* runstate acts as the guard variable. Use
                         * store-release memory order here to synchronize
                         * with load-acquire in runstate read functions.
                         */
                        __atomic_store_n(&lcore_states[i].runstate,
                                RUNSTATE_STOPPED, __ATOMIC_RELEASE);
                }
        }
        for (i = 0; i < RTE_SERVICE_NUM_MAX; i++)
                __atomic_store_n(&rte_services[i].num_mapped_cores, 0,
                        __ATOMIC_RELAXED);

        return 0;
}
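/* Turn an lcore into a service core: reset its service mask and leave it
 * in the stopped runstate until rte_service_lcore_start() is called.
 */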
int32_t
rte_service_lcore_add(uint32_t lcore)
{
        if (lcore >= RTE_MAX_LCORE)
                return -EINVAL;
        if (lcore_states[lcore].is_service_core)
                return -EALREADY;

        set_lcore_state(lcore, ROLE_SERVICE);

        /* ensure that after adding a core the mask and state are defaults */
        lcore_states[lcore].service_mask = 0;
        /* Use store-release memory order here to synchronize with
         * load-acquire in runstate read functions.
         */
        __atomic_store_n(&lcore_states[lcore].runstate, RUNSTATE_STOPPED,
                __ATOMIC_RELEASE);

        return rte_eal_wait_lcore(lcore);
}

int32_t
rte_service_lcore_del(uint32_t lcore)
{
        if (lcore >= RTE_MAX_LCORE)
                return -EINVAL;

        struct core_state *cs = &lcore_states[lcore];
        if (!cs->is_service_core)
                return -EINVAL;
        /* runstate acts as the guard variable. Use load-acquire
         * memory order here to synchronize with store-release
         * in runstate update functions.
         */
        if (__atomic_load_n(&cs->runstate, __ATOMIC_ACQUIRE) !=
                        RUNSTATE_STOPPED)
                return -EBUSY;

        set_lcore_state(lcore, ROLE_RTE);

        rte_smp_wmb();
        return 0;
}

int32_t
rte_service_lcore_start(uint32_t lcore)
{
        if (lcore >= RTE_MAX_LCORE)
                return -EINVAL;

        struct core_state *cs = &lcore_states[lcore];
        if (!cs->is_service_core)
                return -EINVAL;

        /* runstate acts as the guard variable. Use load-acquire
         * memory order here to synchronize with store-release
         * in runstate update functions.
         */
        if (__atomic_load_n(&cs->runstate, __ATOMIC_ACQUIRE) ==
                        RUNSTATE_RUNNING)
                return -EALREADY;

        /* Set the core to the running state first and then launch it;
         * otherwise the runner function would return immediately, since
         * the runstate check keeps the core in the service poll loop.
         *
         * Use store-release memory order here to synchronize with
         * load-acquire in runstate read functions.
         */
        __atomic_store_n(&cs->runstate, RUNSTATE_RUNNING, __ATOMIC_RELEASE);

        int ret = rte_eal_remote_launch(service_runner_func, 0, lcore);
        /* returns -EBUSY if the core is already launched, 0 on success */
        return ret;
}

int32_t
rte_service_lcore_stop(uint32_t lcore)
{
        if (lcore >= RTE_MAX_LCORE)
                return -EINVAL;

        /* runstate acts as the guard variable. Use load-acquire
         * memory order here to synchronize with store-release
         * in runstate update functions.
         */
        if (__atomic_load_n(&lcore_states[lcore].runstate, __ATOMIC_ACQUIRE) ==
                        RUNSTATE_STOPPED)
                return -EALREADY;

        uint32_t i;
        uint64_t service_mask = lcore_states[lcore].service_mask;
        for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
                int32_t enabled = service_mask & (UINT64_C(1) << i);
                int32_t service_running = rte_service_runstate_get(i);
                int32_t only_core = (1 ==
                        __atomic_load_n(&rte_services[i].num_mapped_cores,
                                __ATOMIC_RELAXED));

                /* if the core is mapped, and the service is running, and this
                 * is the only core that is mapped, the service would cease to
                 * run if this core stopped, so fail instead.
                 */
                if (enabled && service_running && only_core)
                        return -EBUSY;
        }

        /* Use store-release memory order here to synchronize with
         * load-acquire in runstate read functions.
         */
        __atomic_store_n(&lcore_states[lcore].runstate, RUNSTATE_STOPPED,
                __ATOMIC_RELEASE);

        return 0;
}

int32_t
rte_service_attr_get(uint32_t id, uint32_t attr_id, uint64_t *attr_value)
{
        struct rte_service_spec_impl *s;
        SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);

        if (!attr_value)
                return -EINVAL;

        switch (attr_id) {
        case RTE_SERVICE_ATTR_CYCLES:
                *attr_value = s->cycles_spent;
                return 0;
        case RTE_SERVICE_ATTR_CALL_COUNT:
                *attr_value = s->calls;
                return 0;
        default:
                return -EINVAL;
        }
}

int32_t
rte_service_lcore_attr_get(uint32_t lcore, uint32_t attr_id,
                           uint64_t *attr_value)
{
        struct core_state *cs;

        if (lcore >= RTE_MAX_LCORE || !attr_value)
                return -EINVAL;

        cs = &lcore_states[lcore];
        if (!cs->is_service_core)
                return -ENOTSUP;

        switch (attr_id) {
        case RTE_SERVICE_LCORE_ATTR_LOOPS:
                *attr_value = cs->loops;
                return 0;
        default:
                return -EINVAL;
        }
}
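/* Print the statistics of a single service to 'f', or reset them when
 * 'reset' is non-zero (in which case nothing is printed).
 */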
static void
service_dump_one(FILE *f, struct rte_service_spec_impl *s, uint32_t reset)
{
        /* avoid divide by zero */
        int calls = 1;
        if (s->calls != 0)
                calls = s->calls;

        if (reset) {
                s->cycles_spent = 0;
                s->calls = 0;
                return;
        }

        if (f == NULL)
                return;

        fprintf(f, "  %s: stats %d\tcalls %"PRIu64"\tcycles %"
                        PRIu64"\tavg: %"PRIu64"\n",
                        s->spec.name, service_stats_enabled(s), s->calls,
                        s->cycles_spent, s->cycles_spent / calls);
}

int32_t
rte_service_attr_reset_all(uint32_t id)
{
        struct rte_service_spec_impl *s;
        SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);

        int reset = 1;
        service_dump_one(NULL, s, reset);
        return 0;
}

int32_t
rte_service_lcore_attr_reset_all(uint32_t lcore)
{
        struct core_state *cs;

        if (lcore >= RTE_MAX_LCORE)
                return -EINVAL;

        cs = &lcore_states[lcore];
        if (!cs->is_service_core)
                return -ENOTSUP;

        cs->loops = 0;

        return 0;
}
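/* Print the per-service call counts of one lcore, optionally resetting
 * them as they are printed.
 */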
static void
service_dump_calls_per_lcore(FILE *f, uint32_t lcore, uint32_t reset)
{
        uint32_t i;
        struct core_state *cs = &lcore_states[lcore];

        fprintf(f, "%02d\t", lcore);
        for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
                if (!service_valid(i))
                        continue;
                fprintf(f, "%"PRIu64"\t", cs->calls_per_service[i]);
                if (reset)
                        cs->calls_per_service[i] = 0;
        }
        fprintf(f, "\n");
}

int32_t
rte_service_dump(FILE *f, uint32_t id)
{
        uint32_t i;
        int print_one = (id != UINT32_MAX);

        /* print only the specified service */
        if (print_one) {
                struct rte_service_spec_impl *s;
                SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
                fprintf(f, "Service %s Summary\n", s->spec.name);
                uint32_t reset = 0;
                service_dump_one(f, s, reset);
                return 0;
        }

        /* print all services, as UINT32_MAX was passed as id */
        fprintf(f, "Services Summary\n");
        for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
                if (!service_valid(i))
                        continue;
                uint32_t reset = 0;
                service_dump_one(f, &rte_services[i], reset);
        }

        fprintf(f, "Service Cores Summary\n");
        for (i = 0; i < RTE_MAX_LCORE; i++) {
                if (lcore_config[i].core_role != ROLE_SERVICE)
                        continue;

                uint32_t reset = 0;
                service_dump_calls_per_lcore(f, i, reset);
        }

        return 0;
}