service: fix core mapping reset
[dpdk.git] lib/librte_eal/common/rte_service.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <stdio.h>
#include <unistd.h>
#include <inttypes.h>
#include <limits.h>
#include <string.h>

#include <rte_compat.h>
#include <rte_service.h>
#include <rte_service_component.h>

#include <rte_eal.h>
#include <rte_lcore.h>
#include <rte_common.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_atomic.h>
#include <rte_memory.h>
#include <rte_malloc.h>
#include <rte_spinlock.h>

#include "eal_private.h"

#define RTE_SERVICE_NUM_MAX 64

#define SERVICE_F_REGISTERED    (1 << 0)
#define SERVICE_F_STATS_ENABLED (1 << 1)
#define SERVICE_F_START_CHECK   (1 << 2)

/* runstates for services and lcores, denoting if they are active or not */
#define RUNSTATE_STOPPED 0
#define RUNSTATE_RUNNING 1

/* internal representation of a service */
struct rte_service_spec_impl {
        /* public part of the struct */
        struct rte_service_spec spec;

        /* spin lock that, when held, indicates a service core is currently
         * running this service callback. When not held, a core may take the
         * lock and then run the service callback.
         */
        rte_spinlock_t execute_lock;

        /* API set/get-able variables */
        int8_t app_runstate;
        int8_t comp_runstate;
        uint8_t internal_flags;

        /* per service statistics */
        /* Indicates how many cores the service is mapped to run on.
         * It does not indicate the number of cores the service is
         * currently running on.
         */
        uint32_t num_mapped_cores;
        uint64_t calls;
        uint64_t cycles_spent;
} __rte_cache_aligned;

/* the internal values of a service core */
struct core_state {
        /* map of service IDs run on this core */
        uint64_t service_mask;
        uint8_t runstate; /* running or stopped */
        uint8_t is_service_core; /* set if core is currently a service core */
        uint8_t service_active_on_lcore[RTE_SERVICE_NUM_MAX];
        uint64_t loops;
        uint64_t calls_per_service[RTE_SERVICE_NUM_MAX];
} __rte_cache_aligned;

static uint32_t rte_service_count;
static struct rte_service_spec_impl *rte_services;
static struct core_state *lcore_states;
static uint32_t rte_service_library_initialized;

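/* Initialize the service library: allocate the service and lcore-state
 * arrays, then register every lcore given the SERVICE role at EAL init
 * (except the master lcore) as a service core.
 */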
int32_t
rte_service_init(void)
{
        if (rte_service_library_initialized) {
                RTE_LOG(NOTICE, EAL,
                        "service library init() called, init flag %d\n",
                        rte_service_library_initialized);
                return -EALREADY;
        }

        rte_services = rte_calloc("rte_services", RTE_SERVICE_NUM_MAX,
                        sizeof(struct rte_service_spec_impl),
                        RTE_CACHE_LINE_SIZE);
        if (!rte_services) {
                RTE_LOG(ERR, EAL, "error allocating rte services array\n");
                goto fail_mem;
        }

        lcore_states = rte_calloc("rte_service_core_states", RTE_MAX_LCORE,
                        sizeof(struct core_state), RTE_CACHE_LINE_SIZE);
        if (!lcore_states) {
                RTE_LOG(ERR, EAL, "error allocating core states array\n");
                goto fail_mem;
        }

        int i;
        int count = 0;
        struct rte_config *cfg = rte_eal_get_configuration();
        for (i = 0; i < RTE_MAX_LCORE; i++) {
                if (lcore_config[i].core_role == ROLE_SERVICE) {
                        if ((unsigned int)i == cfg->master_lcore)
                                continue;
                        rte_service_lcore_add(i);
                        count++;
                }
        }

        rte_service_library_initialized = 1;
        return 0;
fail_mem:
        rte_free(rte_services);
        rte_free(lcore_states);
        return -ENOMEM;
}

void
rte_service_finalize(void)
{
        if (!rte_service_library_initialized)
                return;

        rte_service_lcore_reset_all();
        rte_eal_mp_wait_lcore();

        rte_free(rte_services);
        rte_free(lcore_states);

        rte_service_library_initialized = 0;
}

/* Returns 1 if the service is registered and has not been unregistered.
 * Returns 0 if the service was never registered, or has been unregistered.
 */
static inline int
service_valid(uint32_t id)
{
        return !!(rte_services[id].internal_flags & SERVICE_F_REGISTERED);
}

static struct rte_service_spec_impl *
service_get(uint32_t id)
{
        return &rte_services[id];
}

/* validate ID and retrieve service pointer, or return error value */
#define SERVICE_VALID_GET_OR_ERR_RET(id, service, retval) do {          \
        if (id >= RTE_SERVICE_NUM_MAX || !service_valid(id))            \
                return retval;                                          \
        service = &rte_services[id];                                    \
} while (0)

/* Returns 1 if statistics should be collected for the service.
 * Returns 0 if statistics should not be collected for the service.
 */
static inline int
service_stats_enabled(struct rte_service_spec_impl *impl)
{
        return !!(impl->internal_flags & SERVICE_F_STATS_ENABLED);
}

static inline int
service_mt_safe(struct rte_service_spec_impl *s)
{
        return !!(s->spec.capabilities & RTE_SERVICE_CAP_MT_SAFE);
}

int32_t
rte_service_set_stats_enable(uint32_t id, int32_t enabled)
{
        struct rte_service_spec_impl *s;
        SERVICE_VALID_GET_OR_ERR_RET(id, s, 0);

        if (enabled)
                s->internal_flags |= SERVICE_F_STATS_ENABLED;
        else
                s->internal_flags &= ~(SERVICE_F_STATS_ENABLED);

        return 0;
}

int32_t
rte_service_set_runstate_mapped_check(uint32_t id, int32_t enabled)
{
        struct rte_service_spec_impl *s;
        SERVICE_VALID_GET_OR_ERR_RET(id, s, 0);

        if (enabled)
                s->internal_flags |= SERVICE_F_START_CHECK;
        else
                s->internal_flags &= ~(SERVICE_F_START_CHECK);

        return 0;
}

uint32_t
rte_service_get_count(void)
{
        return rte_service_count;
}

int32_t
rte_service_get_by_name(const char *name, uint32_t *service_id)
{
        if (!service_id)
                return -EINVAL;

        int i;
        for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
                if (service_valid(i) &&
                                strcmp(name, rte_services[i].spec.name) == 0) {
                        *service_id = i;
                        return 0;
                }
        }

        return -ENODEV;
}

const char *
rte_service_get_name(uint32_t id)
{
        struct rte_service_spec_impl *s;
        SERVICE_VALID_GET_OR_ERR_RET(id, s, 0);
        return s->spec.name;
}

int32_t
rte_service_probe_capability(uint32_t id, uint32_t capability)
{
        struct rte_service_spec_impl *s;
        SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
        return !!(s->spec.capabilities & capability);
}

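/* Register a service component: claim the first free slot in the services
 * array, copy in the spec, and mark the slot registered with start-checking
 * enabled by default. The assigned service ID is returned via id_ptr.
 */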
int32_t
rte_service_component_register(const struct rte_service_spec *spec,
                               uint32_t *id_ptr)
{
        uint32_t i;
        int32_t free_slot = -1;

        if (spec->callback == NULL || strlen(spec->name) == 0)
                return -EINVAL;

        for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
                if (!service_valid(i)) {
                        free_slot = i;
                        break;
                }
        }

        if ((free_slot < 0) || (i == RTE_SERVICE_NUM_MAX))
                return -ENOSPC;

        struct rte_service_spec_impl *s = &rte_services[free_slot];
        s->spec = *spec;
        s->internal_flags |= SERVICE_F_REGISTERED | SERVICE_F_START_CHECK;

        rte_service_count++;

        if (id_ptr)
                *id_ptr = free_slot;

        return 0;
}

int32_t
rte_service_component_unregister(uint32_t id)
{
        uint32_t i;
        struct rte_service_spec_impl *s;
        SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);

        rte_service_count--;

        s->internal_flags &= ~(SERVICE_F_REGISTERED);

        /* clear the run-bit in all cores */
        for (i = 0; i < RTE_MAX_LCORE; i++)
                lcore_states[i].service_mask &= ~(UINT64_C(1) << id);

        memset(&rte_services[id], 0, sizeof(struct rte_service_spec_impl));

        return 0;
}

int32_t
rte_service_component_runstate_set(uint32_t id, uint32_t runstate)
{
        struct rte_service_spec_impl *s;
        SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);

        /* comp_runstate acts as the guard variable. Use store-release
         * memory order. This synchronizes with the load-acquire in
         * service_run and rte_service_runstate_get.
         */
        if (runstate)
                __atomic_store_n(&s->comp_runstate, RUNSTATE_RUNNING,
                        __ATOMIC_RELEASE);
        else
                __atomic_store_n(&s->comp_runstate, RUNSTATE_STOPPED,
                        __ATOMIC_RELEASE);

        return 0;
}

int32_t
rte_service_runstate_set(uint32_t id, uint32_t runstate)
{
        struct rte_service_spec_impl *s;
        SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);

        /* app_runstate acts as the guard variable. Use store-release
         * memory order. This synchronizes with the load-acquire in
         * service_run and rte_service_runstate_get.
         */
        if (runstate)
                __atomic_store_n(&s->app_runstate, RUNSTATE_RUNNING,
                        __ATOMIC_RELEASE);
        else
                __atomic_store_n(&s->app_runstate, RUNSTATE_STOPPED,
                        __ATOMIC_RELEASE);

        return 0;
}

int32_t
rte_service_runstate_get(uint32_t id)
{
        struct rte_service_spec_impl *s;
        SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);

        /* comp_runstate and app_runstate act as the guard variables.
         * Use load-acquire memory order. This synchronizes with
         * store-release in service state set functions.
         */
        if (__atomic_load_n(&s->comp_runstate, __ATOMIC_ACQUIRE) ==
                        RUNSTATE_RUNNING &&
            __atomic_load_n(&s->app_runstate, __ATOMIC_ACQUIRE) ==
                        RUNSTATE_RUNNING) {
                int check_disabled = !(s->internal_flags &
                        SERVICE_F_START_CHECK);
                int lcore_mapped = (__atomic_load_n(&s->num_mapped_cores,
                        __ATOMIC_RELAXED) > 0);

                return (check_disabled | lcore_mapped);
        } else
                return 0;
}

static inline void
service_runner_do_callback(struct rte_service_spec_impl *s,
                           struct core_state *cs, uint32_t service_idx)
{
        void *userdata = s->spec.callback_userdata;

        if (service_stats_enabled(s)) {
                uint64_t start = rte_rdtsc();
                s->spec.callback(userdata);
                uint64_t end = rte_rdtsc();
                s->cycles_spent += end - start;
                cs->calls_per_service[service_idx]++;
                s->calls++;
        } else
                s->spec.callback(userdata);
}

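/* Run one iteration of service 'i' for core state 'cs'. The call fails with
 * -ENOEXEC unless both runstates are running and the service is mapped in
 * 'service_mask'; MT-unsafe services are serialized via the execute_lock
 * when 'serialize_mt_unsafe' is set.
 */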
/* Expects the service 's' is valid. */
static int32_t
service_run(uint32_t i, struct core_state *cs, uint64_t service_mask,
            struct rte_service_spec_impl *s, uint32_t serialize_mt_unsafe)
{
        if (!s)
                return -EINVAL;

        /* comp_runstate and app_runstate act as the guard variables.
         * Use load-acquire memory order. This synchronizes with
         * store-release in service state set functions.
         */
        if (__atomic_load_n(&s->comp_runstate, __ATOMIC_ACQUIRE) !=
                        RUNSTATE_RUNNING ||
            __atomic_load_n(&s->app_runstate, __ATOMIC_ACQUIRE) !=
                        RUNSTATE_RUNNING ||
            !(service_mask & (UINT64_C(1) << i))) {
                cs->service_active_on_lcore[i] = 0;
                return -ENOEXEC;
        }

        cs->service_active_on_lcore[i] = 1;

        if ((service_mt_safe(s) == 0) && (serialize_mt_unsafe == 1)) {
                if (!rte_spinlock_trylock(&s->execute_lock))
                        return -EBUSY;

                service_runner_do_callback(s, cs, i);
                rte_spinlock_unlock(&s->execute_lock);
        } else
                service_runner_do_callback(s, cs, i);

        return 0;
}

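/* Report whether the service is marked active on any service core. The
 * per-core flags are maintained by service_run(), so a stopped or unmapped
 * service reads as inactive only once each core has observed the change.
 */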
int32_t
rte_service_may_be_active(uint32_t id)
{
        uint32_t ids[RTE_MAX_LCORE] = {0};
        int32_t lcore_count = rte_service_lcore_list(ids, RTE_MAX_LCORE);
        int i;

        if (id >= RTE_SERVICE_NUM_MAX || !service_valid(id))
                return -EINVAL;

        for (i = 0; i < lcore_count; i++) {
                if (lcore_states[ids[i]].service_active_on_lcore[id])
                        return 1;
        }

        return 0;
}

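/* Run one iteration of the service on the calling application lcore.
 * num_mapped_cores is incremented for the duration of the call, so the rest
 * of the API sees this core as one that may be running the service.
 */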
int32_t
rte_service_run_iter_on_app_lcore(uint32_t id, uint32_t serialize_mt_unsafe)
{
        struct core_state *cs = &lcore_states[rte_lcore_id()];
        struct rte_service_spec_impl *s;

        SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);

        /* Increment num_mapped_cores to indicate that this core is now
         * mapped and capable of running the service.
         */
        __atomic_add_fetch(&s->num_mapped_cores, 1, __ATOMIC_RELAXED);

        int ret = service_run(id, cs, UINT64_MAX, s, serialize_mt_unsafe);

        __atomic_sub_fetch(&s->num_mapped_cores, 1, __ATOMIC_RELAXED);

        return ret;
}

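/* Poll loop executed by each service core: run every registered service
 * mapped to this core until the core's runstate is set to stopped.
 */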
static int32_t
service_runner_func(void *arg)
{
        RTE_SET_USED(arg);
        uint32_t i;
        const int lcore = rte_lcore_id();
        struct core_state *cs = &lcore_states[lcore];

        /* runstate acts as the guard variable. Use load-acquire
         * memory order here to synchronize with store-release
         * in runstate update functions.
         */
        while (__atomic_load_n(&cs->runstate, __ATOMIC_ACQUIRE) ==
                        RUNSTATE_RUNNING) {
                const uint64_t service_mask = cs->service_mask;

                for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
                        if (!service_valid(i))
                                continue;
                        /* return value ignored as no change to code flow */
                        service_run(i, cs, service_mask, service_get(i), 1);
                }

                cs->loops++;
        }

        return 0;
}

int32_t
rte_service_lcore_count(void)
{
        int32_t count = 0;
        uint32_t i;
        for (i = 0; i < RTE_MAX_LCORE; i++)
                count += lcore_states[i].is_service_core;
        return count;
}

int32_t
rte_service_lcore_list(uint32_t array[], uint32_t n)
{
        uint32_t count = rte_service_lcore_count();
        if (count > n)
                return -ENOMEM;

        if (!array)
                return -EINVAL;

        uint32_t i;
        uint32_t idx = 0;
        for (i = 0; i < RTE_MAX_LCORE; i++) {
                struct core_state *cs = &lcore_states[i];
                if (cs->is_service_core) {
                        array[idx] = i;
                        idx++;
                }
        }

        return count;
}

int32_t
rte_service_lcore_count_services(uint32_t lcore)
{
        if (lcore >= RTE_MAX_LCORE)
                return -EINVAL;

        struct core_state *cs = &lcore_states[lcore];
        if (!cs->is_service_core)
                return -ENOTSUP;

        return __builtin_popcountll(cs->service_mask);
}

int32_t
rte_service_start_with_defaults(void)
{
        /* create a default mapping from cores to services, then start the
         * services to make them transparent to unaware applications.
         */
        uint32_t i;
        int ret;
        uint32_t count = rte_service_get_count();

        int32_t lcore_iter = 0;
        uint32_t ids[RTE_MAX_LCORE] = {0};
        int32_t lcore_count = rte_service_lcore_list(ids, RTE_MAX_LCORE);

        if (lcore_count == 0)
                return -ENOTSUP;

        for (i = 0; (int)i < lcore_count; i++)
                rte_service_lcore_start(ids[i]);

        for (i = 0; i < count; i++) {
                /* do 1:1 core mapping here, with each service getting
                 * assigned a single core by default. Adding multiple services
                 * should multiplex to a single core, or 1:1 if there are as
                 * many service cores as services.
                 */
                ret = rte_service_map_lcore_set(i, ids[lcore_iter], 1);
                if (ret)
                        return -ENODEV;

                lcore_iter++;
                if (lcore_iter >= lcore_count)
                        lcore_iter = 0;

                ret = rte_service_runstate_set(i, 1);
                if (ret)
                        return -ENOEXEC;
        }

        return 0;
}

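/* Enable or disable (via 'set') the mapping of service 'sid' on 'lcore',
 * and/or report through 'enabled' whether the mapping is currently active.
 * num_mapped_cores is adjusted so it stays consistent with the per-core
 * service_mask bits.
 */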
static int32_t
service_update(uint32_t sid, uint32_t lcore, uint32_t *set, uint32_t *enabled)
{
        /* validate ID, or return error value */
        if (sid >= RTE_SERVICE_NUM_MAX || !service_valid(sid) ||
            lcore >= RTE_MAX_LCORE || !lcore_states[lcore].is_service_core)
                return -EINVAL;

        uint64_t sid_mask = UINT64_C(1) << sid;
        if (set) {
                uint64_t lcore_mapped = lcore_states[lcore].service_mask &
                        sid_mask;

                if (*set && !lcore_mapped) {
                        lcore_states[lcore].service_mask |= sid_mask;
                        __atomic_add_fetch(&rte_services[sid].num_mapped_cores,
                                1, __ATOMIC_RELAXED);
                }
                if (!*set && lcore_mapped) {
                        lcore_states[lcore].service_mask &= ~(sid_mask);
                        __atomic_sub_fetch(&rte_services[sid].num_mapped_cores,
                                1, __ATOMIC_RELAXED);
                }
        }

        if (enabled)
                *enabled = !!(lcore_states[lcore].service_mask & (sid_mask));

        return 0;
}

int32_t
rte_service_map_lcore_set(uint32_t id, uint32_t lcore, uint32_t enabled)
{
        uint32_t on = enabled > 0;
        return service_update(id, lcore, &on, 0);
}

int32_t
rte_service_map_lcore_get(uint32_t id, uint32_t lcore)
{
        uint32_t enabled;
        int ret = service_update(id, lcore, 0, &enabled);
        if (ret == 0)
                return enabled;
        return ret;
}

static void
set_lcore_state(uint32_t lcore, int32_t state)
{
        /* mark core state in hugepage backed config */
        struct rte_config *cfg = rte_eal_get_configuration();
        cfg->lcore_role[lcore] = state;

        /* mark state in process local lcore_config */
        lcore_config[lcore].core_role = state;

        /* update per-lcore optimized state tracking */
        lcore_states[lcore].is_service_core = (state == ROLE_SERVICE);
}

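/* Return all service cores to the ROLE_RTE role: clear each core's service
 * mask, stop its poll loop, and zero every service's num_mapped_cores so
 * the mapping counts match the cleared masks.
 */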
int32_t
rte_service_lcore_reset_all(void)
{
        /* loop over cores, reset all to mask 0 */
        uint32_t i;
        for (i = 0; i < RTE_MAX_LCORE; i++) {
                if (lcore_states[i].is_service_core) {
                        lcore_states[i].service_mask = 0;
                        set_lcore_state(i, ROLE_RTE);
                        /* runstate acts as the guard variable. Use
                         * store-release memory order here to synchronize
                         * with load-acquire in runstate read functions.
                         */
                        __atomic_store_n(&lcore_states[i].runstate,
                                RUNSTATE_STOPPED, __ATOMIC_RELEASE);
                }
        }
        for (i = 0; i < RTE_SERVICE_NUM_MAX; i++)
                __atomic_store_n(&rte_services[i].num_mapped_cores, 0,
                        __ATOMIC_RELAXED);

        return 0;
}

int32_t
rte_service_lcore_add(uint32_t lcore)
{
        if (lcore >= RTE_MAX_LCORE)
                return -EINVAL;
        if (lcore_states[lcore].is_service_core)
                return -EALREADY;

        set_lcore_state(lcore, ROLE_SERVICE);

        /* ensure that after adding a core the mask and state are defaults */
        lcore_states[lcore].service_mask = 0;
        /* Use store-release memory order here to synchronize with
         * load-acquire in runstate read functions.
         */
        __atomic_store_n(&lcore_states[lcore].runstate, RUNSTATE_STOPPED,
                __ATOMIC_RELEASE);

        return rte_eal_wait_lcore(lcore);
}

int32_t
rte_service_lcore_del(uint32_t lcore)
{
        if (lcore >= RTE_MAX_LCORE)
                return -EINVAL;

        struct core_state *cs = &lcore_states[lcore];
        if (!cs->is_service_core)
                return -EINVAL;

        /* runstate acts as the guard variable. Use load-acquire
         * memory order here to synchronize with store-release
         * in runstate update functions.
         */
        if (__atomic_load_n(&cs->runstate, __ATOMIC_ACQUIRE) !=
                        RUNSTATE_STOPPED)
                return -EBUSY;

        set_lcore_state(lcore, ROLE_RTE);

        rte_smp_wmb();
        return 0;
}

int32_t
rte_service_lcore_start(uint32_t lcore)
{
        if (lcore >= RTE_MAX_LCORE)
                return -EINVAL;

        struct core_state *cs = &lcore_states[lcore];
        if (!cs->is_service_core)
                return -EINVAL;

        /* runstate acts as the guard variable. Use load-acquire
         * memory order here to synchronize with store-release
         * in runstate update functions.
         */
        if (__atomic_load_n(&cs->runstate, __ATOMIC_ACQUIRE) ==
                        RUNSTATE_RUNNING)
                return -EALREADY;

        /* Set the core to the run state first, and then launch it;
         * otherwise the launched function would return immediately, as it
         * is the runstate that keeps the core in the service poll loop.
         * Use store-release memory order here to synchronize with
         * load-acquire in runstate read functions.
         */
        __atomic_store_n(&cs->runstate, RUNSTATE_RUNNING, __ATOMIC_RELEASE);

        int ret = rte_eal_remote_launch(service_runner_func, 0, lcore);
        /* returns -EBUSY if the core is already launched, 0 on success */
        return ret;
}

int32_t
rte_service_lcore_stop(uint32_t lcore)
{
        if (lcore >= RTE_MAX_LCORE)
                return -EINVAL;

        /* runstate acts as the guard variable. Use load-acquire
         * memory order here to synchronize with store-release
         * in runstate update functions.
         */
        if (__atomic_load_n(&lcore_states[lcore].runstate, __ATOMIC_ACQUIRE) ==
                        RUNSTATE_STOPPED)
                return -EALREADY;

        uint32_t i;
        uint64_t service_mask = lcore_states[lcore].service_mask;
        for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
                /* use !! so the mapped bit survives truncation to int32_t
                 * for service IDs above 31
                 */
                int32_t enabled = !!(service_mask & (UINT64_C(1) << i));
                int32_t service_running = rte_service_runstate_get(i);
                int32_t only_core = (1 ==
                        __atomic_load_n(&rte_services[i].num_mapped_cores,
                                __ATOMIC_RELAXED));

                /* if the core is mapped, and the service is running, and this
                 * is the only core that is mapped, the service would cease to
                 * run if this core stopped, so fail instead.
                 */
                if (enabled && service_running && only_core)
                        return -EBUSY;
        }

        /* Use store-release memory order here to synchronize with
         * load-acquire in runstate read functions.
         */
        __atomic_store_n(&lcore_states[lcore].runstate, RUNSTATE_STOPPED,
                __ATOMIC_RELEASE);

        return 0;
}

int32_t
rte_service_attr_get(uint32_t id, uint32_t attr_id, uint64_t *attr_value)
{
        struct rte_service_spec_impl *s;
        SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);

        if (!attr_value)
                return -EINVAL;

        switch (attr_id) {
        case RTE_SERVICE_ATTR_CYCLES:
                *attr_value = s->cycles_spent;
                return 0;
        case RTE_SERVICE_ATTR_CALL_COUNT:
                *attr_value = s->calls;
                return 0;
        default:
                return -EINVAL;
        }
}

int32_t
rte_service_lcore_attr_get(uint32_t lcore, uint32_t attr_id,
                           uint64_t *attr_value)
{
        struct core_state *cs;

        if (lcore >= RTE_MAX_LCORE || !attr_value)
                return -EINVAL;

        cs = &lcore_states[lcore];
        if (!cs->is_service_core)
                return -ENOTSUP;

        switch (attr_id) {
        case RTE_SERVICE_LCORE_ATTR_LOOPS:
                *attr_value = cs->loops;
                return 0;
        default:
                return -EINVAL;
        }
}

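/* Print the statistics of a single service to 'f', or, when 'reset' is
 * non-zero, clear the service's call and cycle counters instead of
 * printing.
 */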
static void
service_dump_one(FILE *f, struct rte_service_spec_impl *s, uint32_t reset)
{
        /* avoid divide by zero */
        int calls = 1;
        if (s->calls != 0)
                calls = s->calls;

        if (reset) {
                s->cycles_spent = 0;
                s->calls = 0;
                return;
        }

        if (f == NULL)
                return;

        fprintf(f, "  %s: stats %d\tcalls %"PRIu64"\tcycles %"
                        PRIu64"\tavg: %"PRIu64"\n",
                        s->spec.name, service_stats_enabled(s), s->calls,
                        s->cycles_spent, s->cycles_spent / calls);
}

int32_t
rte_service_attr_reset_all(uint32_t id)
{
        struct rte_service_spec_impl *s;
        SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);

        int reset = 1;
        service_dump_one(NULL, s, reset);
        return 0;
}

int32_t
rte_service_lcore_attr_reset_all(uint32_t lcore)
{
        struct core_state *cs;

        if (lcore >= RTE_MAX_LCORE)
                return -EINVAL;

        cs = &lcore_states[lcore];
        if (!cs->is_service_core)
                return -ENOTSUP;

        cs->loops = 0;

        return 0;
}

static void
service_dump_calls_per_lcore(FILE *f, uint32_t lcore, uint32_t reset)
{
        uint32_t i;
        struct core_state *cs = &lcore_states[lcore];

        fprintf(f, "%02u\t", lcore);
        for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
                if (!service_valid(i))
                        continue;
                fprintf(f, "%"PRIu64"\t", cs->calls_per_service[i]);
                if (reset)
                        cs->calls_per_service[i] = 0;
        }
        fprintf(f, "\n");
}

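/* Dump statistics for one service when 'id' is a valid service ID, or for
 * all services and all service cores when 'id' is UINT32_MAX.
 */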
int32_t
rte_service_dump(FILE *f, uint32_t id)
{
        uint32_t i;
        int print_one = (id != UINT32_MAX);

        /* print only the specified service */
        if (print_one) {
                struct rte_service_spec_impl *s;
                SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
                fprintf(f, "Service %s Summary\n", s->spec.name);
                uint32_t reset = 0;
                service_dump_one(f, s, reset);
                return 0;
        }

        /* print all services, as UINT32_MAX was passed as id */
        fprintf(f, "Services Summary\n");
        for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
                if (!service_valid(i))
                        continue;
                uint32_t reset = 0;
                service_dump_one(f, &rte_services[i], reset);
        }

        fprintf(f, "Service Cores Summary\n");
        for (i = 0; i < RTE_MAX_LCORE; i++) {
                if (lcore_config[i].core_role != ROLE_SERVICE)
                        continue;

                uint32_t reset = 0;
                service_dump_calls_per_lcore(f, i, reset);
        }

        return 0;
}