service: use id in lcore to service map functions
[dpdk.git] lib/librte_eal/common/rte_service.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2017 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <stdio.h>
35 #include <unistd.h>
36 #include <inttypes.h>
37 #include <limits.h>
38 #include <string.h>
39 #include <dirent.h>
40
41 #include <rte_service.h>
42 #include "include/rte_service_component.h"
43
44 #include <rte_eal.h>
45 #include <rte_lcore.h>
46 #include <rte_common.h>
47 #include <rte_debug.h>
48 #include <rte_cycles.h>
49 #include <rte_atomic.h>
50 #include <rte_memory.h>
51 #include <rte_malloc.h>
52
53 #define RTE_SERVICE_NUM_MAX 64
54
55 #define SERVICE_F_REGISTERED    (1 << 0)
56 #define SERVICE_F_STATS_ENABLED (1 << 1)
57
58 /* runstates for services and lcores, denoting if they are active or not */
59 #define RUNSTATE_STOPPED 0
60 #define RUNSTATE_RUNNING 1
61
62 /* internal representation of a service */
63 struct rte_service_spec_impl {
64         /* public part of the struct */
65         struct rte_service_spec spec;
66
67         /* atomic lock that when set indicates a service core is currently
68          * running this service callback. When not set, a core may take the
69          * lock and then run the service callback.
70          */
71         rte_atomic32_t execute_lock;
72
73         /* API set/get-able variables */
74         int32_t runstate;
75         uint8_t internal_flags;
76
77         /* per service statistics */
78         uint32_t num_mapped_cores;
79         uint64_t calls;
80         uint64_t cycles_spent;
81 } __rte_cache_aligned;
82
83 /* the internal values of a service core */
84 struct core_state {
85         /* bitmap of service IDs mapped to run on this core */
86         uint64_t service_mask;
87         uint8_t runstate; /* running or stopped */
88         uint8_t is_service_core; /* set if core is currently a service core */
89
90         /* per-service call statistics collected on this core */
91         uint64_t calls_per_service[RTE_SERVICE_NUM_MAX];
92 } __rte_cache_aligned;
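/* For illustration (values not from this file): service_mask is a per-core
 * bitmap indexed by service id, so a core whose service_mask is 0x5 is mapped
 * to the services with ids 0 and 2, and rte_service_lcore_count_services()
 * for that core returns __builtin_popcountll(0x5) == 2.
 */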
93
94 static uint32_t rte_service_count;
95 static struct rte_service_spec_impl *rte_services;
96 static struct core_state *lcore_states;
97 static uint32_t rte_service_library_initialized;
98
99 int32_t rte_service_init(void)
100 {
101         if (rte_service_library_initialized) {
102                 printf("service library init() called, init flag %d\n",
103                         rte_service_library_initialized);
104                 return -EALREADY;
105         }
106
107         rte_services = rte_calloc("rte_services", RTE_SERVICE_NUM_MAX,
108                         sizeof(struct rte_service_spec_impl),
109                         RTE_CACHE_LINE_SIZE);
110         if (!rte_services) {
111                 printf("error allocating rte services array\n");
112                 return -ENOMEM;
113         }
114
115         lcore_states = rte_calloc("rte_service_core_states", RTE_MAX_LCORE,
116                         sizeof(struct core_state), RTE_CACHE_LINE_SIZE);
117         if (!lcore_states) {
118                 printf("error allocating core states array\n");
119                 return -ENOMEM;
120         }
121
122         int i;
123         int count = 0;
124         struct rte_config *cfg = rte_eal_get_configuration();
125         for (i = 0; i < RTE_MAX_LCORE; i++) {
126                 if (lcore_config[i].core_role == ROLE_SERVICE) {
127                         if ((unsigned int)i == cfg->master_lcore)
128                                 continue;
129                         rte_service_lcore_add(i);
130                         count++;
131                 }
132         }
133
134         rte_service_library_initialized = 1;
135         return 0;
136 }
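/* Sketch of how this initialisation is typically reached (assuming the usual
 * EAL start-up flow, which lives outside this file): rte_eal_init() calls
 * rte_service_init() after lcore roles have been assigned, e.g. when an
 * application is launched with a service coremask such as:
 *
 *   ./app -l 0-3 -s 0xc      (lcores 2 and 3 get ROLE_SERVICE)
 *
 * and the loop above then registers every ROLE_SERVICE lcore except the
 * master lcore via rte_service_lcore_add().
 */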
137
138 /* returns 1 if service is registered and has not been unregistered
139  * Returns 0 if service never registered, or has been unregistered
140  */
141 static inline int
142 service_valid(uint32_t id)
143 {
144         return !!(rte_services[id].internal_flags & SERVICE_F_REGISTERED);
145 }
146
147 /* validate ID and retrieve service pointer, or return error value */
148 #define SERVICE_VALID_GET_OR_ERR_RET(id, service, retval) do {          \
149         if (id >= RTE_SERVICE_NUM_MAX || !service_valid(id))            \
150                 return retval;                                          \
151         service = &rte_services[id];                                    \
152 } while (0)
153
154 /* returns 1 if statistics should be collected for service
155  * Returns 0 if statistics should not be collected for service
156  */
157 static inline int
158 service_stats_enabled(struct rte_service_spec_impl *impl)
159 {
160         return !!(impl->internal_flags & SERVICE_F_STATS_ENABLED);
161 }
162
163 static inline int
164 service_mt_safe(struct rte_service_spec_impl *s)
165 {
166         return s->spec.capabilities & RTE_SERVICE_CAP_MT_SAFE;
167 }
168
169 int32_t rte_service_set_stats_enable(struct rte_service_spec *service,
170                                   int32_t enabled)
171 {
172         struct rte_service_spec_impl *impl =
173                 (struct rte_service_spec_impl *)service;
174         if (!impl)
175                 return -EINVAL;
176
177         if (enabled)
178                 impl->internal_flags |= SERVICE_F_STATS_ENABLED;
179         else
180                 impl->internal_flags &= ~(SERVICE_F_STATS_ENABLED);
181
182         return 0;
183 }
184
185 uint32_t
186 rte_service_get_count(void)
187 {
188         return rte_service_count;
189 }
190
191 struct rte_service_spec *
192 rte_service_get_by_id(uint32_t id)
193 {
194         struct rte_service_spec *service = NULL;
195         if (id < rte_service_count)
196                 service = (struct rte_service_spec *)&rte_services[id];
197
198         return service;
199 }
200
201 struct rte_service_spec *rte_service_get_by_name(const char *name)
202 {
203         struct rte_service_spec *service = NULL;
204         int i;
205         for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
206                 if (service_valid(i) &&
207                                 strcmp(name, rte_services[i].spec.name) == 0) {
208                         service = (struct rte_service_spec *)&rte_services[i];
209                         break;
210                 }
211         }
212
213         return service;
214 }
215
216 const char *
217 rte_service_get_name(uint32_t id)
218 {
219         struct rte_service_spec_impl *s;
220         SERVICE_VALID_GET_OR_ERR_RET(id, s, 0);
221         return s->spec.name;
222 }
223
224 int32_t
225 rte_service_probe_capability(uint32_t id, uint32_t capability)
226 {
227         struct rte_service_spec_impl *s;
228         SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
229         return s->spec.capabilities & capability;
230 }
231
232 int32_t
233 rte_service_is_running(const struct rte_service_spec *spec)
234 {
235         const struct rte_service_spec_impl *impl =
236                 (const struct rte_service_spec_impl *)spec;
237         if (!impl)
238                 return -EINVAL;
239
240         return (impl->runstate == RUNSTATE_RUNNING) &&
241                 (impl->num_mapped_cores > 0);
242 }
243
244 int32_t
245 rte_service_register(const struct rte_service_spec *spec)
246 {
247         uint32_t i;
248         int32_t free_slot = -1;
249
250         if (spec->callback == NULL || strlen(spec->name) == 0)
251                 return -EINVAL;
252
253         for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
254                 if (!service_valid(i)) {
255                         free_slot = i;
256                         break;
257                 }
258         }
259
260         if ((free_slot < 0) || (i == RTE_SERVICE_NUM_MAX))
261                 return -ENOSPC;
262
263         struct rte_service_spec_impl *s = &rte_services[free_slot];
264         s->spec = *spec;
265         s->internal_flags |= SERVICE_F_REGISTERED;
266
267         rte_smp_wmb();
268         rte_service_count++;
269
270         return 0;
271 }
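/* Usage sketch for rte_service_register() (names below are hypothetical, not
 * part of DPDK; the spec fields used are the ones referenced in this file,
 * and spec.name is assumed to be the fixed-size array declared in
 * rte_service_component.h):
 *
 *   static int32_t my_service_run(void *userdata)
 *   {
 *           struct my_state *st = userdata;   // hypothetical component state
 *           // ... perform one iteration of background work ...
 *           return 0;
 *   }
 *
 *   struct rte_service_spec spec = {0};
 *   snprintf(spec.name, sizeof(spec.name), "my_service");
 *   spec.callback = my_service_run;
 *   spec.callback_userdata = st;
 *   spec.capabilities = RTE_SERVICE_CAP_MT_SAFE;  // only if callback is MT safe
 *   int32_t ret = rte_service_register(&spec);
 *   // 0 on success, -EINVAL for a bad spec, -ENOSPC if all slots are in use
 */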
272
273 int32_t
274 rte_service_unregister(struct rte_service_spec *spec)
275 {
276         struct rte_service_spec_impl *s = NULL;
277         struct rte_service_spec_impl *spec_impl =
278                 (struct rte_service_spec_impl *)spec;
279
280         uint32_t i;
281         uint32_t service_id;
282         for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
283                 if (&rte_services[i] == spec_impl) {
284                         s = spec_impl;
285                         service_id = i;
286                         break;
287                 }
288         }
289
290         if (!s)
291                 return -EINVAL;
292
293         rte_service_count--;
294         rte_smp_wmb();
295
296         s->internal_flags &= ~(SERVICE_F_REGISTERED);
297
298         for (i = 0; i < RTE_MAX_LCORE; i++)
299                 lcore_states[i].service_mask &= ~(UINT64_C(1) << service_id);
300
301         memset(&rte_services[service_id], 0,
302                         sizeof(struct rte_service_spec_impl));
303
304         return 0;
305 }
306
307 int32_t
308 rte_service_start(struct rte_service_spec *service)
309 {
310         struct rte_service_spec_impl *s =
311                 (struct rte_service_spec_impl *)service;
312         s->runstate = RUNSTATE_RUNNING;
313         rte_smp_wmb();
314         return 0;
315 }
316
317 int32_t
318 rte_service_stop(struct rte_service_spec *service)
319 {
320         struct rte_service_spec_impl *s =
321                 (struct rte_service_spec_impl *)service;
322         s->runstate = RUNSTATE_STOPPED;
323         rte_smp_wmb();
324         return 0;
325 }
326
327 static int32_t
328 rte_service_runner_func(void *arg)
329 {
330         RTE_SET_USED(arg);
331         uint32_t i;
332         const int lcore = rte_lcore_id();
333         struct core_state *cs = &lcore_states[lcore];
334
335         while (lcore_states[lcore].runstate == RUNSTATE_RUNNING) {
336                 const uint64_t service_mask = cs->service_mask;
337                 for (i = 0; i < rte_service_count; i++) {
338                         struct rte_service_spec_impl *s = &rte_services[i];
339                         if (s->runstate != RUNSTATE_RUNNING ||
340                                         !(service_mask & (UINT64_C(1) << i)))
341                                 continue;
342
343                         /* cmpset is only needed if the service is not MT
344                          * safe and more than one core is mapped to it.
345                          */
346                         const int need_cmpset = (service_mt_safe(s) == 0) &&
347                                                 (s->num_mapped_cores > 1);
348                         uint32_t *lock = (uint32_t *)&s->execute_lock;
349
350                         if (!need_cmpset || rte_atomic32_cmpset(lock, 0, 1)) {
351                                 void *userdata = s->spec.callback_userdata;
352
353                                 if (service_stats_enabled(s)) {
354                                         uint64_t start = rte_rdtsc();
355                                         s->spec.callback(userdata);
356                                         uint64_t end = rte_rdtsc();
357                                         s->cycles_spent += end - start;
358                                         cs->calls_per_service[i]++;
359                                         s->calls++;
360                                 } else
361                                         s->spec.callback(userdata);
362
363                                 if (need_cmpset)
364                                         rte_atomic32_clear(&s->execute_lock);
365                         }
366                 }
367
368                 rte_smp_rmb();
369         }
370
371         lcore_config[lcore].state = WAIT;
372
373         return 0;
374 }
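/* For illustration of the locking scheme above: with two service cores both
 * mapped to the same service id,
 *   - an MT safe service (or one mapped to at most one core) is invoked
 *     without touching execute_lock, so both cores may run the callback
 *     concurrently;
 *   - a non MT safe service is guarded by rte_atomic32_cmpset(): only the
 *     core that wins the cmpset runs the callback and then clears the lock,
 *     the other core simply skips this service for the current iteration of
 *     the poll loop.
 */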
375
376 int32_t
377 rte_service_lcore_count(void)
378 {
379         int32_t count = 0;
380         uint32_t i;
381         for (i = 0; i < RTE_MAX_LCORE; i++)
382                 count += lcore_states[i].is_service_core;
383         return count;
384 }
385
386 int32_t
387 rte_service_lcore_list(uint32_t array[], uint32_t n)
388 {
389         uint32_t count = rte_service_lcore_count();
390         if (count > n)
391                 return -ENOMEM;
392
393         if (!array)
394                 return -EINVAL;
395
396         uint32_t i;
397         uint32_t idx = 0;
398         for (i = 0; i < RTE_MAX_LCORE; i++) {
399                 struct core_state *cs = &lcore_states[i];
400                 if (cs->is_service_core) {
401                         array[idx] = i;
402                         idx++;
403                 }
404         }
405
406         return count;
407 }
408
409 int32_t
410 rte_service_lcore_count_services(uint32_t lcore)
411 {
412         if (lcore >= RTE_MAX_LCORE)
413                 return -EINVAL;
414
415         struct core_state *cs = &lcore_states[lcore];
416         if (!cs->is_service_core)
417                 return -ENOTSUP;
418
419         return __builtin_popcountll(cs->service_mask);
420 }
421
422 int32_t
423 rte_service_start_with_defaults(void)
424 {
425         /* create a default mapping from cores to services, then start the
426          * services to make them transparent to unaware applications.
427          */
428         uint32_t i;
429         int ret;
430         uint32_t count = rte_service_get_count();
431
432         int32_t lcore_iter = 0;
433         uint32_t ids[RTE_MAX_LCORE];
434         int32_t lcore_count = rte_service_lcore_list(ids, RTE_MAX_LCORE);
435
436         if (lcore_count == 0)
437                 return -ENOTSUP;
438
439         for (i = 0; (int)i < lcore_count; i++)
440                 rte_service_lcore_start(ids[i]);
441
442         for (i = 0; i < count; i++) {
443                 struct rte_service_spec *s = rte_service_get_by_id(i);
444                 if (!s)
445                         return -EINVAL;
446
447                 /* do 1:1 core mapping here, with each service getting
448                  * assigned a single core by default. With more services
449                  * than service cores, multiple services are multiplexed
450                  * onto a core; with equal counts the mapping stays 1:1.
451                  */
452                 ret = rte_service_map_lcore_set(i, ids[lcore_iter], 1);
453                 if (ret)
454                         return -ENODEV;
455
456                 lcore_iter++;
457                 if (lcore_iter >= lcore_count)
458                         lcore_iter = 0;
459
460                 ret = rte_service_start(s);
461                 if (ret)
462                         return -ENOEXEC;
463         }
464
465         return 0;
466 }
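/* Worked example of the default mapping above (counts are illustrative):
 * with three registered services (ids 0..2) and two service cores
 * ids[] = {4, 5}, the loop produces
 *   service 0 -> lcore 4, service 1 -> lcore 5, service 2 -> lcore 4,
 * i.e. services are spread round-robin across the service cores and each
 * service is then started.
 */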
467
468 static int32_t
469 service_update(struct rte_service_spec *service, uint32_t lcore,
470                 uint32_t *set, uint32_t *enabled)
471 {
472         uint32_t i;
473         int32_t sid = -1;
474
475         for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
476                 if ((struct rte_service_spec *)&rte_services[i] == service &&
477                                 service_valid(i)) {
478                         sid = i;
479                         break;
480                 }
481         }
482
483         if (sid == -1 || lcore >= RTE_MAX_LCORE)
484                 return -EINVAL;
485
486         if (!lcore_states[lcore].is_service_core)
487                 return -EINVAL;
488
489         uint64_t sid_mask = UINT64_C(1) << sid;
490         if (set) {
491                 if (*set) {
492                         lcore_states[lcore].service_mask |= sid_mask;
493                         rte_services[sid].num_mapped_cores++;
494                 } else {
495                         lcore_states[lcore].service_mask &= ~(sid_mask);
496                         rte_services[sid].num_mapped_cores--;
497                 }
498         }
499
500         if (enabled)
501                 *enabled = !!(lcore_states[lcore].service_mask & sid_mask);
502
503         rte_smp_wmb();
504
505         return 0;
506 }
507
508 int32_t
509 rte_service_map_lcore_set(uint32_t id, uint32_t lcore, uint32_t enabled)
510 {
511         struct rte_service_spec_impl *s;
512         SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
513         uint32_t on = enabled > 0;
514         return service_update(&s->spec, lcore, &on, 0);
515 }
516
517 int32_t
518 rte_service_map_lcore_get(uint32_t id, uint32_t lcore)
519 {
520         struct rte_service_spec_impl *s;
521         SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
522         uint32_t enabled;
523         int ret = service_update(&s->spec, lcore, 0, &enabled);
524         if (ret == 0)
525                 return enabled;
526         return ret;
527 }
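/* Usage sketch for the id based map functions above (values illustrative;
 * the lcore must already have been added as a service core):
 *
 *   uint32_t sid = 0;        // service id, e.g. from registration order
 *   uint32_t lcore = 4;
 *   int32_t ret = rte_service_map_lcore_set(sid, lcore, 1);
 *   // 0 on success, -EINVAL if sid or lcore is invalid, or if lcore is not
 *   // a service core
 *   if (ret == 0 && rte_service_map_lcore_get(sid, lcore) > 0)
 *           printf("service %u is mapped to lcore %u\n", sid, lcore);
 */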
528
529 int32_t rte_service_lcore_reset_all(void)
530 {
531         /* loop over cores, reset all to mask 0 */
532         uint32_t i;
533         for (i = 0; i < RTE_MAX_LCORE; i++) {
534                 lcore_states[i].service_mask = 0;
535                 lcore_states[i].is_service_core = 0;
536                 lcore_states[i].runstate = RUNSTATE_STOPPED;
537         }
538         for (i = 0; i < RTE_SERVICE_NUM_MAX; i++)
539                 rte_services[i].num_mapped_cores = 0;
540
541         rte_smp_wmb();
542
543         return 0;
544 }
545
546 static void
547 set_lcore_state(uint32_t lcore, int32_t state)
548 {
549         /* mark core state in hugepage backed config */
550         struct rte_config *cfg = rte_eal_get_configuration();
551         cfg->lcore_role[lcore] = state;
552
553         /* mark state in process local lcore_config */
554         lcore_config[lcore].core_role = state;
555
556         /* update per-lcore optimized state tracking */
557         lcore_states[lcore].is_service_core = (state == ROLE_SERVICE);
558 }
559
560 int32_t
561 rte_service_lcore_add(uint32_t lcore)
562 {
563         if (lcore >= RTE_MAX_LCORE)
564                 return -EINVAL;
565         if (lcore_states[lcore].is_service_core)
566                 return -EALREADY;
567
568         set_lcore_state(lcore, ROLE_SERVICE);
569
570         /* ensure that after adding a core the mask and state are defaults */
571         lcore_states[lcore].service_mask = 0;
572         lcore_states[lcore].runstate = RUNSTATE_STOPPED;
573
574         rte_smp_wmb();
575         return 0;
576 }
577
578 int32_t
579 rte_service_lcore_del(uint32_t lcore)
580 {
581         if (lcore >= RTE_MAX_LCORE)
582                 return -EINVAL;
583
584         struct core_state *cs = &lcore_states[lcore];
585         if (!cs->is_service_core)
586                 return -EINVAL;
587
588         if (cs->runstate != RUNSTATE_STOPPED)
589                 return -EBUSY;
590
591         set_lcore_state(lcore, ROLE_RTE);
592
593         rte_smp_wmb();
594         return 0;
595 }
596
597 int32_t
598 rte_service_lcore_start(uint32_t lcore)
599 {
600         if (lcore >= RTE_MAX_LCORE)
601                 return -EINVAL;
602
603         struct core_state *cs = &lcore_states[lcore];
604         if (!cs->is_service_core)
605                 return -EINVAL;
606
607         if (cs->runstate == RUNSTATE_RUNNING)
608                 return -EALREADY;
609
610         /* set the core's runstate to running before launching, otherwise
611          * the runner would return immediately; the runstate is what keeps
612          * it in the service poll loop */
613         lcore_states[lcore].runstate = RUNSTATE_RUNNING;
614
615         int ret = rte_eal_remote_launch(rte_service_runner_func, 0, lcore);
616         /* returns -EBUSY if the core is already launched, 0 on success */
617         return ret;
618 }
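/* Typical sequence to get a registered service running on a dedicated core
 * using the functions above (ids are illustrative; spec is the pointer that
 * was passed to rte_service_register()):
 *
 *   uint32_t service_id = 0;
 *   uint32_t lcore = 4;
 *   rte_service_lcore_add(lcore);                    // mark as service core
 *   rte_service_map_lcore_set(service_id, lcore, 1); // map service to core
 *   rte_service_start(spec);                         // service runstate on
 *   rte_service_lcore_start(lcore);                  // launch runner on core
 */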
619
620 int32_t
621 rte_service_lcore_stop(uint32_t lcore)
622 {
623         if (lcore >= RTE_MAX_LCORE)
624                 return -EINVAL;
625
626         if (lcore_states[lcore].runstate == RUNSTATE_STOPPED)
627                 return -EALREADY;
628
629         uint32_t i;
630         for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
631                 int32_t enabled = !!(lcore_states[lcore].service_mask &
632                                         (UINT64_C(1) << i));
633                 int32_t service_running = rte_services[i].runstate !=
634                                                 RUNSTATE_STOPPED;
635                 int32_t only_core = rte_services[i].num_mapped_cores == 1;
636
637                 /* if the core is mapped, and the service is running, and this
638                  * is the only core that is mapped, the service would cease to
639                  * run if this core stopped, so fail instead.
640                  */
641                 if (enabled && service_running && only_core)
642                         return -EBUSY;
643         }
644
645         lcore_states[lcore].runstate = RUNSTATE_STOPPED;
646
647         return 0;
648 }
649
650 static void
651 rte_service_dump_one(FILE *f, struct rte_service_spec_impl *s,
652                      uint64_t all_cycles, uint32_t reset)
653 {
654         /* avoid divide by zero */
655         if (all_cycles == 0)
656                 all_cycles = 1;
657
658         int calls = 1;
659         if (s->calls != 0)
660                 calls = s->calls;
661
662         fprintf(f, "  %s: stats %d\tcalls %"PRIu64"\tcycles %"
663                         PRIu64"\tavg: %"PRIu64"\n",
664                         s->spec.name, service_stats_enabled(s), s->calls,
665                         s->cycles_spent, s->cycles_spent / calls);
666
667         if (reset) {
668                 s->cycles_spent = 0;
669                 s->calls = 0;
670         }
671 }
672
673 static void
674 service_dump_calls_per_lcore(FILE *f, uint32_t lcore, uint32_t reset)
675 {
676         uint32_t i;
677         struct core_state *cs = &lcore_states[lcore];
678
679         fprintf(f, "%02u\t", lcore);
680         for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
681                 if (!service_valid(i))
682                         continue;
683                 fprintf(f, "%"PRIu64"\t", cs->calls_per_service[i]);
684                 if (reset)
685                         cs->calls_per_service[i] = 0;
686         }
687         fprintf(f, "\n");
688 }
689
690 int32_t rte_service_dump(FILE *f, struct rte_service_spec *service)
691 {
692         uint32_t i;
693
694         uint64_t total_cycles = 0;
695         for (i = 0; i < rte_service_count; i++) {
696                 if (!service_valid(i))
697                         continue;
698                 total_cycles += rte_services[i].cycles_spent;
699         }
700
701         if (service) {
702                 struct rte_service_spec_impl *s =
703                         (struct rte_service_spec_impl *)service;
704                 fprintf(f, "Service %s Summary\n", s->spec.name);
705                 uint32_t reset = 0;
706                 rte_service_dump_one(f, s, total_cycles, reset);
707                 return 0;
708         }
709
710         fprintf(f, "Services Summary\n");
711         for (i = 0; i < rte_service_count; i++) {
712                 uint32_t reset = 1;
713                 rte_service_dump_one(f, &rte_services[i], total_cycles, reset);
714         }
715
716         fprintf(f, "Service Cores Summary\n");
717         for (i = 0; i < RTE_MAX_LCORE; i++) {
718                 if (lcore_config[i].core_role != ROLE_SERVICE)
719                         continue;
720
721                 uint32_t reset = 0;
722                 service_dump_calls_per_lcore(f, i, reset);
723         }
724
725         return 0;
726 }
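/* Usage sketch for the statistics/dump API above (the service name is
 * illustrative): enable per-call statistics for one service, then print a
 * summary for all services and service cores.
 *
 *   struct rte_service_spec *s = rte_service_get_by_name("my_service");
 *   if (s) {
 *           rte_service_set_stats_enable(s, 1);
 *           // ... let the service cores run for a while ...
 *           rte_service_dump(stdout, NULL);  // all services + per-core calls
 *           rte_service_dump(stdout, s);     // summary for this service only
 *   }
 */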