1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2014 Intel Corporation
15 #include <sys/sysinfo.h>
16 #include <sys/types.h>
19 #include <rte_power.h>
20 #include <rte_spinlock.h>
22 #include "power_manager.h"
24 #define RTE_LOGTYPE_POWER_MANAGER RTE_LOGTYPE_USER1
/*
 * Scale the frequency of a single core in the given DIRECTION
 * (up/down/min/max/enable_turbo/disable_turbo), serialised by the
 * per-core spinlock.  Expands inside functions returning int: bails
 * out with -1 for an out-of-range or disabled core.
 *
 * NOTE(review): the `return -1;` arms and the `} while (0)` terminator
 * were missing from this extraction and have been reconstructed.
 */
#define POWER_SCALE_CORE(DIRECTION, core_num, ret) do { \
	if (core_num >= POWER_MGR_MAX_CPUS) \
		return -1; \
	if (!(global_enabled_cpus & (1ULL << core_num))) \
		return -1; \
	rte_spinlock_lock(&global_core_freq_info[core_num].power_sl); \
	ret = rte_power_freq_##DIRECTION(core_num); \
	rte_spinlock_unlock(&global_core_freq_info[core_num].power_sl); \
} while (0)
/*
 * Scale the frequency of every core whose bit is set in core_mask in
 * the given DIRECTION, skipping cores absent from global_enabled_cpus.
 * Sets @ret to -1 if any rte_power_freq_* call does not return 1.
 *
 * Fix: the loop update cleared bits with `1 << i` (int), which is
 * undefined for i >= 31 on the 64-bit core_mask — use 1ULL.
 * NOTE(review): the `int i;` declaration, `continue;`, `ret = -1;`
 * and closing braces were missing from this extraction and have been
 * reconstructed.
 */
#define POWER_SCALE_MASK(DIRECTION, core_mask, ret) do { \
	int i; \
	for (i = 0; core_mask; core_mask &= ~(1ULL << i++)) { \
		if ((core_mask >> i) & 1) { \
			if (!(global_enabled_cpus & (1ULL << i))) \
				continue; \
			rte_spinlock_lock(&global_core_freq_info[i].power_sl); \
			if (rte_power_freq_##DIRECTION(i) != 1) \
				ret = -1; \
			rte_spinlock_unlock(&global_core_freq_info[i].power_sl); \
		} \
	} \
} while (0)
51 rte_spinlock_t power_sl;
52 uint32_t freqs[RTE_MAX_LCORE_FREQS];
54 } __rte_cache_aligned;
/* Per-core frequency info, indexed by host core number. */
56 static struct freq_info global_core_freq_info[POWER_MGR_MAX_CPUS];
/* Bitmask of detected host CPUs: bit i set => cpu i usable (64 cores max). */
59 static uint64_t global_enabled_cpus;
/* sysfs path probed (via access()) to detect whether host cpu%u exists. */
61 #define SYSFS_CPU_PATH "/sys/devices/system/cpu/cpu%u/topology/core_id"
64 set_host_cpus_mask(void)
68 unsigned num_cpus = 0;
70 for (i = 0; i < POWER_MGR_MAX_CPUS; i++) {
71 snprintf(path, sizeof(path), SYSFS_CPU_PATH, i);
72 if (access(path, F_OK) == 0) {
73 global_enabled_cpus |= 1ULL << i;
/*
 * NOTE(review): fragment of a core-info initialisation routine; its
 * signature and several interior lines (the malloc-failure `if`, the
 * loop close, the return) are missing from this extraction, so only
 * the visible lines are annotated — do not assume the full contract.
 */
/* Size the per-core details array from the configured processor count. */
95 ci->core_count = get_nprocs_conf();
96 ci->cd = malloc(ci->core_count * sizeof(struct core_details));
/* Visible body of the (missing) malloc-failure branch. */
98 RTE_LOG(ERR, POWER_MANAGER, "Failed to allocate memory for core info.");
/* Defaults per core: enabled in the global mask, oob flag off, msr fd 0. */
101 for (i = 0; i < ci->core_count; i++) {
102 ci->cd[i].global_enabled_cpus = 1;
103 ci->cd[i].oob_enabled = 0;
104 ci->cd[i].msr_fd = 0;
106 printf("%d cores in system\n", ci->core_count);
111 power_manager_init(void)
113 unsigned int i, num_cpus, num_freqs;
117 num_cpus = set_host_cpus_mask();
119 RTE_LOG(ERR, POWER_MANAGER, "Unable to detected host CPUs, please "
120 "ensure that sufficient privileges exist to inspect sysfs\n");
123 rte_power_set_env(PM_ENV_ACPI_CPUFREQ);
124 cpu_mask = global_enabled_cpus;
125 for (i = 0; cpu_mask; cpu_mask &= ~(1 << i++)) {
126 if (rte_power_init(i) < 0)
127 RTE_LOG(ERR, POWER_MANAGER,
128 "Unable to initialize power manager "
130 num_freqs = rte_power_freqs(i, global_core_freq_info[i].freqs,
131 RTE_MAX_LCORE_FREQS);
132 if (num_freqs == 0) {
133 RTE_LOG(ERR, POWER_MANAGER,
134 "Unable to get frequency list for core %u\n",
136 global_enabled_cpus &= ~(1 << i);
140 global_core_freq_info[i].num_freqs = num_freqs;
141 rte_spinlock_init(&global_core_freq_info[i].power_sl);
143 RTE_LOG(INFO, POWER_MANAGER, "Detected %u host CPUs , enabled core mask:"
144 " 0x%"PRIx64"\n", num_cpus, global_enabled_cpus);
150 power_manager_get_current_frequency(unsigned core_num)
152 uint32_t freq, index;
154 if (core_num >= POWER_MGR_MAX_CPUS) {
155 RTE_LOG(ERR, POWER_MANAGER, "Core(%u) is out of range 0...%d\n",
156 core_num, POWER_MGR_MAX_CPUS-1);
159 if (!(global_enabled_cpus & (1ULL << core_num)))
162 rte_spinlock_lock(&global_core_freq_info[core_num].power_sl);
163 index = rte_power_get_freq(core_num);
164 rte_spinlock_unlock(&global_core_freq_info[core_num].power_sl);
165 if (index >= POWER_MGR_MAX_CPUS)
168 freq = global_core_freq_info[core_num].freqs[index];
174 power_manager_exit(void)
179 for (i = 0; global_enabled_cpus; global_enabled_cpus &= ~(1 << i++)) {
180 if (rte_power_exit(i) < 0) {
181 RTE_LOG(ERR, POWER_MANAGER, "Unable to shutdown power manager "
186 global_enabled_cpus = 0;
191 power_manager_scale_mask_up(uint64_t core_mask)
195 POWER_SCALE_MASK(up, core_mask, ret);
200 power_manager_scale_mask_down(uint64_t core_mask)
204 POWER_SCALE_MASK(down, core_mask, ret);
209 power_manager_scale_mask_min(uint64_t core_mask)
213 POWER_SCALE_MASK(min, core_mask, ret);
218 power_manager_scale_mask_max(uint64_t core_mask)
222 POWER_SCALE_MASK(max, core_mask, ret);
227 power_manager_enable_turbo_mask(uint64_t core_mask)
231 POWER_SCALE_MASK(enable_turbo, core_mask, ret);
236 power_manager_disable_turbo_mask(uint64_t core_mask)
240 POWER_SCALE_MASK(disable_turbo, core_mask, ret);
245 power_manager_scale_core_up(unsigned core_num)
249 POWER_SCALE_CORE(up, core_num, ret);
254 power_manager_scale_core_down(unsigned core_num)
258 POWER_SCALE_CORE(down, core_num, ret);
263 power_manager_scale_core_min(unsigned core_num)
267 POWER_SCALE_CORE(min, core_num, ret);
272 power_manager_scale_core_max(unsigned core_num)
276 POWER_SCALE_CORE(max, core_num, ret);
281 power_manager_enable_turbo_core(unsigned int core_num)
285 POWER_SCALE_CORE(enable_turbo, core_num, ret);
290 power_manager_disable_turbo_core(unsigned int core_num)
294 POWER_SCALE_CORE(disable_turbo, core_num, ret);
299 power_manager_scale_core_med(unsigned int core_num)
303 if (core_num >= POWER_MGR_MAX_CPUS)
305 if (!(global_enabled_cpus & (1ULL << core_num)))
307 rte_spinlock_lock(&global_core_freq_info[core_num].power_sl);
308 ret = rte_power_set_freq(core_num,
309 global_core_freq_info[core_num].num_freqs / 2);
310 rte_spinlock_unlock(&global_core_freq_info[core_num].power_sl);