rte_spinlock_unlock(&global_core_freq_info[core_num].power_sl); \
} while (0)
-#define POWER_SCALE_MASK(DIRECTION, core_mask, ret) do { \
- int i; \
- for (i = 0; core_mask; core_mask &= ~(1 << i++)) { \
- if ((core_mask >> i) & 1) { \
- if (!(ci.cd[i].global_enabled_cpus)) \
- continue; \
- rte_spinlock_lock(&global_core_freq_info[i].power_sl); \
- if (rte_power_freq_##DIRECTION(i) != 1) \
- ret = -1; \
- rte_spinlock_unlock(&global_core_freq_info[i].power_sl); \
- } \
- } \
-} while (0)
-
struct freq_info {
rte_spinlock_t power_sl;
uint32_t freqs[RTE_MAX_LCORE_FREQS];
unsigned num_freqs;
} __rte_cache_aligned;
-static struct freq_info global_core_freq_info[POWER_MGR_MAX_CPUS];
+static struct freq_info global_core_freq_info[RTE_MAX_LCORE];
struct core_info ci;
ci->core_count = get_nprocs_conf();
ci->branch_ratio_threshold = BRANCH_RATIO_THRESHOLD;
ci->cd = malloc(ci->core_count * sizeof(struct core_details));
if (!ci->cd) {
RTE_LOG(ERR, POWER_MANAGER, "Failed to allocate memory for core info.");
return -1;
}
+ memset(ci->cd, 0, ci->core_count * sizeof(struct core_details));
for (i = 0; i < ci->core_count; i++) {
ci->cd[i].global_enabled_cpus = 1;
- ci->cd[i].oob_enabled = 0;
- ci->cd[i].msr_fd = 0;
}
printf("%d cores in system\n", ci->core_count);
return 0;
struct core_info *ci;
unsigned int max_core_num;
- rte_power_set_env(PM_ENV_ACPI_CPUFREQ);
+ rte_power_set_env(PM_ENV_NOT_SET);
ci = get_core_info();
if (!ci) {
return -1;
}
- if (ci->core_count > POWER_MGR_MAX_CPUS)
- max_core_num = POWER_MGR_MAX_CPUS;
+ if (ci->core_count > RTE_MAX_LCORE)
+ max_core_num = RTE_MAX_LCORE;
else
max_core_num = ci->core_count;
{
uint32_t freq, index;
- if (core_num >= POWER_MGR_MAX_CPUS) {
+ if (core_num >= RTE_MAX_LCORE) {
RTE_LOG(ERR, POWER_MANAGER, "Core(%u) is out of range 0...%d\n",
- core_num, POWER_MGR_MAX_CPUS-1);
+ core_num, RTE_MAX_LCORE-1);
return -1;
}
if (!(ci.cd[core_num].global_enabled_cpus))
rte_spinlock_lock(&global_core_freq_info[core_num].power_sl);
index = rte_power_get_freq(core_num);
rte_spinlock_unlock(&global_core_freq_info[core_num].power_sl);
- if (index >= POWER_MGR_MAX_CPUS)
+ if (index >= RTE_MAX_LCORE_FREQS)
freq = 0;
else
freq = global_core_freq_info[core_num].freqs[index];
return -1;
}
- if (ci->core_count > POWER_MGR_MAX_CPUS)
- max_core_num = POWER_MGR_MAX_CPUS;
+ if (ci->core_count > RTE_MAX_LCORE)
+ max_core_num = RTE_MAX_LCORE;
else
max_core_num = ci->core_count;
return ret;
}
-int
-power_manager_scale_mask_up(uint64_t core_mask)
-{
- int ret = 0;
-
- POWER_SCALE_MASK(up, core_mask, ret);
- return ret;
-}
-
-int
-power_manager_scale_mask_down(uint64_t core_mask)
-{
- int ret = 0;
-
- POWER_SCALE_MASK(down, core_mask, ret);
- return ret;
-}
-
-int
-power_manager_scale_mask_min(uint64_t core_mask)
-{
- int ret = 0;
-
- POWER_SCALE_MASK(min, core_mask, ret);
- return ret;
-}
-
-int
-power_manager_scale_mask_max(uint64_t core_mask)
-{
- int ret = 0;
-
- POWER_SCALE_MASK(max, core_mask, ret);
- return ret;
-}
-
-int
-power_manager_enable_turbo_mask(uint64_t core_mask)
-{
- int ret = 0;
-
- POWER_SCALE_MASK(enable_turbo, core_mask, ret);
- return ret;
-}
-
-int
-power_manager_disable_turbo_mask(uint64_t core_mask)
-{
- int ret = 0;
-
- POWER_SCALE_MASK(disable_turbo, core_mask, ret);
- return ret;
-}
-
int
power_manager_scale_core_up(unsigned core_num)
{
struct core_info *ci;
ci = get_core_info();
- if (core_num >= POWER_MGR_MAX_CPUS)
+ if (core_num >= RTE_MAX_LCORE)
return -1;
if (!(ci->cd[core_num].global_enabled_cpus))
return -1;