pkt->nb_mac_to_monitor = 0;
pkt->t_boost_status.tbEnabled = false;
- pkt->workload = LOW;
- pkt->policy_to_use = TIME;
- pkt->command = PKT_POLICY;
- pkt->core_type = CORE_TYPE_PHYSICAL;
+ pkt->workload = RTE_POWER_WL_LOW;
+ pkt->policy_to_use = RTE_POWER_POLICY_TIME;
+ pkt->command = RTE_POWER_PKT_POLICY;
+ pkt->core_type = RTE_POWER_CORE_TYPE_PHYSICAL;
if (vm_name == NULL) {
RTE_LOG(ERR, CHANNEL_MONITOR,
char command[32];
strlcpy(command, json_string_value(value), 32);
if (!strcmp(command, "power")) {
- pkt->command = CPU_POWER;
+ pkt->command = RTE_POWER_CPU_POWER;
} else if (!strcmp(command, "create")) {
- pkt->command = PKT_POLICY;
+ pkt->command = RTE_POWER_PKT_POLICY;
} else if (!strcmp(command, "destroy")) {
- pkt->command = PKT_POLICY_REMOVE;
+ pkt->command = RTE_POWER_PKT_POLICY_REMOVE;
} else {
RTE_LOG(ERR, CHANNEL_MONITOR,
"Invalid command received in JSON\n");
char command[32];
strlcpy(command, json_string_value(value), 32);
if (!strcmp(command, "TIME")) {
- pkt->policy_to_use = TIME;
+ pkt->policy_to_use =
+ RTE_POWER_POLICY_TIME;
} else if (!strcmp(command, "TRAFFIC")) {
- pkt->policy_to_use = TRAFFIC;
+ pkt->policy_to_use =
+ RTE_POWER_POLICY_TRAFFIC;
} else if (!strcmp(command, "WORKLOAD")) {
- pkt->policy_to_use = WORKLOAD;
+ pkt->policy_to_use =
+ RTE_POWER_POLICY_WORKLOAD;
} else if (!strcmp(command, "BRANCH_RATIO")) {
- pkt->policy_to_use = BRANCH_RATIO;
+ pkt->policy_to_use =
+ RTE_POWER_POLICY_BRANCH_RATIO;
} else {
RTE_LOG(ERR, CHANNEL_MONITOR,
"Wrong policy_type received in JSON\n");
char command[32];
strlcpy(command, json_string_value(value), 32);
if (!strcmp(command, "HIGH")) {
- pkt->workload = HIGH;
+ pkt->workload = RTE_POWER_WL_HIGH;
} else if (!strcmp(command, "MEDIUM")) {
- pkt->workload = MEDIUM;
+ pkt->workload = RTE_POWER_WL_MEDIUM;
} else if (!strcmp(command, "LOW")) {
- pkt->workload = LOW;
+ pkt->workload = RTE_POWER_WL_LOW;
} else {
RTE_LOG(ERR, CHANNEL_MONITOR,
"Wrong workload received in JSON\n");
char unit[32];
strlcpy(unit, json_string_value(value), 32);
if (!strcmp(unit, "SCALE_UP")) {
- pkt->unit = CPU_POWER_SCALE_UP;
+ pkt->unit = RTE_POWER_SCALE_UP;
} else if (!strcmp(unit, "SCALE_DOWN")) {
- pkt->unit = CPU_POWER_SCALE_DOWN;
+ pkt->unit = RTE_POWER_SCALE_DOWN;
} else if (!strcmp(unit, "SCALE_MAX")) {
- pkt->unit = CPU_POWER_SCALE_MAX;
+ pkt->unit = RTE_POWER_SCALE_MAX;
} else if (!strcmp(unit, "SCALE_MIN")) {
- pkt->unit = CPU_POWER_SCALE_MIN;
+ pkt->unit = RTE_POWER_SCALE_MIN;
} else if (!strcmp(unit, "ENABLE_TURBO")) {
- pkt->unit = CPU_POWER_ENABLE_TURBO;
+ pkt->unit = RTE_POWER_ENABLE_TURBO;
} else if (!strcmp(unit, "DISABLE_TURBO")) {
- pkt->unit = CPU_POWER_DISABLE_TURBO;
+ pkt->unit = RTE_POWER_DISABLE_TURBO;
} else {
RTE_LOG(ERR, CHANNEL_MONITOR,
"Invalid command received in JSON\n");
vm_name);
return -1;
}
- strlcpy(pkt->vm_name, vm_name, VM_MAX_NAME_SZ);
+ strlcpy(pkt->vm_name, vm_name, RTE_POWER_VM_MAX_NAME_SZ);
pkt->resource_id = resource_id;
}
return 0;
{
int ret = 0;
- if (pol->pkt.policy_to_use == BRANCH_RATIO) {
+ if (pol->pkt.policy_to_use == RTE_POWER_POLICY_BRANCH_RATIO) {
ci->cd[pcpu].oob_enabled = 1;
ret = add_core_to_monitor(pcpu);
if (ret == 0)
 * differentiate between them when adding them to the branch monitor.
* Virtual cores need to be converted to physical cores.
*/
- if (pol->pkt.core_type == CORE_TYPE_VIRTUAL) {
+ if (pol->pkt.core_type == RTE_POWER_CORE_TYPE_VIRTUAL) {
/*
* If the cores in the policy are virtual, we need to map them
* to physical core. We look up the vm info and use that for
policies[i].pkt = *pkt;
get_pcpu_to_control(&policies[i]);
/* Check Eth dev only for Traffic policy */
- if (policies[i].pkt.policy_to_use == TRAFFIC) {
+ if (policies[i].pkt.policy_to_use ==
+ RTE_POWER_POLICY_TRAFFIC) {
if (get_pfid(&policies[i]) < 0) {
updated = 1;
break;
policies[i].pkt = *pkt;
get_pcpu_to_control(&policies[i]);
/* Check Eth dev only for Traffic policy */
- if (policies[i].pkt.policy_to_use == TRAFFIC) {
+ if (policies[i].pkt.policy_to_use ==
+ RTE_POWER_POLICY_TRAFFIC) {
if (get_pfid(&policies[i]) < 0) {
updated = 1;
break;
/* Format the date and time, down to a single second. */
strftime(time_string, sizeof(time_string), "%Y-%m-%d %H:%M:%S", ptm);
- for (x = 0; x < HOURS; x++) {
+ for (x = 0; x < RTE_POWER_HOURS_PER_DAY; x++) {
if (ptm->tm_hour == pol->pkt.timer_policy.busy_hours[x]) {
for (count = 0; count < pol->pkt.num_vcpu; count++) {
int count;
- if (pol->pkt.workload == HIGH) {
+ if (pol->pkt.workload == RTE_POWER_WL_HIGH) {
for (count = 0; count < pol->pkt.num_vcpu; count++) {
if (pol->core_share[count].status != 1)
power_manager_scale_core_max(
pol->core_share[count].pcpu);
}
- } else if (pol->pkt.workload == MEDIUM) {
+ } else if (pol->pkt.workload == RTE_POWER_WL_MEDIUM) {
for (count = 0; count < pol->pkt.num_vcpu; count++) {
if (pol->core_share[count].status != 1)
power_manager_scale_core_med(
pol->core_share[count].pcpu);
}
- } else if (pol->pkt.workload == LOW) {
+ } else if (pol->pkt.workload == RTE_POWER_WL_LOW) {
for (count = 0; count < pol->pkt.num_vcpu; count++) {
if (pol->core_share[count].status != 1)
power_manager_scale_core_min(
struct rte_power_channel_packet *pkt = &pol->pkt;
/*Check policy to use*/
- if (pkt->policy_to_use == TRAFFIC)
+ if (pkt->policy_to_use == RTE_POWER_POLICY_TRAFFIC)
apply_traffic_profile(pol);
- else if (pkt->policy_to_use == TIME)
+ else if (pkt->policy_to_use == RTE_POWER_POLICY_TIME)
apply_time_profile(pol);
- else if (pkt->policy_to_use == WORKLOAD)
+ else if (pkt->policy_to_use == RTE_POWER_POLICY_WORKLOAD)
apply_workload_profile(pol);
}
if (get_info_vm(pkt->vm_name, &info) != 0)
return -1;
- if (!freq_list && vcore_id >= MAX_VCPU_PER_VM)
+ if (!freq_list && vcore_id >= RTE_POWER_MAX_VCPU_PER_VM)
return -1;
if (!info.allow_query)
return -1;
- channel_pkt_freq_list.command = CPU_POWER_FREQ_LIST;
+ channel_pkt_freq_list.command = RTE_POWER_FREQ_LIST;
channel_pkt_freq_list.num_vcpu = info.num_vcpus;
if (freq_list) {
if (get_info_vm(pkt->vm_name, &info) != 0)
return -1;
- if (!list_requested && vcore_id >= MAX_VCPU_PER_VM)
+ if (!list_requested && vcore_id >= RTE_POWER_MAX_VCPU_PER_VM)
return -1;
if (!info.allow_query)
return -1;
- channel_pkt_caps_list.command = CPU_POWER_CAPS_LIST;
+ channel_pkt_caps_list.command = RTE_POWER_CAPS_LIST;
channel_pkt_caps_list.num_vcpu = info.num_vcpus;
if (list_requested) {
CHANNEL_MGR_CHANNEL_PROCESSING) == 0)
return -1;
- if (pkt->command == CPU_POWER) {
+ if (pkt->command == RTE_POWER_CPU_POWER) {
unsigned int core_num;
- if (pkt->core_type == CORE_TYPE_VIRTUAL)
+ if (pkt->core_type == RTE_POWER_CORE_TYPE_VIRTUAL)
core_num = get_pcpu(chan_info, pkt->resource_id);
else
core_num = pkt->resource_id;
bool valid_unit = true;
switch (pkt->unit) {
- case(CPU_POWER_SCALE_MIN):
+ case(RTE_POWER_SCALE_MIN):
scale_res = power_manager_scale_core_min(core_num);
break;
- case(CPU_POWER_SCALE_MAX):
+ case(RTE_POWER_SCALE_MAX):
scale_res = power_manager_scale_core_max(core_num);
break;
- case(CPU_POWER_SCALE_DOWN):
+ case(RTE_POWER_SCALE_DOWN):
scale_res = power_manager_scale_core_down(core_num);
break;
- case(CPU_POWER_SCALE_UP):
+ case(RTE_POWER_SCALE_UP):
scale_res = power_manager_scale_core_up(core_num);
break;
- case(CPU_POWER_ENABLE_TURBO):
+ case(RTE_POWER_ENABLE_TURBO):
scale_res = power_manager_enable_turbo_core(core_num);
break;
- case(CPU_POWER_DISABLE_TURBO):
+ case(RTE_POWER_DISABLE_TURBO):
scale_res = power_manager_disable_turbo_core(core_num);
break;
default:
ret = send_ack_for_received_cmd(pkt,
chan_info,
scale_res >= 0 ?
- CPU_POWER_CMD_ACK :
- CPU_POWER_CMD_NACK);
+ RTE_POWER_CMD_ACK :
+ RTE_POWER_CMD_NACK);
if (ret < 0)
RTE_LOG(ERR, CHANNEL_MONITOR, "Error during sending ack command.\n");
} else
}
- if (pkt->command == PKT_POLICY) {
+ if (pkt->command == RTE_POWER_PKT_POLICY) {
RTE_LOG(INFO, CHANNEL_MONITOR, "Processing policy request %s\n",
pkt->vm_name);
int ret = send_ack_for_received_cmd(pkt,
chan_info,
- CPU_POWER_CMD_ACK);
+ RTE_POWER_CMD_ACK);
if (ret < 0)
RTE_LOG(ERR, CHANNEL_MONITOR, "Error during sending ack command.\n");
update_policy(pkt);
policy_is_set = 1;
}
- if (pkt->command == PKT_POLICY_REMOVE) {
+ if (pkt->command == RTE_POWER_PKT_POLICY_REMOVE) {
ret = remove_policy(pkt);
if (ret == 0)
RTE_LOG(INFO, CHANNEL_MONITOR,
"Policy %s does not exist\n", pkt->vm_name);
}
- if (pkt->command == CPU_POWER_QUERY_FREQ_LIST ||
- pkt->command == CPU_POWER_QUERY_FREQ) {
+ if (pkt->command == RTE_POWER_QUERY_FREQ_LIST ||
+ pkt->command == RTE_POWER_QUERY_FREQ) {
RTE_LOG(INFO, CHANNEL_MONITOR,
"Frequency for %s requested.\n", pkt->vm_name);
int ret = send_freq(pkt,
chan_info,
- pkt->command == CPU_POWER_QUERY_FREQ_LIST);
+ pkt->command == RTE_POWER_QUERY_FREQ_LIST);
if (ret < 0)
RTE_LOG(ERR, CHANNEL_MONITOR, "Error during frequency sending.\n");
}
- if (pkt->command == CPU_POWER_QUERY_CAPS_LIST ||
- pkt->command == CPU_POWER_QUERY_CAPS) {
+ if (pkt->command == RTE_POWER_QUERY_CAPS_LIST ||
+ pkt->command == RTE_POWER_QUERY_CAPS) {
RTE_LOG(INFO, CHANNEL_MONITOR,
"Capabilities for %s requested.\n", pkt->vm_name);
int ret = send_capabilities(pkt,
chan_info,
- pkt->command == CPU_POWER_QUERY_CAPS_LIST);
+ pkt->command == RTE_POWER_QUERY_CAPS_LIST);
if (ret < 0)
RTE_LOG(ERR, CHANNEL_MONITOR, "Error during sending capabilities.\n");
}
struct policy {
struct rte_power_channel_packet pkt;
- uint32_t pfid[MAX_VFS];
- uint32_t port[MAX_VFS];
+ uint32_t pfid[RTE_POWER_MAX_VFS];
+ uint32_t port[RTE_POWER_MAX_VFS];
unsigned int enabled;
- struct core_share core_share[MAX_VCPU_PER_VM];
+ struct core_share core_share[RTE_POWER_MAX_VCPU_PER_VM];
};
#ifdef __cplusplus
};
struct rte_power_channel_packet *policy;
unsigned short int hours[MAX_HOURS];
- unsigned short int cores[MAX_VCPU_PER_VM];
- unsigned short int ports[MAX_VCPU_PER_VM];
+ unsigned short int cores[RTE_POWER_MAX_VCPU_PER_VM];
+ unsigned short int ports[RTE_POWER_MAX_VCPU_PER_VM];
int i, cnt, idx;
policy = get_policy();
switch (opt) {
/* portmask */
case 'n':
- strlcpy(policy->vm_name, optarg, VM_MAX_NAME_SZ);
+ strlcpy(policy->vm_name, optarg,
+ RTE_POWER_VM_MAX_NAME_SZ);
printf("Setting VM Name to [%s]\n", policy->vm_name);
break;
case 'b':
}
break;
case 'l':
- cnt = parse_set(optarg, cores, MAX_VCPU_PER_VM);
+ cnt = parse_set(optarg, cores,
+ RTE_POWER_MAX_VCPU_PER_VM);
if (cnt < 0) {
printf("Invalid value passed to vcpu-list - [%s]\n",
optarg);
break;
}
idx = 0;
- for (i = 0; i < MAX_VCPU_PER_VM; i++) {
+ for (i = 0; i < RTE_POWER_MAX_VCPU_PER_VM; i++) {
if (cores[i]) {
printf("***Using core %d\n", i);
policy->vcpu_to_control[idx++] = i;
printf("Total cores: %d\n", idx);
break;
case 'p':
- cnt = parse_set(optarg, ports, MAX_VCPU_PER_VM);
+ cnt = parse_set(optarg, ports,
+ RTE_POWER_MAX_VCPU_PER_VM);
if (cnt < 0) {
printf("Invalid value passed to port-list - [%s]\n",
optarg);
break;
}
idx = 0;
- for (i = 0; i < MAX_VCPU_PER_VM; i++) {
+ for (i = 0; i < RTE_POWER_MAX_VCPU_PER_VM; i++) {
if (ports[i]) {
printf("***Using port %d\n", i);
if (set_policy_mac(i, idx++) != 0) {
break;
case 'o':
if (!strcmp(optarg, "TRAFFIC"))
- policy->policy_to_use = TRAFFIC;
+ policy->policy_to_use =
+ RTE_POWER_POLICY_TRAFFIC;
else if (!strcmp(optarg, "TIME"))
- policy->policy_to_use = TIME;
+ policy->policy_to_use =
+ RTE_POWER_POLICY_TIME;
else if (!strcmp(optarg, "WORKLOAD"))
- policy->policy_to_use = WORKLOAD;
+ policy->policy_to_use =
+ RTE_POWER_POLICY_WORKLOAD;
else if (!strcmp(optarg, "BRANCH_RATIO"))
- policy->policy_to_use = BRANCH_RATIO;
+ policy->policy_to_use =
+ RTE_POWER_POLICY_BRANCH_RATIO;
else {
printf("Invalid policy specified: %s\n",
optarg);
pkt->timer_policy.hours_to_use_traffic_profile[0] = 8;
pkt->timer_policy.hours_to_use_traffic_profile[1] = 10;
- pkt->core_type = CORE_TYPE_VIRTUAL;
- pkt->workload = LOW;
- pkt->policy_to_use = TIME;
- pkt->command = PKT_POLICY;
+ pkt->core_type = RTE_POWER_CORE_TYPE_VIRTUAL;
+ pkt->workload = RTE_POWER_WL_LOW;
+ pkt->policy_to_use = RTE_POWER_POLICY_TIME;
+ pkt->command = RTE_POWER_PKT_POLICY;
strlcpy(pkt->vm_name, "ubuntu2", sizeof(pkt->vm_name));
return 0;
RTE_LOG(ERR, GUEST_CLI, "Error receiving message.\n");
return -1;
}
- if (pkt_freq_list->command != CPU_POWER_FREQ_LIST) {
+ if (pkt_freq_list->command != RTE_POWER_FREQ_LIST) {
RTE_LOG(ERR, GUEST_CLI, "Unexpected message received.\n");
return -1;
}
return;
}
- pkt.command = CPU_POWER_QUERY_FREQ_LIST;
+ pkt.command = RTE_POWER_QUERY_FREQ_LIST;
strlcpy(pkt.vm_name, policy.vm_name, sizeof(pkt.vm_name));
query_list = true;
} else {
errno = 0;
lcore_id = (unsigned int)strtol(res->cpu_num, &ep, 10);
- if (errno != 0 || lcore_id >= MAX_VCPU_PER_VM ||
+ if (errno != 0 || lcore_id >= RTE_POWER_MAX_VCPU_PER_VM ||
ep == res->cpu_num) {
cmdline_printf(cl, "Invalid parameter provided.\n");
return;
}
- pkt.command = CPU_POWER_QUERY_FREQ;
+ pkt.command = RTE_POWER_QUERY_FREQ;
strlcpy(pkt.vm_name, policy.vm_name, sizeof(pkt.vm_name));
pkt.resource_id = lcore_id;
}
RTE_LOG(ERR, GUEST_CLI, "Error receiving message.\n");
return -1;
}
- if (pkt_caps_list->command != CPU_POWER_CAPS_LIST) {
+ if (pkt_caps_list->command != RTE_POWER_CAPS_LIST) {
RTE_LOG(ERR, GUEST_CLI, "Unexpected message received.\n");
return -1;
}
return;
}
- pkt.command = CPU_POWER_QUERY_CAPS_LIST;
+ pkt.command = RTE_POWER_QUERY_CAPS_LIST;
strlcpy(pkt.vm_name, policy.vm_name, sizeof(pkt.vm_name));
query_list = true;
} else {
errno = 0;
lcore_id = (unsigned int)strtol(res->cpu_num, &ep, 10);
- if (errno != 0 || lcore_id >= MAX_VCPU_PER_VM ||
+ if (errno != 0 || lcore_id >= RTE_POWER_MAX_VCPU_PER_VM ||
ep == res->cpu_num) {
cmdline_printf(cl, "Invalid parameter provided.\n");
return;
}
- pkt.command = CPU_POWER_QUERY_CAPS;
+ pkt.command = RTE_POWER_QUERY_CAPS;
strlcpy(pkt.vm_name, policy.vm_name, sizeof(pkt.vm_name));
pkt.resource_id = lcore_id;
}
return -1;
switch (pkt.command) {
- case(CPU_POWER_CMD_ACK):
+ case(RTE_POWER_CMD_ACK):
*result = 1;
break;
- case(CPU_POWER_CMD_NACK):
+ case(RTE_POWER_CMD_NACK):
*result = 0;
break;
default:
"Cannot init port %"PRIu8 "\n",
portid);
- for (w = 0; w < MAX_VFS; w++) {
+ for (w = 0; w < RTE_POWER_MAX_VFS; w++) {
eth.addr_bytes[5] = w + 0xf0;
ret = -ENOTSUP;
/* Send a test packet, this command is ignored by the host, but a successful
* send indicates that the host endpoint is monitoring.
*/
- pkt.command = CPU_POWER_CONNECT;
+ pkt.command = RTE_POWER_CPU_POWER_CONNECT;
global_fds[lcore_id] = fd;
ret = guest_channel_send_msg(&pkt, lcore_id);
if (ret != 0) {
lcore_id, RTE_MAX_LCORE-1);
return -1;
}
- pkt[lcore_id].command = CPU_POWER;
+ pkt[lcore_id].command = RTE_POWER_CPU_POWER;
pkt[lcore_id].resource_id = lcore_id;
return guest_channel_host_connect(FD_PATH, lcore_id);
}
int
power_kvm_vm_freq_up(unsigned int lcore_id)
{
- return send_msg(lcore_id, CPU_POWER_SCALE_UP);
+ return send_msg(lcore_id, RTE_POWER_SCALE_UP);
}
int
power_kvm_vm_freq_down(unsigned int lcore_id)
{
- return send_msg(lcore_id, CPU_POWER_SCALE_DOWN);
+ return send_msg(lcore_id, RTE_POWER_SCALE_DOWN);
}
int
power_kvm_vm_freq_max(unsigned int lcore_id)
{
- return send_msg(lcore_id, CPU_POWER_SCALE_MAX);
+ return send_msg(lcore_id, RTE_POWER_SCALE_MAX);
}
int
power_kvm_vm_freq_min(unsigned int lcore_id)
{
- return send_msg(lcore_id, CPU_POWER_SCALE_MIN);
+ return send_msg(lcore_id, RTE_POWER_SCALE_MIN);
}
int
int
power_kvm_vm_enable_turbo(unsigned int lcore_id)
{
- return send_msg(lcore_id, CPU_POWER_ENABLE_TURBO);
+ return send_msg(lcore_id, RTE_POWER_ENABLE_TURBO);
}
int
power_kvm_vm_disable_turbo(unsigned int lcore_id)
{
- return send_msg(lcore_id, CPU_POWER_DISABLE_TURBO);
+ return send_msg(lcore_id, RTE_POWER_DISABLE_TURBO);
}
struct rte_power_core_capabilities;
#include <stdint.h>
#include <stdbool.h>
-#define MAX_VFS 10
-#define VM_MAX_NAME_SZ 32
-#define MAX_VCPU_PER_VM 8
-#define HOURS 24
+#define RTE_POWER_MAX_VFS 10
+#define RTE_POWER_VM_MAX_NAME_SZ 32
+#define RTE_POWER_MAX_VCPU_PER_VM 8
+#define RTE_POWER_HOURS_PER_DAY 24
/* Valid Commands */
-#define CPU_POWER 1
-#define CPU_POWER_CONNECT 2
-#define PKT_POLICY 3
-#define PKT_POLICY_REMOVE 4
+#define RTE_POWER_CPU_POWER 1
+#define RTE_POWER_CPU_POWER_CONNECT 2
+#define RTE_POWER_PKT_POLICY 3
+#define RTE_POWER_PKT_POLICY_REMOVE 4
-#define CORE_TYPE_VIRTUAL 0
-#define CORE_TYPE_PHYSICAL 1
+#define RTE_POWER_CORE_TYPE_VIRTUAL 0
+#define RTE_POWER_CORE_TYPE_PHYSICAL 1
/* CPU Power Command Scaling */
-#define CPU_POWER_SCALE_UP 1
-#define CPU_POWER_SCALE_DOWN 2
-#define CPU_POWER_SCALE_MAX 3
-#define CPU_POWER_SCALE_MIN 4
-#define CPU_POWER_ENABLE_TURBO 5
-#define CPU_POWER_DISABLE_TURBO 6
+#define RTE_POWER_SCALE_UP 1
+#define RTE_POWER_SCALE_DOWN 2
+#define RTE_POWER_SCALE_MAX 3
+#define RTE_POWER_SCALE_MIN 4
+#define RTE_POWER_ENABLE_TURBO 5
+#define RTE_POWER_DISABLE_TURBO 6
/* CPU Power Queries */
-#define CPU_POWER_QUERY_FREQ_LIST 7
-#define CPU_POWER_QUERY_FREQ 8
-#define CPU_POWER_QUERY_CAPS_LIST 9
-#define CPU_POWER_QUERY_CAPS 10
-
-/* --- Outgoing messages --- */
+#define RTE_POWER_QUERY_FREQ_LIST 7
+#define RTE_POWER_QUERY_FREQ 8
+#define RTE_POWER_QUERY_CAPS_LIST 9
+#define RTE_POWER_QUERY_CAPS 10
/* Generic Power Command Response */
-#define CPU_POWER_CMD_ACK 1
-#define CPU_POWER_CMD_NACK 2
+#define RTE_POWER_CMD_ACK 1
+#define RTE_POWER_CMD_NACK 2
/* CPU Power Query Responses */
-#define CPU_POWER_FREQ_LIST 3
-#define CPU_POWER_CAPS_LIST 4
+#define RTE_POWER_FREQ_LIST 3
+#define RTE_POWER_CAPS_LIST 4
-struct rte_power_timer_profile {
- int busy_hours[HOURS];
- int quiet_hours[HOURS];
- int hours_to_use_traffic_profile[HOURS];
+struct rte_power_traffic_policy {
+ uint32_t min_packet_thresh;
+ uint32_t avg_max_packet_thresh;
+ uint32_t max_max_packet_thresh;
};
-enum rte_power_workload_level {HIGH, MEDIUM, LOW};
+struct rte_power_timer_profile {
+ int busy_hours[RTE_POWER_HOURS_PER_DAY];
+ int quiet_hours[RTE_POWER_HOURS_PER_DAY];
+ int hours_to_use_traffic_profile[RTE_POWER_HOURS_PER_DAY];
+};
-enum rte_power_policy {
- TRAFFIC,
- TIME,
- WORKLOAD,
- BRANCH_RATIO
+enum rte_power_workload_level {
+ RTE_POWER_WL_HIGH,
+ RTE_POWER_WL_MEDIUM,
+ RTE_POWER_WL_LOW
};
-struct rte_power_traffic_policy {
- uint32_t min_packet_thresh;
- uint32_t avg_max_packet_thresh;
- uint32_t max_max_packet_thresh;
+enum rte_power_policy {
+ RTE_POWER_POLICY_TRAFFIC,
+ RTE_POWER_POLICY_TIME,
+ RTE_POWER_POLICY_WORKLOAD,
+ RTE_POWER_POLICY_BRANCH_RATIO
};
struct rte_power_turbo_status {
uint64_t resource_id; /**< core_num, device */
uint32_t unit; /**< scale down/up/min/max */
uint32_t command; /**< Power, IO, etc */
- char vm_name[VM_MAX_NAME_SZ];
+ char vm_name[RTE_POWER_VM_MAX_NAME_SZ];
- uint64_t vfid[MAX_VFS];
+ uint64_t vfid[RTE_POWER_MAX_VFS];
int nb_mac_to_monitor;
struct rte_power_traffic_policy traffic_policy;
- uint8_t vcpu_to_control[MAX_VCPU_PER_VM];
+ uint8_t vcpu_to_control[RTE_POWER_MAX_VCPU_PER_VM];
uint8_t num_vcpu;
struct rte_power_timer_profile timer_policy;
bool core_type;
uint64_t resource_id; /**< core_num, device */
uint32_t unit; /**< scale down/up/min/max */
uint32_t command; /**< Power, IO, etc */
- char vm_name[VM_MAX_NAME_SZ];
+ char vm_name[RTE_POWER_VM_MAX_NAME_SZ];
- uint32_t freq_list[MAX_VCPU_PER_VM];
+ uint32_t freq_list[RTE_POWER_MAX_VCPU_PER_VM];
uint8_t num_vcpu;
};
uint64_t resource_id; /**< core_num, device */
uint32_t unit; /**< scale down/up/min/max */
uint32_t command; /**< Power, IO, etc */
- char vm_name[VM_MAX_NAME_SZ];
+ char vm_name[RTE_POWER_VM_MAX_NAME_SZ];
- uint64_t turbo[MAX_VCPU_PER_VM];
- uint64_t priority[MAX_VCPU_PER_VM];
+ uint64_t turbo[RTE_POWER_MAX_VCPU_PER_VM];
+ uint64_t priority[RTE_POWER_MAX_VCPU_PER_VM];
uint8_t num_vcpu;
};