*/
struct virtual_machine_info {
char name[CHANNEL_MGR_MAX_NAME_LEN];
- rte_atomic64_t pcpu_mask[CHANNEL_CMDS_MAX_CPUS];
+ uint16_t pcpu_map[CHANNEL_CMDS_MAX_CPUS];
struct channel_info *channels[CHANNEL_CMDS_MAX_VM_CHANNELS];
char channel_mask[POWER_MGR_MAX_CPUS];
uint8_t num_channels;
virVcpuInfoPtr cpuinfo;
unsigned i, j;
int n_vcpus;
- uint64_t mask;
memset(global_cpumaps, 0, CHANNEL_CMDS_MAX_CPUS*global_maplen);
n_vcpus);
vm_info->info.nrVirtCpu = n_vcpus;
}
+ rte_spinlock_lock(&(vm_info->config_spinlock));
for (i = 0; i < vm_info->info.nrVirtCpu; i++) {
- mask = 0;
for (j = 0; j < global_n_host_cpus; j++) {
- if (VIR_CPU_USABLE(global_cpumaps, global_maplen, i, j) > 0) {
- mask |= 1ULL << j;
- }
+ if (VIR_CPU_USABLE(global_cpumaps,
+ global_maplen, i, j) <= 0)
+ continue;
+ vm_info->pcpu_map[i] = j;
}
- rte_atomic64_set(&vm_info->pcpu_mask[i], mask);
}
+ rte_spinlock_unlock(&(vm_info->config_spinlock));
return 0;
}
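/*
 * Editor's note, a sketch of the new semantics (not part of the patch):
 * the rewritten loop keeps a single pCPU per vCPU. If libvirt reports a
 * vCPU as usable on several host CPUs, each later match overwrites the
 * earlier one, so pcpu_map[i] ends up holding the highest-numbered usable
 * pCPU:
 *
 *   usable pCPUs {2, 5}  ->  pcpu_map[i] == 5
 *   usable pCPU  {0}     ->  pcpu_map[i] == 0
 *   no usable pCPU       ->  pcpu_map[i] left at its previous value
 */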
int
-set_pcpus_mask(char *vm_name, unsigned int vcpu, char *core_mask)
+set_pcpu(char *vm_name, unsigned int vcpu, unsigned int pcpu)
{
- unsigned i = 0;
int flags = VIR_DOMAIN_AFFECT_LIVE|VIR_DOMAIN_AFFECT_CONFIG;
struct virtual_machine_info *vm_info;
- char mask[POWER_MGR_MAX_CPUS];
-
- memcpy(mask, core_mask, POWER_MGR_MAX_CPUS);
if (vcpu >= CHANNEL_CMDS_MAX_CPUS) {
RTE_LOG(ERR, CHANNEL_MANAGER, "vCPU(%u) exceeds max allowable(%d)\n",
return -1;
}
memset(global_cpumaps, 0, CHANNEL_CMDS_MAX_CPUS * global_maplen);
- for (i = 0; i < POWER_MGR_MAX_CPUS; i++) {
- if (mask[i] != 1)
- continue;
- VIR_USE_CPU(global_cpumaps, i);
- if (i >= global_n_host_cpus) {
- RTE_LOG(ERR, CHANNEL_MANAGER, "CPU(%u) exceeds the available "
- "number of CPUs(%u)\n",
- i, global_n_host_cpus);
- return -1;
- }
- }
+
+ if (pcpu >= global_n_host_cpus) {
+ RTE_LOG(ERR, CHANNEL_MANAGER, "CPU(%u) exceeds the available "
+ "number of CPUs(%u)\n",
+ pcpu, global_n_host_cpus);
+ return -1;
+ }
+
+ VIR_USE_CPU(global_cpumaps, pcpu);
+
if (virDomainPinVcpuFlags(vm_info->domainPtr, vcpu, global_cpumaps,
global_maplen, flags) < 0) {
RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to set vCPU(%u) to pCPU "
return -1;
}
rte_spinlock_lock(&(vm_info->config_spinlock));
- memcpy(&vm_info->pcpu_mask[vcpu], mask, POWER_MGR_MAX_CPUS);
+ vm_info->pcpu_map[vcpu] = pcpu;
rte_spinlock_unlock(&(vm_info->config_spinlock));
return 0;
-
-}
-
-int
-set_pcpu(char *vm_name, unsigned vcpu, unsigned core_num)
-{
- char mask[POWER_MGR_MAX_CPUS];
-
- memset(mask, 0, POWER_MGR_MAX_CPUS);
-
- mask[core_num] = 1;
-
- return set_pcpus_mask(vm_name, vcpu, mask);
}
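/*
 * Editor's sketch (not part of the patch): with set_pcpus_mask() and the
 * old wrapper gone, callers pass the core number directly. The domain
 * name "vm0" is hypothetical.
 */
#if 0 /* illustrative only */
	if (set_pcpu("vm0", 0, 3) < 0)
		RTE_LOG(ERR, CHANNEL_MANAGER,
				"Could not pin vCPU(0) of vm0 to pCPU(3)\n");
#endif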
-uint64_t
-get_pcpus_mask(struct channel_info *chan_info, unsigned vcpu)
+uint16_t
+get_pcpu(struct channel_info *chan_info, unsigned int vcpu)
{
struct virtual_machine_info *vm_info =
(struct virtual_machine_info *)chan_info->priv_info;
- if (global_hypervisor_available && (vm_info != NULL))
- return rte_atomic64_read(&vm_info->pcpu_mask[vcpu]);
- else
+ if (global_hypervisor_available && (vm_info != NULL)) {
+ uint16_t pcpu;
+ rte_spinlock_lock(&(vm_info->config_spinlock));
+ pcpu = vm_info->pcpu_map[vcpu];
+ rte_spinlock_unlock(&(vm_info->config_spinlock));
+ return pcpu;
+ } else
return 0;
}
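/*
 * Editor's note (not part of the patch): reads are now serialised on
 * config_spinlock instead of using rte_atomic64, and the caller gets a
 * core number rather than a mask. The return value 0 is ambiguous: it is
 * both a valid pCPU and the no-hypervisor fallback. Hypothetical usage:
 */
#if 0 /* illustrative only */
	uint16_t pcpu = get_pcpu(chan_info, pkt->resource_id);

	power_manager_scale_core_min(pcpu);
#endif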
rte_spinlock_unlock(&(vm_info->config_spinlock));
memcpy(info->name, vm_info->name, sizeof(vm_info->name));
+ rte_spinlock_lock(&(vm_info->config_spinlock));
for (i = 0; i < info->num_vcpus; i++) {
- info->pcpu_mask[i] = rte_atomic64_read(&vm_info->pcpu_mask[i]);
+ info->pcpu_map[i] = vm_info->pcpu_map[i];
}
+ rte_spinlock_unlock(&(vm_info->config_spinlock));
return 0;
}
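/*
 * Editor's sketch (not part of the patch): a consumer of get_info_vm()
 * now reads one core number per vCPU instead of decoding a 64-bit mask.
 * "vm0" is a hypothetical domain name.
 */
#if 0 /* illustrative only */
	struct vm_info info;
	unsigned i;

	if (get_info_vm("vm0", &info) == 0)
		for (i = 0; i < info.num_vcpus; i++)
			printf("vCPU %u -> pCPU %u\n",
					i, (unsigned)info.pcpu_map[i]);
#endif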
}
for (i = 0; i < CHANNEL_CMDS_MAX_CPUS; i++) {
- rte_atomic64_init(&new_domain->pcpu_mask[i]);
+ new_domain->pcpu_map[i] = 0;
}
if (update_pcpus_mask(new_domain) < 0) {
RTE_LOG(ERR, CHANNEL_MANAGER, "Error getting physical CPU pinning\n");
struct vm_info {
char name[CHANNEL_MGR_MAX_NAME_LEN]; /**< VM name */
enum vm_status status; /**< libvirt status */
- uint64_t pcpu_mask[CHANNEL_CMDS_MAX_CPUS]; /**< pCPU mask for each vCPU */
+ uint16_t pcpu_map[CHANNEL_CMDS_MAX_CPUS]; /**< pCPU mapped to each vCPU */
unsigned num_vcpus; /**< number of vCPUS */
struct channel_info channels[CHANNEL_MGR_MAX_CHANNELS]; /**< Array of channel_info */
unsigned num_channels; /**< Number of channels */
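/*
 * Editor's note (not part of the patch): one uint16_t per vCPU instead of
 * a uint64_t mask shrinks the per-VM map to a quarter of its former size,
 * which a compile-time check can confirm:
 */
#if 0 /* illustrative only */
_Static_assert(sizeof(uint16_t[CHANNEL_CMDS_MAX_CPUS]) * 4 ==
		sizeof(uint64_t[CHANNEL_CMDS_MAX_CPUS]),
		"pcpu_map is a quarter the size of the old pcpu_mask");
#endif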
void channel_manager_exit(void);
/**
- * Get the Physical CPU mask for VM lcore channel(vcpu), result is assigned to
- * core_mask.
+ * Get the Physical CPU for VM lcore channel(vcpu).
* It is not thread-safe.
*
* @param chan_info
* - 0 on error.
* - >0 on success.
*/
-uint64_t get_pcpus_mask(struct channel_info *chan_info, unsigned vcpu);
-
-/**
- * Set the Physical CPU mask for the specified vCPU.
- * It is not thread-safe.
- *
- * @param name
- * Virtual Machine name to lookup
- *
- * @param vcpu
- * The virtual CPU to set.
- *
- * @param core_mask
- * The core mask of the physical CPU(s) to bind the vCPU
- *
- * @return
- * - 0 on success.
- * - Negative on error.
- */
-int set_pcpus_mask(char *vm_name, unsigned int vcpu, char *core_mask);
+uint16_t get_pcpu(struct channel_info *chan_info, unsigned int vcpu);
/**
* Set the Physical CPU for the specified vCPU.
* - 0 on success.
* - Negative on error.
*/
-int set_pcpu(char *vm_name, unsigned vcpu, unsigned core_num);
+int set_pcpu(char *vm_name, unsigned int vcpu, unsigned int pcpu);
+
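/*
 * Editor's sketch (not part of the patch): typical pairing of the two
 * calls declared above; the domain name and chan_info are hypothetical.
 */
#if 0 /* illustrative only */
	if (set_pcpu("vm0", 1, 4) == 0) {
		uint16_t pcpu = get_pcpu(chan_info, 1);
		/* pcpu == 4, since set_pcpu() updates pcpu_map on success */
	}
#endif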
/**
* Add a VM as specified by name to the Channel Manager. The name must
* correspond to a valid libvirt domain name.
/* Convert vcpu to pcpu. */
struct vm_info info;
int pcpu, count;
- uint64_t mask_u64b;
struct core_info *ci;
ci = get_core_info();
*/
get_info_vm(pol->pkt.vm_name, &info);
for (count = 0; count < pol->pkt.num_vcpu; count++) {
- mask_u64b =
- info.pcpu_mask[pol->pkt.vcpu_to_control[count]];
- for (pcpu = 0; mask_u64b;
- mask_u64b &= ~(1ULL << pcpu++)) {
- if ((mask_u64b >> pcpu) & 1)
- pcpu_monitor(pol, ci, pcpu, count);
- }
+ pcpu = info.pcpu_map[pol->pkt.vcpu_to_control[count]];
+ pcpu_monitor(pol, ci, pcpu, count);
}
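/*
 * Editor's note (not part of the patch): with a one-to-one vCPU-to-pCPU
 * map, the old bit-scan over mask_u64b collapses into a single lookup,
 * so each vCPU is monitored on exactly one core.
 */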
} else {
/*
static int
process_request(struct channel_packet *pkt, struct channel_info *chan_info)
{
- uint64_t core_mask;
-
if (chan_info == NULL)
return -1;
return -1;
if (pkt->command == CPU_POWER) {
- core_mask = get_pcpus_mask(chan_info, pkt->resource_id);
- if (core_mask == 0) {
- /*
- * Core mask will be 0 in the case where
- * hypervisor is not available so we're working in
- * the host, so use the core as the mask.
- */
- core_mask = 1ULL << pkt->resource_id;
- }
- if (__builtin_popcountll(core_mask) == 1) {
+ unsigned int core_num;
- unsigned core_num = __builtin_ffsll(core_mask) - 1;
+ core_num = get_pcpu(chan_info, pkt->resource_id);
- switch (pkt->unit) {
- case(CPU_POWER_SCALE_MIN):
- power_manager_scale_core_min(core_num);
+ switch (pkt->unit) {
+ case(CPU_POWER_SCALE_MIN):
+ power_manager_scale_core_min(core_num);
break;
- case(CPU_POWER_SCALE_MAX):
- power_manager_scale_core_max(core_num);
+ case(CPU_POWER_SCALE_MAX):
+ power_manager_scale_core_max(core_num);
break;
- case(CPU_POWER_SCALE_DOWN):
- power_manager_scale_core_down(core_num);
+ case(CPU_POWER_SCALE_DOWN):
+ power_manager_scale_core_down(core_num);
break;
- case(CPU_POWER_SCALE_UP):
- power_manager_scale_core_up(core_num);
+ case(CPU_POWER_SCALE_UP):
+ power_manager_scale_core_up(core_num);
break;
- case(CPU_POWER_ENABLE_TURBO):
- power_manager_enable_turbo_core(core_num);
+ case(CPU_POWER_ENABLE_TURBO):
+ power_manager_enable_turbo_core(core_num);
break;
- case(CPU_POWER_DISABLE_TURBO):
- power_manager_disable_turbo_core(core_num);
+ case(CPU_POWER_DISABLE_TURBO):
+ power_manager_disable_turbo_core(core_num);
+ break;
+ default:
break;
- default:
- break;
- }
}
}
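/*
 * Editor's sketch (not part of the patch): a request exercising the
 * simplified path above; field names follow the channel_packet usage in
 * this patch and chan_info is hypothetical. Note that with the core_mask
 * fallback removed, a request arriving while no hypervisor is available
 * now resolves to core 0 rather than to resource_id.
 */
#if 0 /* illustrative only */
	struct channel_packet pkt = {
		.command = CPU_POWER,
		.resource_id = 2, /* vCPU whose backing pCPU is scaled */
		.unit = CPU_POWER_SCALE_MAX, /* handled by the switch above */
	};

	process_request(&pkt, chan_info);
#endif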