#include <sys/queue.h>
#include <sys/types.h>
+#include <sys/stat.h>
#include <sys/socket.h>
#include <sys/select.h>
#include "channel_manager.h"
#include "channel_commands.h"
#include "channel_monitor.h"
+#include "power_manager.h"
#define RTE_LOGTYPE_CHANNEL_MANAGER RTE_LOGTYPE_USER1
-#define ITERATIVE_BITMASK_CHECK_64(mask_u64b, i) \
- for (i = 0; mask_u64b; mask_u64b &= ~(1ULL << i++)) \
- if ((mask_u64b >> i) & 1) \
-
/* Global pointer to libvirt connection */
static virConnectPtr global_vir_conn_ptr;
*/
struct virtual_machine_info {
char name[CHANNEL_MGR_MAX_NAME_LEN];
- rte_atomic64_t pcpu_mask[CHANNEL_CMDS_MAX_CPUS];
+ uint16_t pcpu_map[CHANNEL_CMDS_MAX_CPUS];
struct channel_info *channels[CHANNEL_CMDS_MAX_VM_CHANNELS];
- uint64_t channel_mask;
+ char channel_mask[POWER_MGR_MAX_CPUS];
uint8_t num_channels;
enum vm_status status;
virDomainPtr domainPtr;
virVcpuInfoPtr cpuinfo;
unsigned i, j;
int n_vcpus;
- uint64_t mask;
memset(global_cpumaps, 0, CHANNEL_CMDS_MAX_CPUS*global_maplen);
n_vcpus);
vm_info->info.nrVirtCpu = n_vcpus;
}
+ rte_spinlock_lock(&(vm_info->config_spinlock));
for (i = 0; i < vm_info->info.nrVirtCpu; i++) {
- mask = 0;
for (j = 0; j < global_n_host_cpus; j++) {
- if (VIR_CPU_USABLE(global_cpumaps, global_maplen, i, j) > 0) {
- mask |= 1ULL << j;
- }
+ if (VIR_CPU_USABLE(global_cpumaps,
+ global_maplen, i, j) <= 0)
+ continue;
+ vm_info->pcpu_map[i] = j;
}
- rte_atomic64_set(&vm_info->pcpu_mask[i], mask);
}
+ rte_spinlock_unlock(&(vm_info->config_spinlock));
return 0;
}
int
-set_pcpus_mask(char *vm_name, unsigned vcpu, uint64_t core_mask)
+set_pcpu(char *vm_name, unsigned int vcpu, unsigned int pcpu)
{
- unsigned i = 0;
int flags = VIR_DOMAIN_AFFECT_LIVE|VIR_DOMAIN_AFFECT_CONFIG;
struct virtual_machine_info *vm_info;
- uint64_t mask = core_mask;
if (vcpu >= CHANNEL_CMDS_MAX_CPUS) {
RTE_LOG(ERR, CHANNEL_MANAGER, "vCPU(%u) exceeds max allowable(%d)\n",
if (!virDomainIsActive(vm_info->domainPtr)) {
RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to set vCPU(%u) to pCPU "
- "mask(0x%"PRIx64") for VM '%s', VM is not active\n",
- vcpu, core_mask, vm_info->name);
+ " for VM '%s', VM is not active\n",
+ vcpu, vm_info->name);
return -1;
}
return -1;
}
memset(global_cpumaps, 0 , CHANNEL_CMDS_MAX_CPUS * global_maplen);
- ITERATIVE_BITMASK_CHECK_64(mask, i) {
- VIR_USE_CPU(global_cpumaps, i);
- if (i >= global_n_host_cpus) {
- RTE_LOG(ERR, CHANNEL_MANAGER, "CPU(%u) exceeds the available "
- "number of CPUs(%u)\n", i, global_n_host_cpus);
- return -1;
- }
+
+ VIR_USE_CPU(global_cpumaps, pcpu);
+
+ if (pcpu >= global_n_host_cpus) {
+ RTE_LOG(ERR, CHANNEL_MANAGER, "CPU(%u) exceeds the available "
+ "number of CPUs(%u)\n",
+ pcpu, global_n_host_cpus);
+ return -1;
}
+
if (virDomainPinVcpuFlags(vm_info->domainPtr, vcpu, global_cpumaps,
global_maplen, flags) < 0) {
RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to set vCPU(%u) to pCPU "
- "mask(0x%"PRIx64") for VM '%s'\n", vcpu, core_mask,
+ " for VM '%s'\n", vcpu,
vm_info->name);
return -1;
}
- rte_atomic64_set(&vm_info->pcpu_mask[vcpu], core_mask);
+ rte_spinlock_lock(&(vm_info->config_spinlock));
+ vm_info->pcpu_map[vcpu] = pcpu;
+ rte_spinlock_unlock(&(vm_info->config_spinlock));
return 0;
-
-}
-
-int
-set_pcpu(char *vm_name, unsigned vcpu, unsigned core_num)
-{
- uint64_t mask = 1ULL << core_num;
-
- return set_pcpus_mask(vm_name, vcpu, mask);
}
-uint64_t
-get_pcpus_mask(struct channel_info *chan_info, unsigned vcpu)
+/* Return the physical CPU currently mapped to the given vCPU of the VM
+ * that owns chan_info, or 0 when no hypervisor is available or the
+ * channel has no VM attached.  Replaces the old get_pcpus_mask() which
+ * returned a 64-bit affinity bitmask.
+ */
+uint16_t
+get_pcpu(struct channel_info *chan_info, unsigned int vcpu)
{
	struct virtual_machine_info *vm_info =
		(struct virtual_machine_info *)chan_info->priv_info;
-	if (global_hypervisor_available && (vm_info != NULL))
-		return rte_atomic64_read(&vm_info->pcpu_mask[vcpu]);
-	else
+	if (global_hypervisor_available && (vm_info != NULL)) {
+		uint16_t pcpu;
+		/* pcpu_map is now guarded by the per-VM config spinlock
+		 * instead of the former rte_atomic64 per-entry reads.
+		 */
+		rte_spinlock_lock(&(vm_info->config_spinlock));
+		pcpu = vm_info->pcpu_map[vcpu];
+		rte_spinlock_unlock(&(vm_info->config_spinlock));
+		return pcpu;
+	} else
		return 0;
}
channel_exists(struct virtual_machine_info *vm_info, unsigned channel_num)
{
rte_spinlock_lock(&(vm_info->config_spinlock));
- if (vm_info->channel_mask & (1ULL << channel_num)) {
+ if (vm_info->channel_mask[channel_num] == 1) {
rte_spinlock_unlock(&(vm_info->config_spinlock));
return 1;
}
struct timeval tv;
info->fd = socket(AF_UNIX, SOCK_STREAM, 0);
- if (info->fd == -1) {
+ if (info->fd < 0) {
RTE_LOG(ERR, CHANNEL_MANAGER, "Error(%s) creating socket for '%s'\n",
strerror(errno),
info->channel_path);
return 0;
}
+/* Open the host-side FIFO named by info->channel_path and switch the
+ * descriptor to non-blocking mode.
+ *
+ * Returns 0 on success, -1 when the FIFO cannot be opened or the flags
+ * cannot be set, and 1 when the current flags cannot be read (treated
+ * as a warning, not a failure).
+ */
+static int
+open_host_channel(struct channel_info *info)
+{
+	int flags;
+
+	/* O_RDWR keeps the FIFO open even with no writer present;
+	 * O_RSYNC requests synchronised read I/O.
+	 */
+	info->fd = open(info->channel_path, O_RDWR | O_RSYNC);
+	if (info->fd < 0) {
+		RTE_LOG(ERR, CHANNEL_MANAGER, "Error(%s) opening fifo for '%s'\n",
+				strerror(errno),
+				info->channel_path);
+		return -1;
+	}
+
+	/* Get current flags */
+	flags = fcntl(info->fd, F_GETFL, 0);
+	if (flags < 0) {
+		/* NOTE(review): returns 1 (warning) and leaves info->fd open
+		 * rather than closing it and failing — confirm intentional.
+		 */
+		RTE_LOG(WARNING, CHANNEL_MANAGER, "Error(%s) fcntl get flags socket for"
+				"'%s'\n", strerror(errno), info->channel_path);
+		return 1;
+	}
+	/* Set to Non Blocking */
+	flags |= O_NONBLOCK;
+	if (fcntl(info->fd, F_SETFL, flags) < 0) {
+		/* NOTE(review): info->fd is not closed on this error path —
+		 * caller is presumed to clean up; verify against callers.
+		 */
+		RTE_LOG(WARNING, CHANNEL_MANAGER,
+				"Error(%s) setting non-blocking "
+				"socket for '%s'\n",
+				strerror(errno), info->channel_path);
+		return -1;
+	}
+	return 0;
+}
+
static int
setup_channel_info(struct virtual_machine_info **vm_info_dptr,
struct channel_info **chan_info_dptr, unsigned channel_num)
chan_info->channel_num = channel_num;
chan_info->priv_info = (void *)vm_info;
chan_info->status = CHANNEL_MGR_CHANNEL_DISCONNECTED;
+ chan_info->type = CHANNEL_TYPE_BINARY;
if (open_non_blocking_channel(chan_info) < 0) {
RTE_LOG(ERR, CHANNEL_MANAGER, "Could not open channel: "
"'%s' for VM '%s'\n",
}
rte_spinlock_lock(&(vm_info->config_spinlock));
vm_info->num_channels++;
- vm_info->channel_mask |= 1ULL << channel_num;
+ vm_info->channel_mask[channel_num] = 1;
vm_info->channels[channel_num] = chan_info;
chan_info->status = CHANNEL_MGR_CHANNEL_CONNECTED;
rte_spinlock_unlock(&(vm_info->config_spinlock));
return 0;
}
+/* Compose the host FIFO pathname into dst (capacity len):
+ * CHANNEL_MGR_SOCKET_PATH with "fifo" appended.  snprintf guarantees
+ * NUL-termination when len > 0.
+ */
+static void
+fifo_path(char *dst, unsigned int len)
+{
+	snprintf(dst, len, "%sfifo", CHANNEL_MGR_SOCKET_PATH);
+}
+
+/* Initialise and connect the host (non-VM) channel: fill in the channel
+ * number and JSON type, derive the FIFO path, open it non-blocking and
+ * register it with the epoll-based channel monitor.
+ *
+ * Returns 0 on success, -1 on open or monitor-registration failure.
+ */
+static int
+setup_host_channel_info(struct channel_info **chan_info_dptr,
+		unsigned int channel_num)
+{
+	struct channel_info *chan_info = *chan_info_dptr;
+
+	chan_info->channel_num = channel_num;
+	/* Host channel belongs to no VM, so there is no private VM info. */
+	chan_info->priv_info = (void *)NULL;
+	chan_info->status = CHANNEL_MGR_CHANNEL_DISCONNECTED;
+	/* Host commands arrive as JSON, unlike the binary VM channels. */
+	chan_info->type = CHANNEL_TYPE_JSON;
+
+	fifo_path(chan_info->channel_path, sizeof(chan_info->channel_path));
+
+	if (open_host_channel(chan_info) < 0) {
+		RTE_LOG(ERR, CHANNEL_MANAGER, "Could not open host channel: "
+				"'%s'\n",
+				chan_info->channel_path);
+		return -1;
+	}
+	if (add_channel_to_monitor(&chan_info) < 0) {
+		/* NOTE(review): chan_info->fd stays open on this path —
+		 * confirm the caller releases it.
+		 */
+		RTE_LOG(ERR, CHANNEL_MANAGER, "Could add channel: "
+				"'%s' to epoll ctl\n",
+				chan_info->channel_path);
+		return -1;
+
+	}
+	chan_info->status = CHANNEL_MGR_CHANNEL_CONNECTED;
+	return 0;
+}
+
int
add_all_channels(const char *vm_name)
{
return num_channels_enabled;
}
+/* Create (if needed) and connect the single host FIFO channel.
+ *
+ * Returns the number of channels enabled: 1 on success, 0 on any
+ * failure (fifo creation, access, allocation or setup).
+ */
+int
+add_host_channel(void)
+{
+	struct channel_info *chan_info;
+	char socket_path[PATH_MAX];
+	int num_channels_enabled = 0;
+	int ret;
+
+	fifo_path(socket_path, sizeof(socket_path));
+
+	/* An already-existing FIFO (EEXIST) is fine — reuse it. */
+	ret = mkfifo(socket_path, 0660);
+	if ((errno != EEXIST) && (ret < 0)) {
+		/* NOTE(review): errno is tested before ret; errno is only
+		 * meaningful when ret < 0 — conventional order would be
+		 * (ret < 0 && errno != EEXIST).  Result is the same here
+		 * because both conditions must hold.
+		 */
+		RTE_LOG(ERR, CHANNEL_MANAGER, "Cannot create fifo '%s' error: "
+				"%s\n", socket_path, strerror(errno));
+		return 0;
+	}
+
+	if (access(socket_path, F_OK) < 0) {
+		RTE_LOG(ERR, CHANNEL_MANAGER, "Channel path '%s' error: "
+				"%s\n", socket_path, strerror(errno));
+		return 0;
+	}
+	chan_info = rte_malloc(NULL, sizeof(*chan_info), 0);
+	if (chan_info == NULL) {
+		RTE_LOG(ERR, CHANNEL_MANAGER, "Error allocating memory for "
+				"channel '%s'\n", socket_path);
+		return 0;
+	}
+	snprintf(chan_info->channel_path,
+			sizeof(chan_info->channel_path), "%s", socket_path);
+	if (setup_host_channel_info(&chan_info, 0) < 0) {
+		/* NOTE(review): if setup failed after open_host_channel()
+		 * succeeded, the FIFO fd may still be open when chan_info
+		 * is freed — verify.
+		 */
+		rte_free(chan_info);
+		return 0;
+	}
+	num_channels_enabled++;
+
+	return num_channels_enabled;
+}
+
int
remove_channel(struct channel_info **chan_info_dptr)
{
vm_info = (struct virtual_machine_info *)chan_info->priv_info;
rte_spinlock_lock(&(vm_info->config_spinlock));
- vm_info->channel_mask &= ~(1ULL << chan_info->channel_num);
+ vm_info->channel_mask[chan_info->channel_num] = 0;
vm_info->num_channels--;
rte_spinlock_unlock(&(vm_info->config_spinlock));
{
struct virtual_machine_info *vm_info;
unsigned i;
- uint64_t mask;
+ char mask[POWER_MGR_MAX_CPUS];
int num_channels_changed = 0;
if (!(status == CHANNEL_MGR_CHANNEL_CONNECTED ||
}
rte_spinlock_lock(&(vm_info->config_spinlock));
- mask = vm_info->channel_mask;
- ITERATIVE_BITMASK_CHECK_64(mask, i) {
+ memcpy(mask, (char *)vm_info->channel_mask, POWER_MGR_MAX_CPUS);
+ for (i = 0; i < POWER_MGR_MAX_CPUS; i++) {
+ if (mask[i] != 1)
+ continue;
vm_info->channels[i]->status = status;
num_channels_changed++;
}
virNodeInfo node_info;
virDomainPtr *domptr;
- uint64_t mask;
- int i, ii, numVcpus[MAX_VCPUS], cpu, n_vcpus;
+ int i, ii, numVcpus[MAX_VCPUS], n_vcpus;
unsigned int jj;
const char *vm_name;
unsigned int domain_flags = VIR_CONNECT_LIST_DOMAINS_RUNNING |
/* Save pcpu in use by libvirt VMs */
for (ii = 0; ii < n_vcpus; ii++) {
- mask = 0;
for (jj = 0; jj < global_n_host_cpus; jj++) {
if (VIR_CPU_USABLE(global_cpumaps,
global_maplen, ii, jj) > 0) {
- mask |= 1ULL << jj;
+ lvm_info[i].pcpus[ii] = jj;
}
}
- ITERATIVE_BITMASK_CHECK_64(mask, cpu) {
- lvm_info[i].pcpus[ii] = cpu;
- }
}
}
}
{
struct virtual_machine_info *vm_info;
unsigned i, channel_num = 0;
- uint64_t mask;
+ char mask[POWER_MGR_MAX_CPUS];
vm_info = find_domain_by_name(vm_name);
if (vm_info == NULL) {
rte_spinlock_lock(&(vm_info->config_spinlock));
- mask = vm_info->channel_mask;
- ITERATIVE_BITMASK_CHECK_64(mask, i) {
+ memcpy(mask, (char *)vm_info->channel_mask, POWER_MGR_MAX_CPUS);
+ for (i = 0; i < POWER_MGR_MAX_CPUS; i++) {
+ if (mask[i] != 1)
+ continue;
info->channels[channel_num].channel_num = i;
memcpy(info->channels[channel_num].channel_path,
- vm_info->channels[i]->channel_path, UNIX_PATH_MAX);
- info->channels[channel_num].status = vm_info->channels[i]->status;
- info->channels[channel_num].fd = vm_info->channels[i]->fd;
+ vm_info->channels[i]->channel_path,
+ UNIX_PATH_MAX);
+ info->channels[channel_num].status =
+ vm_info->channels[i]->status;
+ info->channels[channel_num].fd =
+ vm_info->channels[i]->fd;
channel_num++;
}
rte_spinlock_unlock(&(vm_info->config_spinlock));
memcpy(info->name, vm_info->name, sizeof(vm_info->name));
+ rte_spinlock_lock(&(vm_info->config_spinlock));
for (i = 0; i < info->num_vcpus; i++) {
- info->pcpu_mask[i] = rte_atomic64_read(&vm_info->pcpu_mask[i]);
+ info->pcpu_map[i] = vm_info->pcpu_map[i];
}
+ rte_spinlock_unlock(&(vm_info->config_spinlock));
return 0;
}
}
for (i = 0; i < CHANNEL_CMDS_MAX_CPUS; i++) {
- rte_atomic64_init(&new_domain->pcpu_mask[i]);
+ new_domain->pcpu_map[i] = 0;
}
if (update_pcpus_mask(new_domain) < 0) {
RTE_LOG(ERR, CHANNEL_MANAGER, "Error getting physical CPU pinning\n");
}
strncpy(new_domain->name, vm_name, sizeof(new_domain->name));
new_domain->name[sizeof(new_domain->name) - 1] = '\0';
- new_domain->channel_mask = 0;
+ memset(new_domain->channel_mask, 0, POWER_MGR_MAX_CPUS);
new_domain->num_channels = 0;
if (!virDomainIsActive(dom_ptr))
channel_manager_exit(void)
{
unsigned i;
- uint64_t mask;
+ char mask[POWER_MGR_MAX_CPUS];
struct virtual_machine_info *vm_info;
LIST_FOREACH(vm_info, &vm_list_head, vms_info) {
rte_spinlock_lock(&(vm_info->config_spinlock));
- mask = vm_info->channel_mask;
- ITERATIVE_BITMASK_CHECK_64(mask, i) {
- remove_channel_from_monitor(vm_info->channels[i]);
+ memcpy(mask, (char *)vm_info->channel_mask, POWER_MGR_MAX_CPUS);
+ for (i = 0; i < POWER_MGR_MAX_CPUS; i++) {
+ if (mask[i] != 1)
+ continue;
+ remove_channel_from_monitor(
+ vm_info->channels[i]);
close(vm_info->channels[i]->fd);
rte_free(vm_info->channels[i]);
}