power: update error handling
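
Convert the license header to an SPDX tag, replace the 64-bit channel
and pcpu bitmasks with per-CPU arrays protected by the config spinlock,
add a host channel carried over a named fifo (CHANNEL_TYPE_JSON)
alongside the existing per-VM sockets, and let the channel manager
initialise and exit cleanly when no hypervisor connection is available.
---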
diff --git a/examples/vm_power_manager/channel_manager.c b/examples/vm_power_manager/channel_manager.c
index ab856bd..09bfa5c 100644
--- a/examples/vm_power_manager/channel_manager.c
+++ b/examples/vm_power_manager/channel_manager.c
@@ -1,34 +1,5 @@
-/*-
- *   BSD LICENSE
- *
- *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- *   All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of Intel Corporation nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
  */
 
 #include <stdio.h>
@@ -42,6 +13,7 @@
 
 #include <sys/queue.h>
 #include <sys/types.h>
+#include <sys/stat.h>
 #include <sys/socket.h>
 #include <sys/select.h>
 
 #include "channel_manager.h"
 #include "channel_commands.h"
 #include "channel_monitor.h"
+#include "power_manager.h"
 
 
 #define RTE_LOGTYPE_CHANNEL_MANAGER RTE_LOGTYPE_USER1
 
-#define ITERATIVE_BITMASK_CHECK_64(mask_u64b, i) \
-               for (i = 0; mask_u64b; mask_u64b &= ~(1ULL << i++)) \
-               if ((mask_u64b >> i) & 1) \
-
 /* Global pointer to libvirt connection */
 static virConnectPtr global_vir_conn_ptr;
 
@@ -72,16 +41,17 @@ static unsigned char *global_cpumaps;
 static virVcpuInfo *global_vircpuinfo;
 static size_t global_maplen;
 
-static unsigned global_n_host_cpus;
+static unsigned int global_n_host_cpus;
+static bool global_hypervisor_available;
 
 /*
  * Represents a single Virtual Machine
  */
 struct virtual_machine_info {
        char name[CHANNEL_MGR_MAX_NAME_LEN];
-       rte_atomic64_t pcpu_mask[CHANNEL_CMDS_MAX_CPUS];
+       uint16_t pcpu_map[CHANNEL_CMDS_MAX_CPUS];
        struct channel_info *channels[CHANNEL_CMDS_MAX_VM_CHANNELS];
-       uint64_t channel_mask;
+       char channel_mask[POWER_MGR_MAX_CPUS];
        uint8_t num_channels;
        enum vm_status status;
        virDomainPtr domainPtr;
@@ -109,7 +79,6 @@ update_pcpus_mask(struct virtual_machine_info *vm_info)
        virVcpuInfoPtr cpuinfo;
        unsigned i, j;
        int n_vcpus;
-       uint64_t mask;
 
        memset(global_cpumaps, 0, CHANNEL_CMDS_MAX_CPUS*global_maplen);
 
@@ -149,25 +118,24 @@ update_pcpus:
                                n_vcpus);
                vm_info->info.nrVirtCpu = n_vcpus;
        }
+       rte_spinlock_lock(&(vm_info->config_spinlock));
        for (i = 0; i < vm_info->info.nrVirtCpu; i++) {
-               mask = 0;
                for (j = 0; j < global_n_host_cpus; j++) {
-                       if (VIR_CPU_USABLE(global_cpumaps, global_maplen, i, j) > 0) {
-                               mask |= 1ULL << j;
-                       }
+                       if (VIR_CPU_USABLE(global_cpumaps,
+                                       global_maplen, i, j) <= 0)
+                               continue;
+                       vm_info->pcpu_map[i] = j;
                }
-               rte_atomic64_set(&vm_info->pcpu_mask[i], mask);
        }
+       rte_spinlock_unlock(&(vm_info->config_spinlock));
        return 0;
 }
 
 int
-set_pcpus_mask(char *vm_name, unsigned vcpu, uint64_t core_mask)
+set_pcpu(char *vm_name, unsigned int vcpu, unsigned int pcpu)
 {
-       unsigned i = 0;
        int flags = VIR_DOMAIN_AFFECT_LIVE|VIR_DOMAIN_AFFECT_CONFIG;
        struct virtual_machine_info *vm_info;
-       uint64_t mask = core_mask;
 
        if (vcpu >= CHANNEL_CMDS_MAX_CPUS) {
                RTE_LOG(ERR, CHANNEL_MANAGER, "vCPU(%u) exceeds max allowable(%d)\n",
@@ -183,8 +151,8 @@ set_pcpus_mask(char *vm_name, unsigned vcpu, uint64_t core_mask)
 
        if (!virDomainIsActive(vm_info->domainPtr)) {
                RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to set vCPU(%u) to pCPU "
-                               "mask(0x%"PRIx64") for VM '%s', VM is not active\n",
-                               vcpu, core_mask, vm_info->name);
+                               "for VM '%s', VM is not active\n",
+                               vcpu, vm_info->name);
                return -1;
        }
 
@@ -194,47 +162,50 @@ set_pcpus_mask(char *vm_name, unsigned vcpu, uint64_t core_mask)
                return -1;
        }
        memset(global_cpumaps, 0 , CHANNEL_CMDS_MAX_CPUS * global_maplen);
-       ITERATIVE_BITMASK_CHECK_64(mask, i) {
-               VIR_USE_CPU(global_cpumaps, i);
-               if (i >= global_n_host_cpus) {
-                       RTE_LOG(ERR, CHANNEL_MANAGER, "CPU(%u) exceeds the available "
-                                       "number of CPUs(%u)\n", i, global_n_host_cpus);
-                       return -1;
-               }
+
+       VIR_USE_CPU(global_cpumaps, pcpu);
+
+       if (pcpu >= global_n_host_cpus) {
+               RTE_LOG(ERR, CHANNEL_MANAGER, "CPU(%u) exceeds the available "
+                               "number of CPUs(%u)\n",
+                               pcpu, global_n_host_cpus);
+               return -1;
        }
+
        if (virDomainPinVcpuFlags(vm_info->domainPtr, vcpu, global_cpumaps,
                        global_maplen, flags) < 0) {
                RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to set vCPU(%u) to pCPU "
-                               "mask(0x%"PRIx64") for VM '%s'\n", vcpu, core_mask,
+				"for VM '%s'\n", vcpu,
                                vm_info->name);
                return -1;
        }
-       rte_atomic64_set(&vm_info->pcpu_mask[vcpu], core_mask);
+       rte_spinlock_lock(&(vm_info->config_spinlock));
+       vm_info->pcpu_map[vcpu] = pcpu;
+       rte_spinlock_unlock(&(vm_info->config_spinlock));
        return 0;
-
 }
 
-int
-set_pcpu(char *vm_name, unsigned vcpu, unsigned core_num)
-{
-       uint64_t mask = 1ULL << core_num;
-
-       return set_pcpus_mask(vm_name, vcpu, mask);
-}
-
-uint64_t
-get_pcpus_mask(struct channel_info *chan_info, unsigned vcpu)
+uint16_t
+get_pcpu(struct channel_info *chan_info, unsigned int vcpu)
 {
        struct virtual_machine_info *vm_info =
                        (struct virtual_machine_info *)chan_info->priv_info;
-       return rte_atomic64_read(&vm_info->pcpu_mask[vcpu]);
+
+       if (global_hypervisor_available && (vm_info != NULL)) {
+               uint16_t pcpu;
+               rte_spinlock_lock(&(vm_info->config_spinlock));
+               pcpu = vm_info->pcpu_map[vcpu];
+               rte_spinlock_unlock(&(vm_info->config_spinlock));
+               return pcpu;
+       } else
+               return 0;
 }
 
 static inline int
 channel_exists(struct virtual_machine_info *vm_info, unsigned channel_num)
 {
        rte_spinlock_lock(&(vm_info->config_spinlock));
-       if (vm_info->channel_mask & (1ULL << channel_num)) {
+       if (vm_info->channel_mask[channel_num] == 1) {
                rte_spinlock_unlock(&(vm_info->config_spinlock));
                return 1;
        }
@@ -253,7 +224,7 @@ open_non_blocking_channel(struct channel_info *info)
        struct timeval tv;
 
        info->fd = socket(AF_UNIX, SOCK_STREAM, 0);
-       if (info->fd == -1) {
+       if (info->fd < 0) {
                RTE_LOG(ERR, CHANNEL_MANAGER, "Error(%s) creating socket for '%s'\n",
                                strerror(errno),
                                info->channel_path);
@@ -308,6 +279,38 @@ open_non_blocking_channel(struct channel_info *info)
        return 0;
 }
 
+static int
+open_host_channel(struct channel_info *info)
+{
+       int flags;
+
+       info->fd = open(info->channel_path, O_RDWR | O_RSYNC);
+       if (info->fd < 0) {
+               RTE_LOG(ERR, CHANNEL_MANAGER, "Error(%s) opening fifo for '%s'\n",
+                               strerror(errno),
+                               info->channel_path);
+               return -1;
+       }
+
+       /* Get current flags */
+       flags = fcntl(info->fd, F_GETFL, 0);
+       if (flags < 0) {
+               RTE_LOG(WARNING, CHANNEL_MANAGER, "Error(%s) fcntl get flags for fifo "
+                               "'%s'\n", strerror(errno), info->channel_path);
+               return 1;
+       }
+       /* Set to Non Blocking */
+       flags |= O_NONBLOCK;
+       if (fcntl(info->fd, F_SETFL, flags) < 0) {
+               RTE_LOG(WARNING, CHANNEL_MANAGER,
+                               "Error(%s) setting non-blocking "
+                               "fifo for '%s'\n",
+                               strerror(errno), info->channel_path);
+               return -1;
+       }
+       return 0;
+}
+
 static int
 setup_channel_info(struct virtual_machine_info **vm_info_dptr,
                struct channel_info **chan_info_dptr, unsigned channel_num)
@@ -318,6 +321,7 @@ setup_channel_info(struct virtual_machine_info **vm_info_dptr,
        chan_info->channel_num = channel_num;
        chan_info->priv_info = (void *)vm_info;
        chan_info->status = CHANNEL_MGR_CHANNEL_DISCONNECTED;
+       chan_info->type = CHANNEL_TYPE_BINARY;
        if (open_non_blocking_channel(chan_info) < 0) {
                RTE_LOG(ERR, CHANNEL_MANAGER, "Could not open channel: "
                                "'%s' for VM '%s'\n",
@@ -333,13 +337,49 @@ setup_channel_info(struct virtual_machine_info **vm_info_dptr,
        }
        rte_spinlock_lock(&(vm_info->config_spinlock));
        vm_info->num_channels++;
-       vm_info->channel_mask |= 1ULL << channel_num;
+       vm_info->channel_mask[channel_num] = 1;
        vm_info->channels[channel_num] = chan_info;
        chan_info->status = CHANNEL_MGR_CHANNEL_CONNECTED;
        rte_spinlock_unlock(&(vm_info->config_spinlock));
        return 0;
 }
 
+static void
+fifo_path(char *dst, unsigned int len)
+{
+       snprintf(dst, len, "%sfifo", CHANNEL_MGR_SOCKET_PATH);
+}
+
+static int
+setup_host_channel_info(struct channel_info **chan_info_dptr,
+               unsigned int channel_num)
+{
+       struct channel_info *chan_info = *chan_info_dptr;
+
+       chan_info->channel_num = channel_num;
+       chan_info->priv_info = (void *)NULL;
+       chan_info->status = CHANNEL_MGR_CHANNEL_DISCONNECTED;
+       chan_info->type = CHANNEL_TYPE_JSON;
+
+       fifo_path(chan_info->channel_path, sizeof(chan_info->channel_path));
+
+       if (open_host_channel(chan_info) < 0) {
+               RTE_LOG(ERR, CHANNEL_MANAGER, "Could not open host channel: "
+                               "'%s'\n",
+                               chan_info->channel_path);
+               return -1;
+       }
+       if (add_channel_to_monitor(&chan_info) < 0) {
+               RTE_LOG(ERR, CHANNEL_MANAGER, "Could not add channel: "
+                               "'%s' to epoll ctl\n",
+                               chan_info->channel_path);
+               return -1;
+
+       }
+       chan_info->status = CHANNEL_MGR_CHANNEL_CONNECTED;
+       return 0;
+}
+
 int
 add_all_channels(const char *vm_name)
 {
@@ -494,6 +534,45 @@ add_channels(const char *vm_name, unsigned *channel_list,
        return num_channels_enabled;
 }
 
+int
+add_host_channel(void)
+{
+       struct channel_info *chan_info;
+       char socket_path[PATH_MAX];
+       int num_channels_enabled = 0;
+       int ret;
+
+       fifo_path(socket_path, sizeof(socket_path));
+
+       ret = mkfifo(socket_path, 0660);
+       if ((errno != EEXIST) && (ret < 0)) {
+               RTE_LOG(ERR, CHANNEL_MANAGER, "Cannot create fifo '%s' error: "
+                               "%s\n", socket_path, strerror(errno));
+               return 0;
+       }
+
+       if (access(socket_path, F_OK) < 0) {
+               RTE_LOG(ERR, CHANNEL_MANAGER, "Channel path '%s' error: "
+                               "%s\n", socket_path, strerror(errno));
+               return 0;
+       }
+       chan_info = rte_malloc(NULL, sizeof(*chan_info), 0);
+       if (chan_info == NULL) {
+               RTE_LOG(ERR, CHANNEL_MANAGER, "Error allocating memory for "
+                               "channel '%s'\n", socket_path);
+               return 0;
+       }
+       snprintf(chan_info->channel_path,
+                       sizeof(chan_info->channel_path), "%s", socket_path);
+       if (setup_host_channel_info(&chan_info, 0) < 0) {
+               rte_free(chan_info);
+               return 0;
+       }
+       num_channels_enabled++;
+
+       return num_channels_enabled;
+}
+
 int
 remove_channel(struct channel_info **chan_info_dptr)
 {
@@ -505,7 +584,7 @@ remove_channel(struct channel_info **chan_info_dptr)
        vm_info = (struct virtual_machine_info *)chan_info->priv_info;
 
        rte_spinlock_lock(&(vm_info->config_spinlock));
-       vm_info->channel_mask &= ~(1ULL << chan_info->channel_num);
+       vm_info->channel_mask[chan_info->channel_num] = 0;
        vm_info->num_channels--;
        rte_spinlock_unlock(&(vm_info->config_spinlock));
 
@@ -518,7 +597,7 @@ set_channel_status_all(const char *vm_name, enum channel_status status)
 {
        struct virtual_machine_info *vm_info;
        unsigned i;
-       uint64_t mask;
+       char mask[POWER_MGR_MAX_CPUS];
        int num_channels_changed = 0;
 
        if (!(status == CHANNEL_MGR_CHANNEL_CONNECTED ||
@@ -534,8 +613,10 @@ set_channel_status_all(const char *vm_name, enum channel_status status)
        }
 
        rte_spinlock_lock(&(vm_info->config_spinlock));
-       mask = vm_info->channel_mask;
-       ITERATIVE_BITMASK_CHECK_64(mask, i) {
+       memcpy(mask, (char *)vm_info->channel_mask, POWER_MGR_MAX_CPUS);
+       for (i = 0; i < POWER_MGR_MAX_CPUS; i++) {
+               if (mask[i] != 1)
+                       continue;
                vm_info->channels[i]->status = status;
                num_channels_changed++;
        }
@@ -580,14 +661,15 @@ get_all_vm(int *num_vm, int *num_vcpu)
 
        virNodeInfo node_info;
        virDomainPtr *domptr;
-       uint64_t mask;
-       int i, ii, numVcpus[MAX_VCPUS], cpu, n_vcpus;
+       int i, ii, numVcpus[MAX_VCPUS], n_vcpus;
        unsigned int jj;
        const char *vm_name;
        unsigned int domain_flags = VIR_CONNECT_LIST_DOMAINS_RUNNING |
                                VIR_CONNECT_LIST_DOMAINS_PERSISTENT;
        unsigned int domain_flag = VIR_DOMAIN_VCPU_CONFIG;
 
+       if (!global_hypervisor_available)
+               return;
 
        memset(global_cpumaps, 0, CHANNEL_CMDS_MAX_CPUS*global_maplen);
        if (virNodeGetInfo(global_vir_conn_ptr, &node_info)) {
@@ -627,16 +709,12 @@ get_all_vm(int *num_vm, int *num_vcpu)
 
                /* Save pcpu in use by libvirt VMs */
                for (ii = 0; ii < n_vcpus; ii++) {
-                       mask = 0;
                        for (jj = 0; jj < global_n_host_cpus; jj++) {
                                if (VIR_CPU_USABLE(global_cpumaps,
                                                global_maplen, ii, jj) > 0) {
-                                       mask |= 1ULL << jj;
+                                       lvm_info[i].pcpus[ii] = jj;
                                }
                        }
-                       ITERATIVE_BITMASK_CHECK_64(mask, cpu) {
-                               lvm_info[i].pcpus[ii] = cpu;
-                       }
                }
        }
 }
@@ -646,7 +724,7 @@ get_info_vm(const char *vm_name, struct vm_info *info)
 {
        struct virtual_machine_info *vm_info;
        unsigned i, channel_num = 0;
-       uint64_t mask;
+       char mask[POWER_MGR_MAX_CPUS];
 
        vm_info = find_domain_by_name(vm_name);
        if (vm_info == NULL) {
@@ -659,13 +737,18 @@ get_info_vm(const char *vm_name, struct vm_info *info)
 
        rte_spinlock_lock(&(vm_info->config_spinlock));
 
-       mask = vm_info->channel_mask;
-       ITERATIVE_BITMASK_CHECK_64(mask, i) {
+       memcpy(mask, (char *)vm_info->channel_mask, POWER_MGR_MAX_CPUS);
+       for (i = 0; i < POWER_MGR_MAX_CPUS; i++) {
+               if (mask[i] != 1)
+                       continue;
                info->channels[channel_num].channel_num = i;
                memcpy(info->channels[channel_num].channel_path,
-                               vm_info->channels[i]->channel_path, UNIX_PATH_MAX);
-               info->channels[channel_num].status = vm_info->channels[i]->status;
-               info->channels[channel_num].fd = vm_info->channels[i]->fd;
+                               vm_info->channels[i]->channel_path,
+                               UNIX_PATH_MAX);
+               info->channels[channel_num].status =
+                               vm_info->channels[i]->status;
+               info->channels[channel_num].fd =
+                               vm_info->channels[i]->fd;
                channel_num++;
        }
 
@@ -674,9 +757,11 @@ get_info_vm(const char *vm_name, struct vm_info *info)
        rte_spinlock_unlock(&(vm_info->config_spinlock));
 
        memcpy(info->name, vm_info->name, sizeof(vm_info->name));
+       rte_spinlock_lock(&(vm_info->config_spinlock));
        for (i = 0; i < info->num_vcpus; i++) {
-               info->pcpu_mask[i] = rte_atomic64_read(&vm_info->pcpu_mask[i]);
+               info->pcpu_map[i] = vm_info->pcpu_map[i];
        }
+       rte_spinlock_unlock(&(vm_info->config_spinlock));
        return 0;
 }
 
@@ -726,7 +811,7 @@ add_vm(const char *vm_name)
        }
 
        for (i = 0; i < CHANNEL_CMDS_MAX_CPUS; i++) {
-               rte_atomic64_init(&new_domain->pcpu_mask[i]);
+               new_domain->pcpu_map[i] = 0;
        }
        if (update_pcpus_mask(new_domain) < 0) {
                RTE_LOG(ERR, CHANNEL_MANAGER, "Error getting physical CPU pinning\n");
@@ -735,7 +820,7 @@ add_vm(const char *vm_name)
        }
        strncpy(new_domain->name, vm_name, sizeof(new_domain->name));
        new_domain->name[sizeof(new_domain->name) - 1] = '\0';
-       new_domain->channel_mask = 0;
+       memset(new_domain->channel_mask, 0, POWER_MGR_MAX_CPUS);
        new_domain->num_channels = 0;
 
        if (!virDomainIsActive(dom_ptr))
@@ -797,38 +882,42 @@ connect_hypervisor(const char *path)
        }
        return 0;
 }
-
 int
-channel_manager_init(const char *path)
+channel_manager_init(const char *path __rte_unused)
 {
        virNodeInfo info;
 
        LIST_INIT(&vm_list_head);
        if (connect_hypervisor(path) < 0) {
-               RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to initialize channel manager\n");
-               return -1;
-       }
-
-       global_maplen = VIR_CPU_MAPLEN(CHANNEL_CMDS_MAX_CPUS);
+               global_n_host_cpus = 64;
+               global_hypervisor_available = 0;
+               RTE_LOG(INFO, CHANNEL_MANAGER, "Unable to connect to hypervisor, continuing without it\n");
+       } else {
+               global_hypervisor_available = 1;
+
+               global_maplen = VIR_CPU_MAPLEN(CHANNEL_CMDS_MAX_CPUS);
+
+               global_vircpuinfo = rte_zmalloc(NULL,
+                               sizeof(*global_vircpuinfo) *
+                               CHANNEL_CMDS_MAX_CPUS, RTE_CACHE_LINE_SIZE);
+               if (global_vircpuinfo == NULL) {
+                       RTE_LOG(ERR, CHANNEL_MANAGER, "Error allocating memory for CPU Info\n");
+                       goto error;
+               }
+               global_cpumaps = rte_zmalloc(NULL,
+                               CHANNEL_CMDS_MAX_CPUS * global_maplen,
+                               RTE_CACHE_LINE_SIZE);
+               if (global_cpumaps == NULL)
+                       goto error;
 
-       global_vircpuinfo = rte_zmalloc(NULL, sizeof(*global_vircpuinfo) *
-                       CHANNEL_CMDS_MAX_CPUS, RTE_CACHE_LINE_SIZE);
-       if (global_vircpuinfo == NULL) {
-               RTE_LOG(ERR, CHANNEL_MANAGER, "Error allocating memory for CPU Info\n");
-               goto error;
-       }
-       global_cpumaps = rte_zmalloc(NULL, CHANNEL_CMDS_MAX_CPUS * global_maplen,
-                       RTE_CACHE_LINE_SIZE);
-       if (global_cpumaps == NULL) {
-               goto error;
+               if (virNodeGetInfo(global_vir_conn_ptr, &info)) {
+                       RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to retrieve node Info\n");
+                       goto error;
+               }
+               global_n_host_cpus = (unsigned int)info.cpus;
        }
 
-       if (virNodeGetInfo(global_vir_conn_ptr, &info)) {
-               RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to retrieve node Info\n");
-               goto error;
-       }
 
-       global_n_host_cpus = (unsigned)info.cpus;
 
        if (global_n_host_cpus > CHANNEL_CMDS_MAX_CPUS) {
                RTE_LOG(WARNING, CHANNEL_MANAGER, "The number of host CPUs(%u) exceeds the "
@@ -840,7 +929,8 @@ channel_manager_init(const char *path)
 
        return 0;
 error:
-       disconnect_hypervisor();
+       if (global_hypervisor_available)
+               disconnect_hypervisor();
        return -1;
 }
 
@@ -848,16 +938,19 @@ void
 channel_manager_exit(void)
 {
        unsigned i;
-       uint64_t mask;
+       char mask[POWER_MGR_MAX_CPUS];
        struct virtual_machine_info *vm_info;
 
        LIST_FOREACH(vm_info, &vm_list_head, vms_info) {
 
                rte_spinlock_lock(&(vm_info->config_spinlock));
 
-               mask = vm_info->channel_mask;
-               ITERATIVE_BITMASK_CHECK_64(mask, i) {
-                       remove_channel_from_monitor(vm_info->channels[i]);
+               memcpy(mask, (char *)vm_info->channel_mask, POWER_MGR_MAX_CPUS);
+               for (i = 0; i < POWER_MGR_MAX_CPUS; i++) {
+                       if (mask[i] != 1)
+                               continue;
+                       remove_channel_from_monitor(
+                                       vm_info->channels[i]);
                        close(vm_info->channels[i]->fd);
                        rte_free(vm_info->channels[i]);
                }
@@ -867,7 +960,10 @@ channel_manager_exit(void)
                rte_free(vm_info);
        }
 
-       rte_free(global_cpumaps);
-       rte_free(global_vircpuinfo);
-       disconnect_hypervisor();
+       if (global_hypervisor_available) {
+               /* Only needed if hypervisor available */
+               rte_free(global_cpumaps);
+               rte_free(global_vircpuinfo);
+               disconnect_hypervisor();
+       }
 }
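
As background for reviewers: add_host_channel() above creates a named fifo at
the path returned by fifo_path() (CHANNEL_MGR_SOCKET_PATH with a "fifo"
suffix) and marks the channel CHANNEL_TYPE_JSON, so a host-side producer talks
to the manager simply by opening that fifo and writing a request. A minimal,
illustrative writer follows; the "/tmp/powermonitor/fifo" path and the payload
string are assumptions for the sketch only, since neither the value of
CHANNEL_MGR_SOCKET_PATH nor the JSON format handled in channel_monitor.c
appears in this patch.

/* Hypothetical host-side writer for the fifo channel added above.
 * Assumptions: the fifo path below is a guess at CHANNEL_MGR_SOCKET_PATH "fifo",
 * and the payload is a placeholder, not the real schema expected by
 * channel_monitor.c.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *fifo = "/tmp/powermonitor/fifo";	/* assumed path */
	const char *payload = "{\"example\": \"placeholder\"}";	/* illustrative only */
	ssize_t written;
	int fd;

	/* The manager holds the fifo open O_RDWR, so this open() succeeds
	 * once add_host_channel() has run; otherwise it blocks waiting
	 * for a reader.
	 */
	fd = open(fifo, O_WRONLY);
	if (fd < 0) {
		perror("open fifo");
		return 1;
	}

	written = write(fd, payload, strlen(payload));
	if (written < 0)
		perror("write fifo");

	close(fd);
	return 0;
}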