diff --git a/examples/vm_power_manager/channel_manager.c b/examples/vm_power_manager/channel_manager.c
index 7d892e2..4ac21f0 100644
--- a/examples/vm_power_manager/channel_manager.c
+++ b/examples/vm_power_manager/channel_manager.c
@@ -1,34 +1,5 @@
-/*-
- *   BSD LICENSE
- *
- *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- *   All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of Intel Corporation nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
  */
 
 #include <stdio.h>
 
 #include <sys/queue.h>
 #include <sys/types.h>
+#include <sys/stat.h>
 #include <sys/socket.h>
 #include <sys/select.h>
 
-#include <rte_config.h>
+#include <rte_string_fns.h>
 #include <rte_malloc.h>
 #include <rte_memory.h>
 #include <rte_mempool.h>
 #include "channel_manager.h"
 #include "channel_commands.h"
 #include "channel_monitor.h"
+#include "power_manager.h"
 
 
 #define RTE_LOGTYPE_CHANNEL_MANAGER RTE_LOGTYPE_USER1
 
-#define ITERATIVE_BITMASK_CHECK_64(mask_u64b, i) \
-               for (i = 0; mask_u64b; mask_u64b &= ~(1ULL << i++)) \
-               if ((mask_u64b >> i) & 1) \
-
 /* Global pointer to libvirt connection */
 static virConnectPtr global_vir_conn_ptr;
 
@@ -73,21 +42,23 @@ static unsigned char *global_cpumaps;
 static virVcpuInfo *global_vircpuinfo;
 static size_t global_maplen;
 
-static unsigned global_n_host_cpus;
+static unsigned int global_n_host_cpus;
+static bool global_hypervisor_available;
 
 /*
  * Represents a single Virtual Machine
  */
 struct virtual_machine_info {
        char name[CHANNEL_MGR_MAX_NAME_LEN];
-       rte_atomic64_t pcpu_mask[CHANNEL_CMDS_MAX_CPUS];
-       struct channel_info *channels[CHANNEL_CMDS_MAX_VM_CHANNELS];
-       uint64_t channel_mask;
+       uint16_t pcpu_map[RTE_MAX_LCORE];
+       struct channel_info *channels[RTE_MAX_LCORE];
+       char channel_mask[RTE_MAX_LCORE];
        uint8_t num_channels;
        enum vm_status status;
        virDomainPtr domainPtr;
        virDomainInfo info;
        rte_spinlock_t config_spinlock;
+       int allow_query;
        LIST_ENTRY(virtual_machine_info) vms_info;
 };
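
This hunk reworks the per-VM bookkeeping: the 64-bit channel_mask (previously walked with the now-removed ITERATIVE_BITMASK_CHECK_64 macro) becomes a flag array sized by RTE_MAX_LCORE, and the per-vCPU rte_atomic64_t pcpu_mask becomes a plain uint16_t pcpu_map guarded by config_spinlock, since each vCPU now records exactly one pCPU. A minimal, self-contained sketch of the two bookkeeping styles (all names below are local to this example, not part of the patch):

#include <stdint.h>
#include <stdio.h>

#define MAX_CHANNELS 128 /* stands in for RTE_MAX_LCORE */

int main(void)
{
	/* Old scheme: one bit per channel, hard-capped at 64 channels. */
	uint64_t old_mask = (1ULL << 3) | (1ULL << 17);
	char new_mask[MAX_CHANNELS] = {0};
	unsigned int i;

	for (i = 0; i < 64; i++)
		if (old_mask & (1ULL << i))
			printf("old: channel %u active\n", i);

	/* New scheme: one flag byte per channel, sized by RTE_MAX_LCORE. */
	new_mask[3] = 1;
	new_mask[17] = 1;
	for (i = 0; i < MAX_CHANNELS; i++)
		if (new_mask[i] == 1)
			printf("new: channel %u active\n", i);
	return 0;
}
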
 
@@ -110,9 +81,8 @@ update_pcpus_mask(struct virtual_machine_info *vm_info)
        virVcpuInfoPtr cpuinfo;
        unsigned i, j;
        int n_vcpus;
-       uint64_t mask;
 
-       memset(global_cpumaps, 0, CHANNEL_CMDS_MAX_CPUS*global_maplen);
+       memset(global_cpumaps, 0, RTE_MAX_LCORE*global_maplen);
 
        if (!virDomainIsActive(vm_info->domainPtr)) {
                n_vcpus = virDomainGetVcpuPinInfo(vm_info->domainPtr,
@@ -127,21 +97,21 @@ update_pcpus_mask(struct virtual_machine_info *vm_info)
        }
 
        memset(global_vircpuinfo, 0, sizeof(*global_vircpuinfo)*
-                       CHANNEL_CMDS_MAX_CPUS);
+                       RTE_MAX_LCORE);
 
        cpuinfo = global_vircpuinfo;
 
        n_vcpus = virDomainGetVcpus(vm_info->domainPtr, cpuinfo,
-                       CHANNEL_CMDS_MAX_CPUS, global_cpumaps, global_maplen);
+                       RTE_MAX_LCORE, global_cpumaps, global_maplen);
        if (n_vcpus < 0) {
                RTE_LOG(ERR, CHANNEL_MANAGER, "Error getting vCPU info for "
                                "active VM '%s'\n", vm_info->name);
                return -1;
        }
 update_pcpus:
-       if (n_vcpus >= CHANNEL_CMDS_MAX_CPUS) {
+       if (n_vcpus >= RTE_MAX_LCORE) {
                RTE_LOG(ERR, CHANNEL_MANAGER, "Number of vCPUS(%u) is out of range "
-                               "0...%d\n", n_vcpus, CHANNEL_CMDS_MAX_CPUS-1);
+                               "0...%d\n", n_vcpus, RTE_MAX_LCORE-1);
                return -1;
        }
        if (n_vcpus != vm_info->info.nrVirtCpu) {
@@ -150,29 +120,28 @@ update_pcpus:
                                n_vcpus);
                vm_info->info.nrVirtCpu = n_vcpus;
        }
+       rte_spinlock_lock(&(vm_info->config_spinlock));
        for (i = 0; i < vm_info->info.nrVirtCpu; i++) {
-               mask = 0;
                for (j = 0; j < global_n_host_cpus; j++) {
-                       if (VIR_CPU_USABLE(global_cpumaps, global_maplen, i, j) > 0) {
-                               mask |= 1ULL << j;
-                       }
+                       if (VIR_CPU_USABLE(global_cpumaps,
+                                       global_maplen, i, j) <= 0)
+                               continue;
+                       vm_info->pcpu_map[i] = j;
                }
-               rte_atomic64_set(&vm_info->pcpu_mask[i], mask);
        }
+       rte_spinlock_unlock(&(vm_info->config_spinlock));
        return 0;
 }
 
 int
-set_pcpus_mask(char *vm_name, unsigned vcpu, uint64_t core_mask)
+set_pcpu(char *vm_name, unsigned int vcpu, unsigned int pcpu)
 {
-       unsigned i = 0;
        int flags = VIR_DOMAIN_AFFECT_LIVE|VIR_DOMAIN_AFFECT_CONFIG;
        struct virtual_machine_info *vm_info;
-       uint64_t mask = core_mask;
 
-       if (vcpu >= CHANNEL_CMDS_MAX_CPUS) {
+       if (vcpu >= RTE_MAX_LCORE) {
                RTE_LOG(ERR, CHANNEL_MANAGER, "vCPU(%u) exceeds max allowable(%d)\n",
-                               vcpu, CHANNEL_CMDS_MAX_CPUS-1);
+                               vcpu, RTE_MAX_LCORE-1);
                return -1;
        }
 
@@ -184,8 +153,8 @@ set_pcpus_mask(char *vm_name, unsigned vcpu, uint64_t core_mask)
 
        if (!virDomainIsActive(vm_info->domainPtr)) {
                RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to set vCPU(%u) to pCPU "
-                               "mask(0x%"PRIx64") for VM '%s', VM is not active\n",
-                               vcpu, core_mask, vm_info->name);
+                               "%u for VM '%s', VM is not active\n",
+                               vcpu, pcpu, vm_info->name);
                return -1;
        }
 
@@ -194,48 +163,51 @@ set_pcpus_mask(char *vm_name, unsigned vcpu, uint64_t core_mask)
                                "vCPUs(%u)\n", vcpu, vm_info->info.nrVirtCpu);
                return -1;
        }
-       memset(global_cpumaps, 0 , CHANNEL_CMDS_MAX_CPUS * global_maplen);
-       ITERATIVE_BITMASK_CHECK_64(mask, i) {
-               VIR_USE_CPU(global_cpumaps, i);
-               if (i >= global_n_host_cpus) {
-                       RTE_LOG(ERR, CHANNEL_MANAGER, "CPU(%u) exceeds the available "
-                                       "number of CPUs(%u)\n", i, global_n_host_cpus);
-                       return -1;
-               }
+       memset(global_cpumaps, 0, RTE_MAX_LCORE * global_maplen);
+
+       VIR_USE_CPU(global_cpumaps, pcpu);
+
+       if (pcpu >= global_n_host_cpus) {
+               RTE_LOG(ERR, CHANNEL_MANAGER, "CPU(%u) exceeds the available "
+                               "number of CPUs(%u)\n",
+                               pcpu, global_n_host_cpus);
+               return -1;
        }
+
        if (virDomainPinVcpuFlags(vm_info->domainPtr, vcpu, global_cpumaps,
                        global_maplen, flags) < 0) {
                RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to set vCPU(%u) to pCPU "
-                               "mask(0x%"PRIx64") for VM '%s'\n", vcpu, core_mask,
+                               "%u for VM '%s'\n", vcpu, pcpu,
                                vm_info->name);
                return -1;
        }
-       rte_atomic64_set(&vm_info->pcpu_mask[vcpu], core_mask);
+       rte_spinlock_lock(&(vm_info->config_spinlock));
+       vm_info->pcpu_map[vcpu] = pcpu;
+       rte_spinlock_unlock(&(vm_info->config_spinlock));
        return 0;
-
-}
-
-int
-set_pcpu(char *vm_name, unsigned vcpu, unsigned core_num)
-{
-       uint64_t mask = 1ULL << core_num;
-
-       return set_pcpus_mask(vm_name, vcpu, mask);
 }
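
set_pcpus_mask() collapses into set_pcpu() above: rather than expanding a caller-supplied 64-bit core mask bit by bit, the function now sets a single bit in global_cpumaps with VIR_USE_CPU and hands the map to virDomainPinVcpuFlags(). A stand-alone sketch of that libvirt pinning sequence follows; the domain name, the CPU numbers and the 64-CPU map size are placeholders (build with -lvirt):

#include <stdio.h>
#include <stdlib.h>
#include <libvirt/libvirt.h>

int main(void)
{
	unsigned int vcpu = 0, pcpu = 2;	/* placeholder values */
	int maplen = VIR_CPU_MAPLEN(64);	/* room for 64 host CPUs */
	unsigned char *cpumap = calloc(maplen, 1);
	virConnectPtr conn = virConnectOpen("qemu:///system");
	virDomainPtr dom = conn ?
			virDomainLookupByName(conn, "example-vm") : NULL;

	if (cpumap == NULL || dom == NULL) {
		fprintf(stderr, "setup failed\n");
		return 1;
	}
	VIR_USE_CPU(cpumap, pcpu);	/* exactly one bit set in the map */
	if (virDomainPinVcpuFlags(dom, vcpu, cpumap, maplen,
			VIR_DOMAIN_AFFECT_LIVE | VIR_DOMAIN_AFFECT_CONFIG) < 0)
		fprintf(stderr, "pinning vCPU %u to pCPU %u failed\n",
				vcpu, pcpu);
	virDomainFree(dom);
	virConnectClose(conn);
	free(cpumap);
	return 0;
}
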
 
-uint64_t
-get_pcpus_mask(struct channel_info *chan_info, unsigned vcpu)
+uint16_t
+get_pcpu(struct channel_info *chan_info, unsigned int vcpu)
 {
        struct virtual_machine_info *vm_info =
                        (struct virtual_machine_info *)chan_info->priv_info;
-       return rte_atomic64_read(&vm_info->pcpu_mask[vcpu]);
+
+       if (global_hypervisor_available && (vm_info != NULL)) {
+               uint16_t pcpu;
+               rte_spinlock_lock(&(vm_info->config_spinlock));
+               pcpu = vm_info->pcpu_map[vcpu];
+               rte_spinlock_unlock(&(vm_info->config_spinlock));
+               return pcpu;
+       } else
+               return 0;
 }
 
 static inline int
 channel_exists(struct virtual_machine_info *vm_info, unsigned channel_num)
 {
        rte_spinlock_lock(&(vm_info->config_spinlock));
-       if (vm_info->channel_mask & (1ULL << channel_num)) {
+       if (vm_info->channel_mask[channel_num] == 1) {
                rte_spinlock_unlock(&(vm_info->config_spinlock));
                return 1;
        }
@@ -254,7 +226,7 @@ open_non_blocking_channel(struct channel_info *info)
        struct timeval tv;
 
        info->fd = socket(AF_UNIX, SOCK_STREAM, 0);
-       if (info->fd == -1) {
+       if (info->fd < 0) {
                RTE_LOG(ERR, CHANNEL_MANAGER, "Error(%s) creating socket for '%s'\n",
                                strerror(errno),
                                info->channel_path);
@@ -309,6 +281,38 @@ open_non_blocking_channel(struct channel_info *info)
        return 0;
 }
 
+static int
+open_host_channel(struct channel_info *info)
+{
+       int flags;
+
+       info->fd = open(info->channel_path, O_RDWR | O_RSYNC);
+       if (info->fd < 0) {
+               RTE_LOG(ERR, CHANNEL_MANAGER, "Error(%s) opening fifo for '%s'\n",
+                               strerror(errno),
+                               info->channel_path);
+               return -1;
+       }
+
+       /* Get current flags */
+       flags = fcntl(info->fd, F_GETFL, 0);
+       if (flags < 0) {
+               RTE_LOG(WARNING, CHANNEL_MANAGER, "Error(%s) fcntl get flags for fifo "
+                               "'%s'\n", strerror(errno), info->channel_path);
+               return 1;
+       }
+       /* Set to Non Blocking */
+       flags |= O_NONBLOCK;
+       if (fcntl(info->fd, F_SETFL, flags) < 0) {
+               RTE_LOG(WARNING, CHANNEL_MANAGER,
+                               "Error(%s) setting non-blocking "
+                               "fifo for '%s'\n",
+                               strerror(errno), info->channel_path);
+               return -1;
+       }
+       return 0;
+}
+
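
open_host_channel(), added above, opens the per-core FIFO instead of a UNIX socket: O_RDWR keeps the descriptor usable even before any writer attaches, and a fcntl() round trip flips it to non-blocking so the epoll-based monitor never stalls on a read. A minimal sketch of the same open/fcntl pattern on a hypothetical path (O_RSYNC left out for brevity):

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/tmp/example_fifo.0";	/* hypothetical path */
	int fd, flags;

	if (mkfifo(path, 0660) < 0 && errno != EEXIST) {
		fprintf(stderr, "mkfifo: %s\n", strerror(errno));
		return 1;
	}
	/* O_RDWR keeps the FIFO open even while no writer is attached. */
	fd = open(path, O_RDWR);
	if (fd < 0) {
		fprintf(stderr, "open: %s\n", strerror(errno));
		return 1;
	}
	/* Switch to non-blocking so a poll/epoll loop never stalls on read. */
	flags = fcntl(fd, F_GETFL, 0);
	if (flags < 0 || fcntl(fd, F_SETFL, flags | O_NONBLOCK) < 0) {
		fprintf(stderr, "fcntl: %s\n", strerror(errno));
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}
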
 static int
 setup_channel_info(struct virtual_machine_info **vm_info_dptr,
                struct channel_info **chan_info_dptr, unsigned channel_num)
@@ -319,6 +323,7 @@ setup_channel_info(struct virtual_machine_info **vm_info_dptr,
        chan_info->channel_num = channel_num;
        chan_info->priv_info = (void *)vm_info;
        chan_info->status = CHANNEL_MGR_CHANNEL_DISCONNECTED;
+       chan_info->type = CHANNEL_TYPE_BINARY;
        if (open_non_blocking_channel(chan_info) < 0) {
                RTE_LOG(ERR, CHANNEL_MANAGER, "Could not open channel: "
                                "'%s' for VM '%s'\n",
@@ -334,13 +339,59 @@ setup_channel_info(struct virtual_machine_info **vm_info_dptr,
        }
        rte_spinlock_lock(&(vm_info->config_spinlock));
        vm_info->num_channels++;
-       vm_info->channel_mask |= 1ULL << channel_num;
+       vm_info->channel_mask[channel_num] = 1;
        vm_info->channels[channel_num] = chan_info;
        chan_info->status = CHANNEL_MGR_CHANNEL_CONNECTED;
        rte_spinlock_unlock(&(vm_info->config_spinlock));
        return 0;
 }
 
+static int
+fifo_path(char *dst, unsigned int len, unsigned int id)
+{
+       int cnt;
+
+       cnt = snprintf(dst, len, "%s%s%d", CHANNEL_MGR_SOCKET_PATH,
+                       CHANNEL_MGR_FIFO_PATTERN_NAME, id);
+
+       if ((cnt < 0) || (cnt > (int)len - 1)) {
+               RTE_LOG(ERR, CHANNEL_MANAGER, "Could not create proper "
+                       "string for fifo path\n");
+
+               return -1;
+       }
+
+       return 0;
+}
+
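
fifo_path() concatenates CHANNEL_MGR_SOCKET_PATH, CHANNEL_MGR_FIFO_PATTERN_NAME and the core id, and treats a truncated snprintf() result as an error. A short sketch of that truncation check, using a placeholder prefix:

#include <stdio.h>

static int build_path(char *dst, size_t len, unsigned int id)
{
	int cnt = snprintf(dst, len, "/tmp/powermonitor/fifo%u", id);

	/* snprintf() returns the length it wanted to write; a negative
	 * value or anything >= len means the buffer was too small. */
	if (cnt < 0 || (size_t)cnt >= len)
		return -1;
	return 0;
}

int main(void)
{
	char path[32];

	if (build_path(path, sizeof(path), 3) == 0)
		printf("%s\n", path);
	return 0;
}
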
+static int
+setup_host_channel_info(struct channel_info **chan_info_dptr,
+               unsigned int channel_num)
+{
+       struct channel_info *chan_info = *chan_info_dptr;
+
+       chan_info->channel_num = channel_num;
+       chan_info->priv_info = (void *)NULL;
+       chan_info->status = CHANNEL_MGR_CHANNEL_DISCONNECTED;
+       chan_info->type = CHANNEL_TYPE_JSON;
+
+       if (open_host_channel(chan_info) < 0) {
+               RTE_LOG(ERR, CHANNEL_MANAGER, "Could not open host channel: "
+                               "'%s'\n",
+                               chan_info->channel_path);
+               return -1;
+       }
+       if (add_channel_to_monitor(&chan_info) < 0) {
+               RTE_LOG(ERR, CHANNEL_MANAGER, "Could not add channel: "
+                               "'%s' to epoll ctl\n",
+                               chan_info->channel_path);
+               return -1;
+
+       }
+       chan_info->status = CHANNEL_MGR_CHANNEL_CONNECTED;
+       return 0;
+}
+
 int
 add_all_channels(const char *vm_name)
 {
@@ -376,7 +427,7 @@ add_all_channels(const char *vm_name)
                                !strncmp(dir->d_name, "..", 2))
                        continue;
 
-               snprintf(socket_name, sizeof(socket_name), "%s", dir->d_name);
+               strlcpy(socket_name, dir->d_name, sizeof(socket_name));
                remaining = socket_name;
                /* Extract vm_name from "<vm_name>.<channel_num>" */
                token = strsep(&remaining, ".");
@@ -396,10 +447,10 @@ add_all_channels(const char *vm_name)
                                        dir->d_name);
                        continue;
                }
-               if (channel_num >= CHANNEL_CMDS_MAX_VM_CHANNELS) {
+               if (channel_num >= RTE_MAX_LCORE) {
                        RTE_LOG(WARNING, CHANNEL_MANAGER, "Channel number(%u) is "
                                        "greater than max allowable: %d, skipping '%s%s'\n",
-                                       channel_num, CHANNEL_CMDS_MAX_VM_CHANNELS-1,
+                                       channel_num, RTE_MAX_LCORE-1,
                                        CHANNEL_MGR_SOCKET_PATH, dir->d_name);
                        continue;
                }
@@ -455,10 +506,10 @@ add_channels(const char *vm_name, unsigned *channel_list,
 
        for (i = 0; i < len_channel_list; i++) {
 
-               if (channel_list[i] >= CHANNEL_CMDS_MAX_VM_CHANNELS) {
+               if (channel_list[i] >= RTE_MAX_LCORE) {
                        RTE_LOG(INFO, CHANNEL_MANAGER, "Channel(%u) is out of range "
                                                        "0...%d\n", channel_list[i],
-                                                       CHANNEL_CMDS_MAX_VM_CHANNELS-1);
+                                                       RTE_MAX_LCORE-1);
                        continue;
                }
                if (channel_exists(vm_info, channel_list[i])) {
@@ -495,6 +546,73 @@ add_channels(const char *vm_name, unsigned *channel_list,
        return num_channels_enabled;
 }
 
+int
+add_host_channels(void)
+{
+       struct channel_info *chan_info;
+       char socket_path[PATH_MAX];
+       int num_channels_enabled = 0;
+       int ret;
+       struct core_info *ci;
+       struct channel_info *chan_infos[RTE_MAX_LCORE];
+       int i;
+
+       for (i = 0; i < RTE_MAX_LCORE; i++)
+               chan_infos[i] = NULL;
+
+       ci = get_core_info();
+       if (ci == NULL) {
+               RTE_LOG(ERR, CHANNEL_MANAGER, "Cannot allocate memory for core_info\n");
+               return 0;
+       }
+
+       for (i = 0; i < ci->core_count; i++) {
+               if (ci->cd[i].global_enabled_cpus == 0)
+                       continue;
+
+               ret = fifo_path(socket_path, sizeof(socket_path), i);
+               if (ret < 0)
+                       goto error;
+
+               ret = mkfifo(socket_path, 0660);
+               RTE_LOG(DEBUG, CHANNEL_MANAGER, "TRY CREATE fifo '%s'\n",
+                       socket_path);
+               if ((errno != EEXIST) && (ret < 0)) {
+                       RTE_LOG(ERR, CHANNEL_MANAGER, "Cannot create fifo '%s' error: "
+                                       "%s\n", socket_path, strerror(errno));
+                       goto error;
+               }
+               chan_info = rte_malloc(NULL, sizeof(*chan_info), 0);
+               if (chan_info == NULL) {
+                       RTE_LOG(ERR, CHANNEL_MANAGER, "Error allocating memory for "
+                                       "channel '%s'\n", socket_path);
+                       goto error;
+               }
+               chan_infos[i] = chan_info;
+               strlcpy(chan_info->channel_path, socket_path,
+                               sizeof(chan_info->channel_path));
+
+               if (setup_host_channel_info(&chan_info, i) < 0) {
+                       rte_free(chan_info);
+                       chan_infos[i] = NULL;
+                       goto error;
+               }
+               num_channels_enabled++;
+       }
+
+       return num_channels_enabled;
+error:
+       /* Clean up the channels opened before we hit an error. */
+       for (i = 0; i < ci->core_count; i++) {
+               if (chan_infos[i] != NULL) {
+                       remove_channel_from_monitor(chan_infos[i]);
+                       close(chan_infos[i]->fd);
+                       rte_free(chan_infos[i]);
+               }
+       }
+       return 0;
+}
+
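
add_host_channels() creates one FIFO per enabled core (mkfifo(), tolerating EEXIST), wraps each in a CHANNEL_TYPE_JSON channel_info and registers it with the monitor, unwinding every channel opened so far if any step fails. On the host side a client simply writes into the per-core FIFO; a sketch of such a writer follows, where both the path and the payload are placeholders (the real messages are the JSON strings parsed by channel_monitor):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* Placeholder path: one of the FIFOs created by add_host_channels(). */
	const char *path = "/tmp/powermonitor/fifo0";
	const char msg[] = "{\"example\": \"payload\"}";	/* placeholder JSON */
	int fd = open(path, O_WRONLY | O_NONBLOCK);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, msg, strlen(msg)) < 0)
		perror("write");
	close(fd);
	return 0;
}
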
 int
 remove_channel(struct channel_info **chan_info_dptr)
 {
@@ -506,7 +624,7 @@ remove_channel(struct channel_info **chan_info_dptr)
        vm_info = (struct virtual_machine_info *)chan_info->priv_info;
 
        rte_spinlock_lock(&(vm_info->config_spinlock));
-       vm_info->channel_mask &= ~(1ULL << chan_info->channel_num);
+       vm_info->channel_mask[chan_info->channel_num] = 0;
        vm_info->num_channels--;
        rte_spinlock_unlock(&(vm_info->config_spinlock));
 
@@ -519,7 +637,7 @@ set_channel_status_all(const char *vm_name, enum channel_status status)
 {
        struct virtual_machine_info *vm_info;
        unsigned i;
-       uint64_t mask;
+       char mask[RTE_MAX_LCORE];
        int num_channels_changed = 0;
 
        if (!(status == CHANNEL_MGR_CHANNEL_CONNECTED ||
@@ -535,8 +653,10 @@ set_channel_status_all(const char *vm_name, enum channel_status status)
        }
 
        rte_spinlock_lock(&(vm_info->config_spinlock));
-       mask = vm_info->channel_mask;
-       ITERATIVE_BITMASK_CHECK_64(mask, i) {
+       memcpy(mask, (char *)vm_info->channel_mask, RTE_MAX_LCORE);
+       for (i = 0; i < RTE_MAX_LCORE; i++) {
+               if (mask[i] != 1)
+                       continue;
                vm_info->channels[i]->status = status;
                num_channels_changed++;
        }
@@ -575,12 +695,76 @@ set_channel_status(const char *vm_name, unsigned *channel_list,
        return num_channels_changed;
 }
 
+void
+get_all_vm(int *num_vm, int *num_vcpu)
+{
+
+       virNodeInfo node_info;
+       virDomainPtr *domptr;
+       int i, ii, numVcpus[MAX_VCPUS], n_vcpus;
+       unsigned int jj;
+       const char *vm_name;
+       unsigned int domain_flags = VIR_CONNECT_LIST_DOMAINS_RUNNING |
+                               VIR_CONNECT_LIST_DOMAINS_PERSISTENT;
+       unsigned int domain_flag = VIR_DOMAIN_VCPU_CONFIG;
+
+       if (!global_hypervisor_available)
+               return;
+
+       memset(global_cpumaps, 0, RTE_MAX_LCORE*global_maplen);
+       if (virNodeGetInfo(global_vir_conn_ptr, &node_info)) {
+               RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to retrieve node Info\n");
+               return;
+       }
+
+       /* Returns number of pcpus */
+       global_n_host_cpus = (unsigned int)node_info.cpus;
+
+       /* Returns number of active domains */
+       *num_vm = virConnectListAllDomains(global_vir_conn_ptr, &domptr,
+                                       domain_flags);
+       if (*num_vm <= 0) {
+               RTE_LOG(ERR, CHANNEL_MANAGER, "No Active Domains Running\n");
+               return;
+       }
+
+       for (i = 0; i < *num_vm; i++) {
+
+               /* Get Domain Names */
+               vm_name = virDomainGetName(domptr[i]);
+               lvm_info[i].vm_name = vm_name;
+
+               /* Get Number of Vcpus */
+               numVcpus[i] = virDomainGetVcpusFlags(domptr[i], domain_flag);
+
+               /* Get Number of VCpus & VcpuPinInfo */
+               n_vcpus = virDomainGetVcpuPinInfo(domptr[i],
+                               numVcpus[i], global_cpumaps,
+                               global_maplen, domain_flag);
+
+               if ((int)n_vcpus > 0) {
+                       *num_vcpu = n_vcpus;
+                       lvm_info[i].num_cpus = n_vcpus;
+               }
+
+               /* Save pcpu in use by libvirt VMs */
+               for (ii = 0; ii < n_vcpus; ii++) {
+                       for (jj = 0; jj < global_n_host_cpus; jj++) {
+                               if (VIR_CPU_USABLE(global_cpumaps,
+                                               global_maplen, ii, jj) > 0) {
+                                       lvm_info[i].pcpus[ii] = jj;
+                               }
+                       }
+               }
+       }
+}
+
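
get_all_vm(), added above, queries libvirt for the running persistent domains, records each name and vCPU count in lvm_info[], and notes which pCPU every vCPU is pinned to. A stripped-down sketch of that enumeration, assuming the qemu:///system URI (build with -lvirt):

#include <stdio.h>
#include <stdlib.h>
#include <libvirt/libvirt.h>

int main(void)
{
	virConnectPtr conn = virConnectOpenReadOnly("qemu:///system");
	virDomainPtr *doms = NULL;
	int i, n;

	if (conn == NULL)
		return 1;
	n = virConnectListAllDomains(conn, &doms,
			VIR_CONNECT_LIST_DOMAINS_RUNNING |
			VIR_CONNECT_LIST_DOMAINS_PERSISTENT);
	for (i = 0; i < n; i++) {
		printf("%s: %d vCPUs\n", virDomainGetName(doms[i]),
				virDomainGetVcpusFlags(doms[i],
						VIR_DOMAIN_VCPU_CONFIG));
		virDomainFree(doms[i]);
	}
	free(doms);
	virConnectClose(conn);
	return 0;
}
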
 int
 get_info_vm(const char *vm_name, struct vm_info *info)
 {
        struct virtual_machine_info *vm_info;
        unsigned i, channel_num = 0;
-       uint64_t mask;
+       char mask[RTE_MAX_LCORE];
 
        vm_info = find_domain_by_name(vm_name);
        if (vm_info == NULL) {
@@ -593,24 +777,32 @@ get_info_vm(const char *vm_name, struct vm_info *info)
 
        rte_spinlock_lock(&(vm_info->config_spinlock));
 
-       mask = vm_info->channel_mask;
-       ITERATIVE_BITMASK_CHECK_64(mask, i) {
+       memcpy(mask, (char *)vm_info->channel_mask, RTE_MAX_LCORE);
+       for (i = 0; i < RTE_MAX_LCORE; i++) {
+               if (mask[i] != 1)
+                       continue;
                info->channels[channel_num].channel_num = i;
                memcpy(info->channels[channel_num].channel_path,
-                               vm_info->channels[i]->channel_path, UNIX_PATH_MAX);
-               info->channels[channel_num].status = vm_info->channels[i]->status;
-               info->channels[channel_num].fd = vm_info->channels[i]->fd;
+                               vm_info->channels[i]->channel_path,
+                               UNIX_PATH_MAX);
+               info->channels[channel_num].status =
+                               vm_info->channels[i]->status;
+               info->channels[channel_num].fd =
+                               vm_info->channels[i]->fd;
                channel_num++;
        }
 
+       info->allow_query = vm_info->allow_query;
        info->num_channels = channel_num;
        info->num_vcpus = vm_info->info.nrVirtCpu;
        rte_spinlock_unlock(&(vm_info->config_spinlock));
 
        memcpy(info->name, vm_info->name, sizeof(vm_info->name));
+       rte_spinlock_lock(&(vm_info->config_spinlock));
        for (i = 0; i < info->num_vcpus; i++) {
-               info->pcpu_mask[i] = rte_atomic64_read(&vm_info->pcpu_mask[i]);
+               info->pcpu_map[i] = vm_info->pcpu_map[i];
        }
+       rte_spinlock_unlock(&(vm_info->config_spinlock));
        return 0;
 }
 
@@ -651,24 +843,25 @@ add_vm(const char *vm_name)
                rte_free(new_domain);
                return -1;
        }
-       if (new_domain->info.nrVirtCpu > CHANNEL_CMDS_MAX_CPUS) {
+       if (new_domain->info.nrVirtCpu > RTE_MAX_LCORE) {
                RTE_LOG(ERR, CHANNEL_MANAGER, "Error the number of virtual CPUs(%u) is "
                                "greater than allowable(%d)\n", new_domain->info.nrVirtCpu,
-                               CHANNEL_CMDS_MAX_CPUS);
+                               RTE_MAX_LCORE);
                rte_free(new_domain);
                return -1;
        }
 
-       for (i = 0; i < CHANNEL_CMDS_MAX_CPUS; i++) {
-               rte_atomic64_init(&new_domain->pcpu_mask[i]);
-       }
+       for (i = 0; i < RTE_MAX_LCORE; i++)
+               new_domain->pcpu_map[i] = 0;
+
        if (update_pcpus_mask(new_domain) < 0) {
                RTE_LOG(ERR, CHANNEL_MANAGER, "Error getting physical CPU pinning\n");
                rte_free(new_domain);
                return -1;
        }
        strncpy(new_domain->name, vm_name, sizeof(new_domain->name));
-       new_domain->channel_mask = 0;
+       new_domain->name[sizeof(new_domain->name) - 1] = '\0';
+       memset(new_domain->channel_mask, 0, RTE_MAX_LCORE);
        new_domain->num_channels = 0;
 
        if (!virDomainIsActive(dom_ptr))
@@ -676,6 +869,7 @@ add_vm(const char *vm_name)
        else
                new_domain->status = CHANNEL_MGR_VM_ACTIVE;
 
+       new_domain->allow_query = 0;
        rte_spinlock_init(&(new_domain->config_spinlock));
        LIST_INSERT_HEAD(&vm_list_head, new_domain, vms_info);
        return 0;
@@ -705,6 +899,23 @@ remove_vm(const char *vm_name)
        return 0;
 }
 
+int
+set_query_status(char *vm_name,
+               bool allow_query)
+{
+       struct virtual_machine_info *vm_info;
+
+       vm_info = find_domain_by_name(vm_name);
+       if (vm_info == NULL) {
+               RTE_LOG(ERR, CHANNEL_MANAGER, "VM '%s' not found\n", vm_name);
+               return -1;
+       }
+       rte_spinlock_lock(&(vm_info->config_spinlock));
+       vm_info->allow_query = allow_query ? 1 : 0;
+       rte_spinlock_unlock(&(vm_info->config_spinlock));
+       return 0;
+}
+
 static void
 disconnect_hypervisor(void)
 {
@@ -730,50 +941,55 @@ connect_hypervisor(const char *path)
        }
        return 0;
 }
-
 int
-channel_manager_init(const char *path)
+channel_manager_init(const char *path __rte_unused)
 {
-       int n_cpus;
+       virNodeInfo info;
 
        LIST_INIT(&vm_list_head);
        if (connect_hypervisor(path) < 0) {
-               RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to initialize channel manager\n");
-               return -1;
-       }
-
-       global_maplen = VIR_CPU_MAPLEN(CHANNEL_CMDS_MAX_CPUS);
+               global_n_host_cpus = 64;
+               global_hypervisor_available = 0;
+               RTE_LOG(INFO, CHANNEL_MANAGER, "Unable to connect to hypervisor, host-only mode\n");
+       } else {
+               global_hypervisor_available = 1;
+
+               global_maplen = VIR_CPU_MAPLEN(RTE_MAX_LCORE);
+
+               global_vircpuinfo = rte_zmalloc(NULL,
+                               sizeof(*global_vircpuinfo) *
+                               RTE_MAX_LCORE, RTE_CACHE_LINE_SIZE);
+               if (global_vircpuinfo == NULL) {
+                       RTE_LOG(ERR, CHANNEL_MANAGER, "Error allocating memory for CPU Info\n");
+                       goto error;
+               }
+               global_cpumaps = rte_zmalloc(NULL,
+                               RTE_MAX_LCORE * global_maplen,
+                               RTE_CACHE_LINE_SIZE);
+               if (global_cpumaps == NULL)
+                       goto error;
 
-       global_vircpuinfo = rte_zmalloc(NULL, sizeof(*global_vircpuinfo) *
-                       CHANNEL_CMDS_MAX_CPUS, RTE_CACHE_LINE_SIZE);
-       if (global_vircpuinfo == NULL) {
-               RTE_LOG(ERR, CHANNEL_MANAGER, "Error allocating memory for CPU Info\n");
-               goto error;
-       }
-       global_cpumaps = rte_zmalloc(NULL, CHANNEL_CMDS_MAX_CPUS * global_maplen,
-                       RTE_CACHE_LINE_SIZE);
-       if (global_cpumaps == NULL) {
-               goto error;
+               if (virNodeGetInfo(global_vir_conn_ptr, &info)) {
+                       RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to retrieve node Info\n");
+                       goto error;
+               }
+               global_n_host_cpus = (unsigned int)info.cpus;
        }
 
-       n_cpus = virNodeGetCPUMap(global_vir_conn_ptr, NULL, NULL, 0);
-       if (n_cpus <= 0) {
-               RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to get the number of Host "
-                               "CPUs\n");
-               goto error;
-       }
-       global_n_host_cpus = (unsigned)n_cpus;
 
-       if (global_n_host_cpus > CHANNEL_CMDS_MAX_CPUS) {
-               RTE_LOG(ERR, CHANNEL_MANAGER, "The number of host CPUs(%u) exceeds the "
-                               "maximum of %u\n", global_n_host_cpus, CHANNEL_CMDS_MAX_CPUS);
-               goto error;
 
+       if (global_n_host_cpus > RTE_MAX_LCORE) {
+               RTE_LOG(WARNING, CHANNEL_MANAGER, "The number of host CPUs(%u) exceeds the "
+                               "maximum of %u. No cores over %u should be used.\n",
+                               global_n_host_cpus, RTE_MAX_LCORE,
+                               RTE_MAX_LCORE - 1);
+               global_n_host_cpus = RTE_MAX_LCORE;
        }
 
        return 0;
 error:
-       disconnect_hypervisor();
+       if (global_hypervisor_available)
+               disconnect_hypervisor();
        return -1;
 }
 
@@ -781,16 +997,19 @@ void
 channel_manager_exit(void)
 {
        unsigned i;
-       uint64_t mask;
+       char mask[RTE_MAX_LCORE];
        struct virtual_machine_info *vm_info;
 
        LIST_FOREACH(vm_info, &vm_list_head, vms_info) {
 
                rte_spinlock_lock(&(vm_info->config_spinlock));
 
-               mask = vm_info->channel_mask;
-               ITERATIVE_BITMASK_CHECK_64(mask, i) {
-                       remove_channel_from_monitor(vm_info->channels[i]);
+               memcpy(mask, (char *)vm_info->channel_mask, RTE_MAX_LCORE);
+               for (i = 0; i < RTE_MAX_LCORE; i++) {
+                       if (mask[i] != 1)
+                               continue;
+                       remove_channel_from_monitor(
+                                       vm_info->channels[i]);
                        close(vm_info->channels[i]->fd);
                        rte_free(vm_info->channels[i]);
                }
@@ -800,7 +1019,10 @@ channel_manager_exit(void)
                rte_free(vm_info);
        }
 
-       rte_free(global_cpumaps);
-       rte_free(global_vircpuinfo);
-       disconnect_hypervisor();
+       if (global_hypervisor_available) {
+               /* Only needed if hypervisor available */
+               rte_free(global_cpumaps);
+               rte_free(global_vircpuinfo);
+               disconnect_hypervisor();
+       }
 }