-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
*/
#include <stdio.h>
+#include <stdbool.h>
#include <sys/queue.h>
#include <sys/types.h>
+#include <sys/stat.h>
#include <sys/socket.h>
#include <sys/select.h>
-#include <rte_config.h>
#include <rte_malloc.h>
#include <rte_memory.h>
#include <rte_mempool.h>
static virVcpuInfo *global_vircpuinfo;
static size_t global_maplen;
-static unsigned global_n_host_cpus;
+static unsigned int global_n_host_cpus;
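+/* true when connect_hypervisor() succeeded during channel_manager_init() */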
+static bool global_hypervisor_available;
/*
* Represents a single Virtual Machine
{
struct virtual_machine_info *vm_info =
(struct virtual_machine_info *)chan_info->priv_info;
- return rte_atomic64_read(&vm_info->pcpu_mask[vcpu]);
+
+ if (global_hypervisor_available && (vm_info != NULL))
+ return rte_atomic64_read(&vm_info->pcpu_mask[vcpu]);
+ else
+ return 0;
}
static inline int
return 0;
}
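+/*
+ * Open the host fifo read/write and switch it to non-blocking mode so
+ * reads on the channel do not block.
+ */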
+static int
+open_host_channel(struct channel_info *info)
+{
+ int flags;
+
+ info->fd = open(info->channel_path, O_RDWR | O_RSYNC);
+ if (info->fd == -1) {
+ RTE_LOG(ERR, CHANNEL_MANAGER, "Error(%s) opening fifo for '%s'\n",
+ strerror(errno),
+ info->channel_path);
+ return -1;
+ }
+
+ /* Get the current descriptor flags */
+ flags = fcntl(info->fd, F_GETFL, 0);
+ if (flags < 0) {
+ RTE_LOG(WARNING, CHANNEL_MANAGER, "Error(%s) getting flags for "
+ "'%s'\n", strerror(errno), info->channel_path);
+ close(info->fd);
+ return -1;
+ }
+ /* Set the fifo to non-blocking */
+ flags |= O_NONBLOCK;
+ if (fcntl(info->fd, F_SETFL, flags) < 0) {
+ RTE_LOG(WARNING, CHANNEL_MANAGER,
+ "Error(%s) setting non-blocking mode for '%s'\n",
+ strerror(errno), info->channel_path);
+ close(info->fd);
+ return -1;
+ }
+ return 0;
+}
+
static int
setup_channel_info(struct virtual_machine_info **vm_info_dptr,
struct channel_info **chan_info_dptr, unsigned channel_num)
chan_info->channel_num = channel_num;
chan_info->priv_info = (void *)vm_info;
chan_info->status = CHANNEL_MGR_CHANNEL_DISCONNECTED;
+ chan_info->type = CHANNEL_TYPE_BINARY;
if (open_non_blocking_channel(chan_info) < 0) {
RTE_LOG(ERR, CHANNEL_MANAGER, "Could not open channel: "
"'%s' for VM '%s'\n",
return 0;
}
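+/* Build the host fifo path under CHANNEL_MGR_SOCKET_PATH */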
+static void
+fifo_path(char *dst, unsigned int len)
+{
+ snprintf(dst, len, "%sfifo", CHANNEL_MGR_SOCKET_PATH);
+}
+
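+/*
+ * Initialise the host channel as a JSON-type channel, open its fifo
+ * and register it with the epoll-based channel monitor.
+ */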
+static int
+setup_host_channel_info(struct channel_info **chan_info_dptr,
+ unsigned int channel_num)
+{
+ struct channel_info *chan_info = *chan_info_dptr;
+
+ chan_info->channel_num = channel_num;
+ chan_info->priv_info = NULL;
+ chan_info->status = CHANNEL_MGR_CHANNEL_DISCONNECTED;
+ chan_info->type = CHANNEL_TYPE_JSON;
+
+ fifo_path(chan_info->channel_path, sizeof(chan_info->channel_path));
+
+ if (open_host_channel(chan_info) < 0) {
+ RTE_LOG(ERR, CHANNEL_MANAGER, "Could not open host channel: "
+ "'%s'\n",
+ chan_info->channel_path);
+ return -1;
+ }
+ if (add_channel_to_monitor(&chan_info) < 0) {
+ RTE_LOG(ERR, CHANNEL_MANAGER, "Could not add channel "
+ "'%s' to epoll ctl\n",
+ chan_info->channel_path);
+ close(chan_info->fd);
+ return -1;
+ }
+ chan_info->status = CHANNEL_MGR_CHANNEL_CONNECTED;
+ return 0;
+}
+
int
add_all_channels(const char *vm_name)
{
return num_channels_enabled;
}
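+/*
+ * Create the host fifo (unless it already exists) and set up a single
+ * host channel over it. Returns the number of channels enabled:
+ * 1 on success, 0 on failure.
+ */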
+int
+add_host_channel(void)
+{
+ struct channel_info *chan_info;
+ char socket_path[PATH_MAX];
+ int num_channels_enabled = 0;
+ int ret;
+
+ fifo_path(socket_path, sizeof(socket_path));
+
+ ret = mkfifo(socket_path, 0660);
+ if (ret < 0 && errno != EEXIST) {
+ RTE_LOG(ERR, CHANNEL_MANAGER, "Cannot create fifo '%s' error: "
+ "%s\n", socket_path, strerror(errno));
+ return 0;
+ }
+
+ if (access(socket_path, F_OK) < 0) {
+ RTE_LOG(ERR, CHANNEL_MANAGER, "Channel path '%s' error: "
+ "%s\n", socket_path, strerror(errno));
+ return 0;
+ }
+ chan_info = rte_malloc(NULL, sizeof(*chan_info), 0);
+ if (chan_info == NULL) {
+ RTE_LOG(ERR, CHANNEL_MANAGER, "Error allocating memory for "
+ "channel '%s'\n", socket_path);
+ return 0;
+ }
+ snprintf(chan_info->channel_path,
+ sizeof(chan_info->channel_path), "%s", socket_path);
+ if (setup_host_channel_info(&chan_info, 0) < 0) {
+ rte_free(chan_info);
+ return 0;
+ }
+ num_channels_enabled++;
+
+ return num_channels_enabled;
+}
+
int
remove_channel(struct channel_info **chan_info_dptr)
{
return num_channels_changed;
}
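+/*
+ * Query libvirt for all running, persistent domains and record each
+ * domain's name, number of vcpus and vcpu-to-pcpu pinning in lvm_info.
+ */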
+void
+get_all_vm(int *num_vm, int *num_vcpu)
+{
+
+ virNodeInfo node_info;
+ virDomainPtr *domptr;
+ uint64_t mask;
+ int i, ii, numVcpus[MAX_VCPUS], cpu, n_vcpus;
+ unsigned int jj;
+ const char *vm_name;
+ unsigned int domain_flags = VIR_CONNECT_LIST_DOMAINS_RUNNING |
+ VIR_CONNECT_LIST_DOMAINS_PERSISTENT;
+ unsigned int domain_flag = VIR_DOMAIN_VCPU_CONFIG;
+
+ if (!global_hypervisor_available)
+ return;
+
+ memset(global_cpumaps, 0, CHANNEL_CMDS_MAX_CPUS*global_maplen);
+ if (virNodeGetInfo(global_vir_conn_ptr, &node_info)) {
+ RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to retrieve node Info\n");
+ return;
+ }
+
+ /* Returns number of pcpus */
+ global_n_host_cpus = (unsigned int)node_info.cpus;
+
+ /* Returns number of active domains */
+ *num_vm = virConnectListAllDomains(global_vir_conn_ptr, &domptr,
+ domain_flags);
+ if (*num_vm <= 0) {
+ RTE_LOG(ERR, CHANNEL_MANAGER, "No Active Domains Running\n");
+ return;
+ }
+
+ for (i = 0; i < *num_vm; i++) {
+
+ /* Get Domain Names */
+ vm_name = virDomainGetName(domptr[i]);
+ lvm_info[i].vm_name = vm_name;
+
+ /* Get Number of Vcpus */
+ numVcpus[i] = virDomainGetVcpusFlags(domptr[i], domain_flag);
+
+ /* Get Number of VCpus & VcpuPinInfo */
+ n_vcpus = virDomainGetVcpuPinInfo(domptr[i],
+ numVcpus[i], global_cpumaps,
+ global_maplen, domain_flag);
+
+ if (n_vcpus > 0) {
+ *num_vcpu = n_vcpus;
+ lvm_info[i].num_cpus = n_vcpus;
+ }
+
+ /* Save pcpu in use by libvirt VMs */
+ for (ii = 0; ii < n_vcpus; ii++) {
+ mask = 0;
+ for (jj = 0; jj < global_n_host_cpus; jj++) {
+ if (VIR_CPU_USABLE(global_cpumaps,
+ global_maplen, ii, jj) > 0) {
+ mask |= 1ULL << jj;
+ }
+ }
+ ITERATIVE_BITMASK_CHECK_64(mask, cpu) {
+ lvm_info[i].pcpus[ii] = cpu;
+ }
+ }
+ }
+}
+
int
get_info_vm(const char *vm_name, struct vm_info *info)
{
return -1;
}
strncpy(new_domain->name, vm_name, sizeof(new_domain->name));
+ new_domain->name[sizeof(new_domain->name) - 1] = '\0';
new_domain->channel_mask = 0;
new_domain->num_channels = 0;
}
return 0;
}
-
int
channel_manager_init(const char *path)
{
- int n_cpus;
+ virNodeInfo info;
LIST_INIT(&vm_list_head);
if (connect_hypervisor(path) < 0) {
- RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to initialize channel manager\n");
- return -1;
- }
-
- global_maplen = VIR_CPU_MAPLEN(CHANNEL_CMDS_MAX_CPUS);
- global_vircpuinfo = rte_zmalloc(NULL, sizeof(*global_vircpuinfo) *
- CHANNEL_CMDS_MAX_CPUS, RTE_CACHE_LINE_SIZE);
- if (global_vircpuinfo == NULL) {
- RTE_LOG(ERR, CHANNEL_MANAGER, "Error allocating memory for CPU Info\n");
- goto error;
- }
- global_cpumaps = rte_zmalloc(NULL, CHANNEL_CMDS_MAX_CPUS * global_maplen,
- RTE_CACHE_LINE_SIZE);
- if (global_cpumaps == NULL) {
- goto error;
+ global_n_host_cpus = 64;
+ global_hypervisor_available = 0;
+ RTE_LOG(INFO, CHANNEL_MANAGER, "Unable to initialize channel manager\n");
+ } else {
+ global_hypervisor_available = 1;
+
+ global_maplen = VIR_CPU_MAPLEN(CHANNEL_CMDS_MAX_CPUS);
+
+ global_vircpuinfo = rte_zmalloc(NULL,
+ sizeof(*global_vircpuinfo) *
+ CHANNEL_CMDS_MAX_CPUS, RTE_CACHE_LINE_SIZE);
+ if (global_vircpuinfo == NULL) {
+ RTE_LOG(ERR, CHANNEL_MANAGER, "Error allocating memory for CPU Info\n");
+ goto error;
+ }
+ global_cpumaps = rte_zmalloc(NULL,
+ CHANNEL_CMDS_MAX_CPUS * global_maplen,
+ RTE_CACHE_LINE_SIZE);
+ if (global_cpumaps == NULL)
+ goto error;
+
+ if (virNodeGetInfo(global_vir_conn_ptr, &info)) {
+ RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to retrieve node Info\n");
+ goto error;
+ }
+ global_n_host_cpus = (unsigned int)info.cpus;
}
- n_cpus = virNodeGetCPUMap(global_vir_conn_ptr, NULL, NULL, 0);
- if (n_cpus <= 0) {
- RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to get the number of Host "
- "CPUs\n");
- goto error;
- }
- global_n_host_cpus = (unsigned)n_cpus;
- if (global_n_host_cpus > CHANNEL_CMDS_MAX_CPUS)
+
+ if (global_n_host_cpus > CHANNEL_CMDS_MAX_CPUS) {
RTE_LOG(WARNING, CHANNEL_MANAGER, "The number of host CPUs(%u) exceeds the "
"maximum of %u. No cores over %u should be used.\n",
global_n_host_cpus, CHANNEL_CMDS_MAX_CPUS,
CHANNEL_CMDS_MAX_CPUS - 1);
+ global_n_host_cpus = CHANNEL_CMDS_MAX_CPUS;
+ }
return 0;
error:
- disconnect_hypervisor();
+ if (global_hypervisor_available)
+ disconnect_hypervisor();
return -1;
}
rte_free(vm_info);
}
- rte_free(global_cpumaps);
- rte_free(global_vircpuinfo);
- disconnect_hypervisor();
+ if (global_hypervisor_available) {
+ /* Only needed if hypervisor available */
+ rte_free(global_cpumaps);
+ rte_free(global_vircpuinfo);
+ disconnect_hypervisor();
+ }
}