1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2014 Intel Corporation
14 #include <sys/queue.h>
15 #include <sys/types.h>
17 #include <sys/socket.h>
18 #include <sys/select.h>
20 #include <rte_malloc.h>
21 #include <rte_memory.h>
22 #include <rte_mempool.h>
24 #include <rte_atomic.h>
25 #include <rte_spinlock.h>
27 #include <libvirt/libvirt.h>
29 #include "channel_manager.h"
30 #include "channel_commands.h"
31 #include "channel_monitor.h"
34 #define RTE_LOGTYPE_CHANNEL_MANAGER RTE_LOGTYPE_USER1
36 #define ITERATIVE_BITMASK_CHECK_64(mask_u64b, i) \
37 for (i = 0; mask_u64b; mask_u64b &= ~(1ULL << i++)) \
38 if ((mask_u64b >> i) & 1) \
40 /* Global pointer to libvirt connection */
41 static virConnectPtr global_vir_conn_ptr;
43 static unsigned char *global_cpumaps;
44 static virVcpuInfo *global_vircpuinfo;
45 static size_t global_maplen;
47 static unsigned int global_n_host_cpus;
48 static bool global_hypervisor_available;
51 * Represents a single Virtual Machine
53 struct virtual_machine_info {
54 char name[CHANNEL_MGR_MAX_NAME_LEN];
55 rte_atomic64_t pcpu_mask[CHANNEL_CMDS_MAX_CPUS];
56 struct channel_info *channels[CHANNEL_CMDS_MAX_VM_CHANNELS];
57 uint64_t channel_mask;
59 enum vm_status status;
60 virDomainPtr domainPtr;
62 rte_spinlock_t config_spinlock;
63 LIST_ENTRY(virtual_machine_info) vms_info;
66 LIST_HEAD(, virtual_machine_info) vm_list_head;
68 static struct virtual_machine_info *
69 find_domain_by_name(const char *name)
71 struct virtual_machine_info *info;
72 LIST_FOREACH(info, &vm_list_head, vms_info) {
73 if (!strncmp(info->name, name, CHANNEL_MGR_MAX_NAME_LEN-1))
80 update_pcpus_mask(struct virtual_machine_info *vm_info)
82 virVcpuInfoPtr cpuinfo;
87 memset(global_cpumaps, 0, CHANNEL_CMDS_MAX_CPUS*global_maplen);
89 if (!virDomainIsActive(vm_info->domainPtr)) {
90 n_vcpus = virDomainGetVcpuPinInfo(vm_info->domainPtr,
91 vm_info->info.nrVirtCpu, global_cpumaps, global_maplen,
92 VIR_DOMAIN_AFFECT_CONFIG);
94 RTE_LOG(ERR, CHANNEL_MANAGER, "Error getting vCPU info for "
95 "in-active VM '%s'\n", vm_info->name);
101 memset(global_vircpuinfo, 0, sizeof(*global_vircpuinfo)*
102 CHANNEL_CMDS_MAX_CPUS);
104 cpuinfo = global_vircpuinfo;
106 n_vcpus = virDomainGetVcpus(vm_info->domainPtr, cpuinfo,
107 CHANNEL_CMDS_MAX_CPUS, global_cpumaps, global_maplen);
109 RTE_LOG(ERR, CHANNEL_MANAGER, "Error getting vCPU info for "
110 "active VM '%s'\n", vm_info->name);
114 if (n_vcpus >= CHANNEL_CMDS_MAX_CPUS) {
115 RTE_LOG(ERR, CHANNEL_MANAGER, "Number of vCPUS(%u) is out of range "
116 "0...%d\n", n_vcpus, CHANNEL_CMDS_MAX_CPUS-1);
119 if (n_vcpus != vm_info->info.nrVirtCpu) {
120 RTE_LOG(INFO, CHANNEL_MANAGER, "Updating the number of vCPUs for VM '%s"
121 " from %d -> %d\n", vm_info->name, vm_info->info.nrVirtCpu,
123 vm_info->info.nrVirtCpu = n_vcpus;
125 for (i = 0; i < vm_info->info.nrVirtCpu; i++) {
127 for (j = 0; j < global_n_host_cpus; j++) {
128 if (VIR_CPU_USABLE(global_cpumaps, global_maplen, i, j) > 0) {
132 rte_atomic64_set(&vm_info->pcpu_mask[i], mask);
138 set_pcpus_mask(char *vm_name, unsigned vcpu, uint64_t core_mask)
141 int flags = VIR_DOMAIN_AFFECT_LIVE|VIR_DOMAIN_AFFECT_CONFIG;
142 struct virtual_machine_info *vm_info;
143 uint64_t mask = core_mask;
145 if (vcpu >= CHANNEL_CMDS_MAX_CPUS) {
146 RTE_LOG(ERR, CHANNEL_MANAGER, "vCPU(%u) exceeds max allowable(%d)\n",
147 vcpu, CHANNEL_CMDS_MAX_CPUS-1);
151 vm_info = find_domain_by_name(vm_name);
152 if (vm_info == NULL) {
153 RTE_LOG(ERR, CHANNEL_MANAGER, "VM '%s' not found\n", vm_name);
157 if (!virDomainIsActive(vm_info->domainPtr)) {
158 RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to set vCPU(%u) to pCPU "
159 "mask(0x%"PRIx64") for VM '%s', VM is not active\n",
160 vcpu, core_mask, vm_info->name);
164 if (vcpu >= vm_info->info.nrVirtCpu) {
165 RTE_LOG(ERR, CHANNEL_MANAGER, "vCPU(%u) exceeds the assigned number of "
166 "vCPUs(%u)\n", vcpu, vm_info->info.nrVirtCpu);
169 memset(global_cpumaps, 0 , CHANNEL_CMDS_MAX_CPUS * global_maplen);
170 ITERATIVE_BITMASK_CHECK_64(mask, i) {
171 VIR_USE_CPU(global_cpumaps, i);
172 if (i >= global_n_host_cpus) {
173 RTE_LOG(ERR, CHANNEL_MANAGER, "CPU(%u) exceeds the available "
174 "number of CPUs(%u)\n", i, global_n_host_cpus);
178 if (virDomainPinVcpuFlags(vm_info->domainPtr, vcpu, global_cpumaps,
179 global_maplen, flags) < 0) {
180 RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to set vCPU(%u) to pCPU "
181 "mask(0x%"PRIx64") for VM '%s'\n", vcpu, core_mask,
185 rte_atomic64_set(&vm_info->pcpu_mask[vcpu], core_mask);
/*
 * Pin a single vCPU of the named VM to exactly one physical core.
 * Convenience wrapper around set_pcpus_mask() with a one-bit mask.
 *
 * @return 0 on success, -1 on error (propagated from set_pcpus_mask())
 */
int
set_pcpu(char *vm_name, unsigned vcpu, unsigned core_num)
{
	return set_pcpus_mask(vm_name, vcpu, 1ULL << core_num);
}
199 get_pcpus_mask(struct channel_info *chan_info, unsigned vcpu)
201 struct virtual_machine_info *vm_info =
202 (struct virtual_machine_info *)chan_info->priv_info;
204 if (global_hypervisor_available && (vm_info != NULL))
205 return rte_atomic64_read(&vm_info->pcpu_mask[vcpu]);
211 channel_exists(struct virtual_machine_info *vm_info, unsigned channel_num)
213 rte_spinlock_lock(&(vm_info->config_spinlock));
214 if (vm_info->channel_mask & (1ULL << channel_num)) {
215 rte_spinlock_unlock(&(vm_info->config_spinlock));
218 rte_spinlock_unlock(&(vm_info->config_spinlock));
225 open_non_blocking_channel(struct channel_info *info)
228 struct sockaddr_un sock_addr;
232 info->fd = socket(AF_UNIX, SOCK_STREAM, 0);
233 if (info->fd == -1) {
234 RTE_LOG(ERR, CHANNEL_MANAGER, "Error(%s) creating socket for '%s'\n",
239 sock_addr.sun_family = AF_UNIX;
240 memcpy(&sock_addr.sun_path, info->channel_path,
241 strlen(info->channel_path)+1);
243 /* Get current flags */
244 flags = fcntl(info->fd, F_GETFL, 0);
246 RTE_LOG(WARNING, CHANNEL_MANAGER, "Error(%s) fcntl get flags socket for"
247 "'%s'\n", strerror(errno), info->channel_path);
250 /* Set to Non Blocking */
252 if (fcntl(info->fd, F_SETFL, flags) < 0) {
253 RTE_LOG(WARNING, CHANNEL_MANAGER, "Error(%s) setting non-blocking "
254 "socket for '%s'\n", strerror(errno), info->channel_path);
257 ret = connect(info->fd, (struct sockaddr *)&sock_addr,
260 /* ECONNREFUSED error is given when VM is not active */
261 if (errno == ECONNREFUSED) {
262 RTE_LOG(WARNING, CHANNEL_MANAGER, "VM is not active or has not "
263 "activated its endpoint to channel %s\n",
267 /* Wait for tv_sec if in progress */
268 else if (errno == EINPROGRESS) {
271 FD_ZERO(&soc_fd_set);
272 FD_SET(info->fd, &soc_fd_set);
273 if (select(info->fd+1, NULL, &soc_fd_set, NULL, &tv) > 0) {
274 RTE_LOG(WARNING, CHANNEL_MANAGER, "Timeout or error on channel "
275 "'%s'\n", info->channel_path);
279 /* Any other error */
280 RTE_LOG(WARNING, CHANNEL_MANAGER, "Error(%s) connecting socket"
281 " for '%s'\n", strerror(errno), info->channel_path);
289 open_host_channel(struct channel_info *info)
293 info->fd = open(info->channel_path, O_RDWR | O_RSYNC);
294 if (info->fd == -1) {
295 RTE_LOG(ERR, CHANNEL_MANAGER, "Error(%s) opening fifo for '%s'\n",
301 /* Get current flags */
302 flags = fcntl(info->fd, F_GETFL, 0);
304 RTE_LOG(WARNING, CHANNEL_MANAGER, "Error(%s) fcntl get flags socket for"
305 "'%s'\n", strerror(errno), info->channel_path);
308 /* Set to Non Blocking */
310 if (fcntl(info->fd, F_SETFL, flags) < 0) {
311 RTE_LOG(WARNING, CHANNEL_MANAGER,
312 "Error(%s) setting non-blocking "
314 strerror(errno), info->channel_path);
321 setup_channel_info(struct virtual_machine_info **vm_info_dptr,
322 struct channel_info **chan_info_dptr, unsigned channel_num)
324 struct channel_info *chan_info = *chan_info_dptr;
325 struct virtual_machine_info *vm_info = *vm_info_dptr;
327 chan_info->channel_num = channel_num;
328 chan_info->priv_info = (void *)vm_info;
329 chan_info->status = CHANNEL_MGR_CHANNEL_DISCONNECTED;
330 chan_info->type = CHANNEL_TYPE_BINARY;
331 if (open_non_blocking_channel(chan_info) < 0) {
332 RTE_LOG(ERR, CHANNEL_MANAGER, "Could not open channel: "
333 "'%s' for VM '%s'\n",
334 chan_info->channel_path, vm_info->name);
337 if (add_channel_to_monitor(&chan_info) < 0) {
338 RTE_LOG(ERR, CHANNEL_MANAGER, "Could add channel: "
339 "'%s' to epoll ctl for VM '%s'\n",
340 chan_info->channel_path, vm_info->name);
344 rte_spinlock_lock(&(vm_info->config_spinlock));
345 vm_info->num_channels++;
346 vm_info->channel_mask |= 1ULL << channel_num;
347 vm_info->channels[channel_num] = chan_info;
348 chan_info->status = CHANNEL_MGR_CHANNEL_CONNECTED;
349 rte_spinlock_unlock(&(vm_info->config_spinlock));
354 fifo_path(char *dst, unsigned int len)
356 snprintf(dst, len, "%sfifo", CHANNEL_MGR_SOCKET_PATH);
360 setup_host_channel_info(struct channel_info **chan_info_dptr,
361 unsigned int channel_num)
363 struct channel_info *chan_info = *chan_info_dptr;
365 chan_info->channel_num = channel_num;
366 chan_info->priv_info = (void *)NULL;
367 chan_info->status = CHANNEL_MGR_CHANNEL_DISCONNECTED;
368 chan_info->type = CHANNEL_TYPE_JSON;
370 fifo_path(chan_info->channel_path, sizeof(chan_info->channel_path));
372 if (open_host_channel(chan_info) < 0) {
373 RTE_LOG(ERR, CHANNEL_MANAGER, "Could not open host channel: "
375 chan_info->channel_path);
378 if (add_channel_to_monitor(&chan_info) < 0) {
379 RTE_LOG(ERR, CHANNEL_MANAGER, "Could add channel: "
380 "'%s' to epoll ctl\n",
381 chan_info->channel_path);
385 chan_info->status = CHANNEL_MGR_CHANNEL_CONNECTED;
390 add_all_channels(const char *vm_name)
394 struct virtual_machine_info *vm_info;
395 struct channel_info *chan_info;
396 char *token, *remaining, *tail_ptr;
397 char socket_name[PATH_MAX];
398 unsigned channel_num;
399 int num_channels_enabled = 0;
401 /* verify VM exists */
402 vm_info = find_domain_by_name(vm_name);
403 if (vm_info == NULL) {
404 RTE_LOG(ERR, CHANNEL_MANAGER, "VM: '%s' not found"
405 " during channel discovery\n", vm_name);
408 if (!virDomainIsActive(vm_info->domainPtr)) {
409 RTE_LOG(ERR, CHANNEL_MANAGER, "VM: '%s' is not active\n", vm_name);
410 vm_info->status = CHANNEL_MGR_VM_INACTIVE;
413 d = opendir(CHANNEL_MGR_SOCKET_PATH);
415 RTE_LOG(ERR, CHANNEL_MANAGER, "Error opening directory '%s': %s\n",
416 CHANNEL_MGR_SOCKET_PATH, strerror(errno));
419 while ((dir = readdir(d)) != NULL) {
420 if (!strncmp(dir->d_name, ".", 1) ||
421 !strncmp(dir->d_name, "..", 2))
424 snprintf(socket_name, sizeof(socket_name), "%s", dir->d_name);
425 remaining = socket_name;
426 /* Extract vm_name from "<vm_name>.<channel_num>" */
427 token = strsep(&remaining, ".");
428 if (remaining == NULL)
430 if (strncmp(vm_name, token, CHANNEL_MGR_MAX_NAME_LEN))
433 /* remaining should contain only <channel_num> */
435 channel_num = (unsigned)strtol(remaining, &tail_ptr, 0);
436 if ((errno != 0) || (remaining[0] == '\0') ||
437 tail_ptr == NULL || (*tail_ptr != '\0')) {
438 RTE_LOG(WARNING, CHANNEL_MANAGER, "Malformed channel name"
439 "'%s' found it should be in the form of "
440 "'<guest_name>.<channel_num>(decimal)'\n",
444 if (channel_num >= CHANNEL_CMDS_MAX_VM_CHANNELS) {
445 RTE_LOG(WARNING, CHANNEL_MANAGER, "Channel number(%u) is "
446 "greater than max allowable: %d, skipping '%s%s'\n",
447 channel_num, CHANNEL_CMDS_MAX_VM_CHANNELS-1,
448 CHANNEL_MGR_SOCKET_PATH, dir->d_name);
451 /* if channel has not been added previously */
452 if (channel_exists(vm_info, channel_num))
455 chan_info = rte_malloc(NULL, sizeof(*chan_info),
456 RTE_CACHE_LINE_SIZE);
457 if (chan_info == NULL) {
458 RTE_LOG(ERR, CHANNEL_MANAGER, "Error allocating memory for "
459 "channel '%s%s'\n", CHANNEL_MGR_SOCKET_PATH, dir->d_name);
463 snprintf(chan_info->channel_path,
464 sizeof(chan_info->channel_path), "%s%s",
465 CHANNEL_MGR_SOCKET_PATH, dir->d_name);
467 if (setup_channel_info(&vm_info, &chan_info, channel_num) < 0) {
472 num_channels_enabled++;
475 return num_channels_enabled;
479 add_channels(const char *vm_name, unsigned *channel_list,
480 unsigned len_channel_list)
482 struct virtual_machine_info *vm_info;
483 struct channel_info *chan_info;
484 char socket_path[PATH_MAX];
486 int num_channels_enabled = 0;
488 vm_info = find_domain_by_name(vm_name);
489 if (vm_info == NULL) {
490 RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to add channels: VM '%s' "
491 "not found\n", vm_name);
495 if (!virDomainIsActive(vm_info->domainPtr)) {
496 RTE_LOG(ERR, CHANNEL_MANAGER, "VM: '%s' is not active\n", vm_name);
497 vm_info->status = CHANNEL_MGR_VM_INACTIVE;
501 for (i = 0; i < len_channel_list; i++) {
503 if (channel_list[i] >= CHANNEL_CMDS_MAX_VM_CHANNELS) {
504 RTE_LOG(INFO, CHANNEL_MANAGER, "Channel(%u) is out of range "
505 "0...%d\n", channel_list[i],
506 CHANNEL_CMDS_MAX_VM_CHANNELS-1);
509 if (channel_exists(vm_info, channel_list[i])) {
510 RTE_LOG(INFO, CHANNEL_MANAGER, "Channel already exists, skipping "
511 "'%s.%u'\n", vm_name, i);
515 snprintf(socket_path, sizeof(socket_path), "%s%s.%u",
516 CHANNEL_MGR_SOCKET_PATH, vm_name, channel_list[i]);
518 if (access(socket_path, F_OK) < 0) {
519 RTE_LOG(ERR, CHANNEL_MANAGER, "Channel path '%s' error: "
520 "%s\n", socket_path, strerror(errno));
523 chan_info = rte_malloc(NULL, sizeof(*chan_info),
524 RTE_CACHE_LINE_SIZE);
525 if (chan_info == NULL) {
526 RTE_LOG(ERR, CHANNEL_MANAGER, "Error allocating memory for "
527 "channel '%s'\n", socket_path);
530 snprintf(chan_info->channel_path,
531 sizeof(chan_info->channel_path), "%s%s.%u",
532 CHANNEL_MGR_SOCKET_PATH, vm_name, channel_list[i]);
533 if (setup_channel_info(&vm_info, &chan_info, channel_list[i]) < 0) {
537 num_channels_enabled++;
540 return num_channels_enabled;
544 add_host_channel(void)
546 struct channel_info *chan_info;
547 char socket_path[PATH_MAX];
548 int num_channels_enabled = 0;
551 fifo_path(socket_path, sizeof(socket_path));
553 ret = mkfifo(socket_path, 0660);
554 if ((errno != EEXIST) && (ret < 0)) {
555 RTE_LOG(ERR, CHANNEL_MANAGER, "Cannot create fifo '%s' error: "
556 "%s\n", socket_path, strerror(errno));
560 if (access(socket_path, F_OK) < 0) {
561 RTE_LOG(ERR, CHANNEL_MANAGER, "Channel path '%s' error: "
562 "%s\n", socket_path, strerror(errno));
565 chan_info = rte_malloc(NULL, sizeof(*chan_info), 0);
566 if (chan_info == NULL) {
567 RTE_LOG(ERR, CHANNEL_MANAGER, "Error allocating memory for "
568 "channel '%s'\n", socket_path);
571 snprintf(chan_info->channel_path,
572 sizeof(chan_info->channel_path), "%s", socket_path);
573 if (setup_host_channel_info(&chan_info, 0) < 0) {
577 num_channels_enabled++;
579 return num_channels_enabled;
583 remove_channel(struct channel_info **chan_info_dptr)
585 struct virtual_machine_info *vm_info;
586 struct channel_info *chan_info = *chan_info_dptr;
588 close(chan_info->fd);
590 vm_info = (struct virtual_machine_info *)chan_info->priv_info;
592 rte_spinlock_lock(&(vm_info->config_spinlock));
593 vm_info->channel_mask &= ~(1ULL << chan_info->channel_num);
594 vm_info->num_channels--;
595 rte_spinlock_unlock(&(vm_info->config_spinlock));
602 set_channel_status_all(const char *vm_name, enum channel_status status)
604 struct virtual_machine_info *vm_info;
607 int num_channels_changed = 0;
609 if (!(status == CHANNEL_MGR_CHANNEL_CONNECTED ||
610 status == CHANNEL_MGR_CHANNEL_DISABLED)) {
611 RTE_LOG(ERR, CHANNEL_MANAGER, "Channels can only be enabled or "
612 "disabled: Unable to change status for VM '%s'\n", vm_name);
614 vm_info = find_domain_by_name(vm_name);
615 if (vm_info == NULL) {
616 RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to disable channels: VM '%s' "
617 "not found\n", vm_name);
621 rte_spinlock_lock(&(vm_info->config_spinlock));
622 mask = vm_info->channel_mask;
623 ITERATIVE_BITMASK_CHECK_64(mask, i) {
624 vm_info->channels[i]->status = status;
625 num_channels_changed++;
627 rte_spinlock_unlock(&(vm_info->config_spinlock));
628 return num_channels_changed;
633 set_channel_status(const char *vm_name, unsigned *channel_list,
634 unsigned len_channel_list, enum channel_status status)
636 struct virtual_machine_info *vm_info;
638 int num_channels_changed = 0;
640 if (!(status == CHANNEL_MGR_CHANNEL_CONNECTED ||
641 status == CHANNEL_MGR_CHANNEL_DISABLED)) {
642 RTE_LOG(ERR, CHANNEL_MANAGER, "Channels can only be enabled or "
643 "disabled: Unable to change status for VM '%s'\n", vm_name);
645 vm_info = find_domain_by_name(vm_name);
646 if (vm_info == NULL) {
647 RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to add channels: VM '%s' "
648 "not found\n", vm_name);
651 for (i = 0; i < len_channel_list; i++) {
652 if (channel_exists(vm_info, channel_list[i])) {
653 rte_spinlock_lock(&(vm_info->config_spinlock));
654 vm_info->channels[channel_list[i]]->status = status;
655 rte_spinlock_unlock(&(vm_info->config_spinlock));
656 num_channels_changed++;
659 return num_channels_changed;
663 get_all_vm(int *num_vm, int *num_vcpu)
666 virNodeInfo node_info;
667 virDomainPtr *domptr;
669 int i, ii, numVcpus[MAX_VCPUS], cpu, n_vcpus;
672 unsigned int domain_flags = VIR_CONNECT_LIST_DOMAINS_RUNNING |
673 VIR_CONNECT_LIST_DOMAINS_PERSISTENT;
674 unsigned int domain_flag = VIR_DOMAIN_VCPU_CONFIG;
676 if (!global_hypervisor_available)
679 memset(global_cpumaps, 0, CHANNEL_CMDS_MAX_CPUS*global_maplen);
680 if (virNodeGetInfo(global_vir_conn_ptr, &node_info)) {
681 RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to retrieve node Info\n");
685 /* Returns number of pcpus */
686 global_n_host_cpus = (unsigned int)node_info.cpus;
688 /* Returns number of active domains */
689 *num_vm = virConnectListAllDomains(global_vir_conn_ptr, &domptr,
692 RTE_LOG(ERR, CHANNEL_MANAGER, "No Active Domains Running\n");
696 for (i = 0; i < *num_vm; i++) {
698 /* Get Domain Names */
699 vm_name = virDomainGetName(domptr[i]);
700 lvm_info[i].vm_name = vm_name;
702 /* Get Number of Vcpus */
703 numVcpus[i] = virDomainGetVcpusFlags(domptr[i], domain_flag);
705 /* Get Number of VCpus & VcpuPinInfo */
706 n_vcpus = virDomainGetVcpuPinInfo(domptr[i],
707 numVcpus[i], global_cpumaps,
708 global_maplen, domain_flag);
710 if ((int)n_vcpus > 0) {
712 lvm_info[i].num_cpus = n_vcpus;
715 /* Save pcpu in use by libvirt VMs */
716 for (ii = 0; ii < n_vcpus; ii++) {
718 for (jj = 0; jj < global_n_host_cpus; jj++) {
719 if (VIR_CPU_USABLE(global_cpumaps,
720 global_maplen, ii, jj) > 0) {
724 ITERATIVE_BITMASK_CHECK_64(mask, cpu) {
725 lvm_info[i].pcpus[ii] = cpu;
732 get_info_vm(const char *vm_name, struct vm_info *info)
734 struct virtual_machine_info *vm_info;
735 unsigned i, channel_num = 0;
738 vm_info = find_domain_by_name(vm_name);
739 if (vm_info == NULL) {
740 RTE_LOG(ERR, CHANNEL_MANAGER, "VM '%s' not found\n", vm_name);
743 info->status = CHANNEL_MGR_VM_ACTIVE;
744 if (!virDomainIsActive(vm_info->domainPtr))
745 info->status = CHANNEL_MGR_VM_INACTIVE;
747 rte_spinlock_lock(&(vm_info->config_spinlock));
749 mask = vm_info->channel_mask;
750 ITERATIVE_BITMASK_CHECK_64(mask, i) {
751 info->channels[channel_num].channel_num = i;
752 memcpy(info->channels[channel_num].channel_path,
753 vm_info->channels[i]->channel_path, UNIX_PATH_MAX);
754 info->channels[channel_num].status = vm_info->channels[i]->status;
755 info->channels[channel_num].fd = vm_info->channels[i]->fd;
759 info->num_channels = channel_num;
760 info->num_vcpus = vm_info->info.nrVirtCpu;
761 rte_spinlock_unlock(&(vm_info->config_spinlock));
763 memcpy(info->name, vm_info->name, sizeof(vm_info->name));
764 for (i = 0; i < info->num_vcpus; i++) {
765 info->pcpu_mask[i] = rte_atomic64_read(&vm_info->pcpu_mask[i]);
771 add_vm(const char *vm_name)
773 struct virtual_machine_info *new_domain;
774 virDomainPtr dom_ptr;
777 if (find_domain_by_name(vm_name) != NULL) {
778 RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to add VM: VM '%s' "
779 "already exists\n", vm_name);
783 if (global_vir_conn_ptr == NULL) {
784 RTE_LOG(ERR, CHANNEL_MANAGER, "No connection to hypervisor exists\n");
787 dom_ptr = virDomainLookupByName(global_vir_conn_ptr, vm_name);
788 if (dom_ptr == NULL) {
789 RTE_LOG(ERR, CHANNEL_MANAGER, "Error on VM lookup with libvirt: "
790 "VM '%s' not found\n", vm_name);
794 new_domain = rte_malloc("virtual_machine_info", sizeof(*new_domain),
795 RTE_CACHE_LINE_SIZE);
796 if (new_domain == NULL) {
797 RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to allocate memory for VM "
801 new_domain->domainPtr = dom_ptr;
802 if (virDomainGetInfo(new_domain->domainPtr, &new_domain->info) != 0) {
803 RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to get libvirt VM info\n");
804 rte_free(new_domain);
807 if (new_domain->info.nrVirtCpu > CHANNEL_CMDS_MAX_CPUS) {
808 RTE_LOG(ERR, CHANNEL_MANAGER, "Error the number of virtual CPUs(%u) is "
809 "greater than allowable(%d)\n", new_domain->info.nrVirtCpu,
810 CHANNEL_CMDS_MAX_CPUS);
811 rte_free(new_domain);
815 for (i = 0; i < CHANNEL_CMDS_MAX_CPUS; i++) {
816 rte_atomic64_init(&new_domain->pcpu_mask[i]);
818 if (update_pcpus_mask(new_domain) < 0) {
819 RTE_LOG(ERR, CHANNEL_MANAGER, "Error getting physical CPU pinning\n");
820 rte_free(new_domain);
823 strncpy(new_domain->name, vm_name, sizeof(new_domain->name));
824 new_domain->name[sizeof(new_domain->name) - 1] = '\0';
825 new_domain->channel_mask = 0;
826 new_domain->num_channels = 0;
828 if (!virDomainIsActive(dom_ptr))
829 new_domain->status = CHANNEL_MGR_VM_INACTIVE;
831 new_domain->status = CHANNEL_MGR_VM_ACTIVE;
833 rte_spinlock_init(&(new_domain->config_spinlock));
834 LIST_INSERT_HEAD(&vm_list_head, new_domain, vms_info);
839 remove_vm(const char *vm_name)
841 struct virtual_machine_info *vm_info = find_domain_by_name(vm_name);
843 if (vm_info == NULL) {
844 RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to remove VM: VM '%s' "
845 "not found\n", vm_name);
848 rte_spinlock_lock(&vm_info->config_spinlock);
849 if (vm_info->num_channels != 0) {
850 RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to remove VM '%s', there are "
851 "%"PRId8" channels still active\n",
852 vm_name, vm_info->num_channels);
853 rte_spinlock_unlock(&vm_info->config_spinlock);
856 LIST_REMOVE(vm_info, vms_info);
857 rte_spinlock_unlock(&vm_info->config_spinlock);
863 disconnect_hypervisor(void)
865 if (global_vir_conn_ptr != NULL) {
866 virConnectClose(global_vir_conn_ptr);
867 global_vir_conn_ptr = NULL;
872 connect_hypervisor(const char *path)
874 if (global_vir_conn_ptr != NULL) {
875 RTE_LOG(ERR, CHANNEL_MANAGER, "Error connecting to %s, connection "
876 "already established\n", path);
879 global_vir_conn_ptr = virConnectOpen(path);
880 if (global_vir_conn_ptr == NULL) {
881 RTE_LOG(ERR, CHANNEL_MANAGER, "Error failed to open connection to "
882 "Hypervisor '%s'\n", path);
888 channel_manager_init(const char *path __rte_unused)
892 LIST_INIT(&vm_list_head);
893 if (connect_hypervisor(path) < 0) {
894 global_n_host_cpus = 64;
895 global_hypervisor_available = 0;
896 RTE_LOG(INFO, CHANNEL_MANAGER, "Unable to initialize channel manager\n");
898 global_hypervisor_available = 1;
900 global_maplen = VIR_CPU_MAPLEN(CHANNEL_CMDS_MAX_CPUS);
902 global_vircpuinfo = rte_zmalloc(NULL,
903 sizeof(*global_vircpuinfo) *
904 CHANNEL_CMDS_MAX_CPUS, RTE_CACHE_LINE_SIZE);
905 if (global_vircpuinfo == NULL) {
906 RTE_LOG(ERR, CHANNEL_MANAGER, "Error allocating memory for CPU Info\n");
909 global_cpumaps = rte_zmalloc(NULL,
910 CHANNEL_CMDS_MAX_CPUS * global_maplen,
911 RTE_CACHE_LINE_SIZE);
912 if (global_cpumaps == NULL)
915 if (virNodeGetInfo(global_vir_conn_ptr, &info)) {
916 RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to retrieve node Info\n");
919 global_n_host_cpus = (unsigned int)info.cpus;
924 if (global_n_host_cpus > CHANNEL_CMDS_MAX_CPUS) {
925 RTE_LOG(WARNING, CHANNEL_MANAGER, "The number of host CPUs(%u) exceeds the "
926 "maximum of %u. No cores over %u should be used.\n",
927 global_n_host_cpus, CHANNEL_CMDS_MAX_CPUS,
928 CHANNEL_CMDS_MAX_CPUS - 1);
929 global_n_host_cpus = CHANNEL_CMDS_MAX_CPUS;
934 if (global_hypervisor_available)
935 disconnect_hypervisor();
940 channel_manager_exit(void)
944 struct virtual_machine_info *vm_info;
946 LIST_FOREACH(vm_info, &vm_list_head, vms_info) {
948 rte_spinlock_lock(&(vm_info->config_spinlock));
950 mask = vm_info->channel_mask;
951 ITERATIVE_BITMASK_CHECK_64(mask, i) {
952 remove_channel_from_monitor(vm_info->channels[i]);
953 close(vm_info->channels[i]->fd);
954 rte_free(vm_info->channels[i]);
956 rte_spinlock_unlock(&(vm_info->config_spinlock));
958 LIST_REMOVE(vm_info, vms_info);
962 if (global_hypervisor_available) {
963 /* Only needed if hypervisor available */
964 rte_free(global_cpumaps);
965 rte_free(global_vircpuinfo);
966 disconnect_hypervisor();