/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation
 */

#include <unistd.h>
#include <pthread.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/epoll.h>
#include <linux/virtio_net.h>
#include <stdbool.h>

#include <rte_malloc.h>
#include <rte_memory.h>
#include <rte_bus_pci.h>
#include <rte_vhost.h>
#include <rte_vdpa.h>
#include <rte_vfio.h>
#include <rte_spinlock.h>
#include <rte_log.h>
#include <rte_kvargs.h>
#include <rte_devargs.h>

#include "base/ifcvf.h"

#define DRV_LOG(level, fmt, args...) \
	rte_log(RTE_LOG_ ## level, ifcvf_vdpa_logtype, \
		"IFCVF %s(): " fmt "\n", __func__, ##args)

#define PAGE_SIZE 4096

#define IFCVF_USED_RING_LEN(size) \
	((size) * sizeof(struct vring_used_elem) + sizeof(uint16_t) * 3)

#define IFCVF_VDPA_MODE		"vdpa"
#define IFCVF_SW_FALLBACK_LM	"sw-live-migration"

static const char * const ifcvf_valid_arguments[] = {
	IFCVF_VDPA_MODE,
	IFCVF_SW_FALLBACK_LM,
	NULL
};

static int ifcvf_vdpa_logtype;
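
/*
 * Per-device driver context: the PCI device, its VFIO descriptors, the
 * hardware handle, the notify-relay thread, and atomic flags tracking
 * whether the driver is started, a vhost device is attached, and the
 * datapath is running.
 */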
struct ifcvf_internal {
	struct rte_vdpa_dev_addr dev_addr;
	struct rte_pci_device *pdev;
	struct ifcvf_hw hw;
	int vfio_container_fd;
	int vfio_group_fd;
	int vfio_dev_fd;
	pthread_t tid;	/* thread for notify relay */
	int epfd;
	int vid;
	int did;
	uint16_t max_queues;
	uint64_t features;
	rte_atomic32_t started;
	rte_atomic32_t dev_attached;
	rte_atomic32_t running;
	rte_spinlock_t lock;
	bool sw_lm;
};

struct internal_list {
	TAILQ_ENTRY(internal_list) next;
	struct ifcvf_internal *internal;
};

TAILQ_HEAD(internal_list_head, internal_list);
static struct internal_list_head internal_list =
	TAILQ_HEAD_INITIALIZER(internal_list);

static pthread_mutex_t internal_list_lock = PTHREAD_MUTEX_INITIALIZER;
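
/*
 * Look up a registered device by vDPA device id; returns NULL when the id
 * is unknown. The list lock is held only for the duration of the walk.
 */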
static struct internal_list *
find_internal_resource_by_did(int did)
{
	int found = 0;
	struct internal_list *list;

	pthread_mutex_lock(&internal_list_lock);

	TAILQ_FOREACH(list, &internal_list, next) {
		if (did == list->internal->did) {
			found = 1;
			break;
		}
	}

	pthread_mutex_unlock(&internal_list_lock);

	if (!found)
		return NULL;

	return list;
}
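
/* Same lookup as above, keyed by the PCI device pointer instead. */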
static struct internal_list *
find_internal_resource_by_dev(struct rte_pci_device *pdev)
{
	int found = 0;
	struct internal_list *list;

	pthread_mutex_lock(&internal_list_lock);

	TAILQ_FOREACH(list, &internal_list, next) {
		if (pdev == list->internal->pdev) {
			found = 1;
			break;
		}
	}

	pthread_mutex_unlock(&internal_list_lock);

	if (!found)
		return NULL;

	return list;
}
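
/*
 * Put the VF into its own VFIO container and map its BARs, so the device
 * can be programmed and guest memory can later be DMA-mapped into the
 * device's IOMMU domain.
 */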
static int
ifcvf_vfio_setup(struct ifcvf_internal *internal)
{
	struct rte_pci_device *dev = internal->pdev;
	char devname[RTE_DEV_NAME_MAX_LEN] = {0};
	int iommu_group_num;
	int i;

	internal->vfio_dev_fd = -1;
	internal->vfio_group_fd = -1;
	internal->vfio_container_fd = -1;

	rte_pci_device_name(&dev->addr, devname, RTE_DEV_NAME_MAX_LEN);
	rte_vfio_get_group_num(rte_pci_get_sysfs_path(), devname,
			&iommu_group_num);

	internal->vfio_container_fd = rte_vfio_container_create();
	if (internal->vfio_container_fd < 0)
		return -1;

	internal->vfio_group_fd = rte_vfio_container_group_bind(
			internal->vfio_container_fd, iommu_group_num);
	if (internal->vfio_group_fd < 0)
		goto err;

	if (rte_pci_map_device(dev))
		goto err;

	internal->vfio_dev_fd = dev->intr_handle.vfio_dev_fd;

	for (i = 0; i < RTE_MIN(PCI_MAX_RESOURCE, IFCVF_PCI_MAX_RESOURCE);
			i++) {
		internal->hw.mem_resource[i].addr =
			internal->pdev->mem_resource[i].addr;
		internal->hw.mem_resource[i].phys_addr =
			internal->pdev->mem_resource[i].phys_addr;
		internal->hw.mem_resource[i].len =
			internal->pdev->mem_resource[i].len;
	}

	return 0;

err:
	rte_vfio_container_destroy(internal->vfio_container_fd);
	return -1;
}
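
/*
 * Map or unmap every region of the guest memory table in the VFIO
 * container, using guest physical addresses as IOVAs so the device can
 * DMA straight into guest memory.
 */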
static int
ifcvf_dma_map(struct ifcvf_internal *internal, int do_map)
{
	uint32_t i;
	int ret;
	struct rte_vhost_memory *mem = NULL;
	int vfio_container_fd;

	ret = rte_vhost_get_mem_table(internal->vid, &mem);
	if (ret < 0) {
		DRV_LOG(ERR, "failed to get VM memory layout.");
		goto exit;
	}

	vfio_container_fd = internal->vfio_container_fd;

	for (i = 0; i < mem->nregions; i++) {
		struct rte_vhost_mem_region *reg;

		reg = &mem->regions[i];
		DRV_LOG(INFO, "%s, region %u: HVA 0x%" PRIx64 ", "
			"GPA 0x%" PRIx64 ", size 0x%" PRIx64 ".",
			do_map ? "DMA map" : "DMA unmap", i,
			reg->host_user_addr, reg->guest_phys_addr, reg->size);

		if (do_map)
			ret = rte_vfio_container_dma_map(vfio_container_fd,
				reg->host_user_addr, reg->guest_phys_addr,
				reg->size);
		else
			ret = rte_vfio_container_dma_unmap(vfio_container_fd,
				reg->host_user_addr, reg->guest_phys_addr,
				reg->size);
		if (ret < 0) {
			DRV_LOG(ERR, "DMA %s failed.",
				do_map ? "map" : "unmap");
			goto exit;
		}
	}

exit:
	if (mem)
		free(mem);
	return ret;
}
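
/*
 * Translate a host virtual address from the vhost memory table to the
 * guest physical address the device must use; returns 0 when no region
 * covers the address.
 */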
static uint64_t
hva_to_gpa(int vid, uint64_t hva)
{
	struct rte_vhost_memory *mem = NULL;
	struct rte_vhost_mem_region *reg;
	uint32_t i;
	uint64_t gpa = 0;

	if (rte_vhost_get_mem_table(vid, &mem) < 0)
		goto exit;

	for (i = 0; i < mem->nregions; i++) {
		reg = &mem->regions[i];
		if (hva >= reg->host_user_addr &&
				hva < reg->host_user_addr + reg->size) {
			gpa = hva - reg->host_user_addr + reg->guest_phys_addr;
			break;
		}
	}

exit:
	if (mem)
		free(mem);
	return gpa;
}
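
/*
 * Program the hardware with each vring's GPA-translated addresses, size
 * and last-seen indexes, then start the device.
 */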
static int
vdpa_ifcvf_start(struct ifcvf_internal *internal)
{
	struct ifcvf_hw *hw = &internal->hw;
	int i, nr_vring;
	int vid;
	struct rte_vhost_vring vq;
	uint64_t gpa;

	vid = internal->vid;
	nr_vring = rte_vhost_get_vring_num(vid);
	rte_vhost_get_negotiated_features(vid, &hw->req_features);

	for (i = 0; i < nr_vring; i++) {
		rte_vhost_get_vhost_vring(vid, i, &vq);
		gpa = hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.desc);
		if (gpa == 0) {
			DRV_LOG(ERR, "Failed to get GPA for descriptor ring.");
			return -1;
		}
		hw->vring[i].desc = gpa;

		gpa = hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.avail);
		if (gpa == 0) {
			DRV_LOG(ERR, "Failed to get GPA for available ring.");
			return -1;
		}
		hw->vring[i].avail = gpa;

		gpa = hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.used);
		if (gpa == 0) {
			DRV_LOG(ERR, "Failed to get GPA for used ring.");
			return -1;
		}
		hw->vring[i].used = gpa;

		hw->vring[i].size = vq.size;
		rte_vhost_get_vring_base(vid, i, &hw->vring[i].last_avail_idx,
				&hw->vring[i].last_used_idx);
	}
	hw->nr_vring = i;

	return ifcvf_start_hw(&internal->hw);
}
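
/*
 * Stop the hardware and sync the final vring indexes back into vhost so a
 * restart or live migration resumes from a consistent state.
 */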
static void
vdpa_ifcvf_stop(struct ifcvf_internal *internal)
{
	struct ifcvf_hw *hw = &internal->hw;
	uint32_t i;
	int vid;
	uint64_t features;
	uint64_t log_base, log_size;
	uint64_t len;

	vid = internal->vid;
	ifcvf_stop_hw(hw);

	for (i = 0; i < hw->nr_vring; i++)
		rte_vhost_set_vring_base(vid, i, hw->vring[i].last_avail_idx,
				hw->vring[i].last_used_idx);

	rte_vhost_get_negotiated_features(vid, &features);
	if (RTE_VHOST_NEED_LOG(features)) {
		ifcvf_disable_logging(hw);
		rte_vhost_get_log_base(internal->vid, &log_base, &log_size);
		rte_vfio_container_dma_unmap(internal->vfio_container_fd,
				log_base, IFCVF_LOG_BASE, log_size);
		/*
		 * IFCVF marks dirty pages for packet buffers only; software
		 * marks the used rings dirty after the device stops.
		 */
		for (i = 0; i < hw->nr_vring; i++) {
			len = IFCVF_USED_RING_LEN(hw->vring[i].size);
			rte_vhost_log_used_vring(vid, i, 0, len);
		}
	}
}
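
/*
 * MSI-X setup: one eventfd per vring plus one fd for the device's own
 * interrupt, all wired to the VF in a single VFIO_DEVICE_SET_IRQS call.
 */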
#define MSIX_IRQ_SET_BUF_LEN (sizeof(struct vfio_irq_set) + \
		sizeof(int) * (IFCVF_MAX_QUEUES * 2 + 1))

static int
vdpa_enable_vfio_intr(struct ifcvf_internal *internal)
{
	int ret;
	uint32_t i, nr_vring;
	char irq_set_buf[MSIX_IRQ_SET_BUF_LEN];
	struct vfio_irq_set *irq_set;
	int *fd_ptr;
	struct rte_vhost_vring vring;

	nr_vring = rte_vhost_get_vring_num(internal->vid);

	irq_set = (struct vfio_irq_set *)irq_set_buf;
	irq_set->argsz = sizeof(irq_set_buf);
	irq_set->count = nr_vring + 1;
	irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
			 VFIO_IRQ_SET_ACTION_TRIGGER;
	irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
	irq_set->start = 0;
	fd_ptr = (int *)&irq_set->data;
	fd_ptr[RTE_INTR_VEC_ZERO_OFFSET] = internal->pdev->intr_handle.fd;

	for (i = 0; i < nr_vring; i++) {
		rte_vhost_get_vhost_vring(internal->vid, i, &vring);
		fd_ptr[RTE_INTR_VEC_RXTX_OFFSET + i] = vring.callfd;
	}

	ret = ioctl(internal->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
	if (ret) {
		DRV_LOG(ERR, "Error enabling MSI-X interrupts: %s",
				strerror(errno));
		return -1;
	}

	return 0;
}

static int
vdpa_disable_vfio_intr(struct ifcvf_internal *internal)
{
	int ret;
	char irq_set_buf[MSIX_IRQ_SET_BUF_LEN];
	struct vfio_irq_set *irq_set;

	irq_set = (struct vfio_irq_set *)irq_set_buf;
	irq_set->argsz = sizeof(irq_set_buf);
	irq_set->count = 0;
	irq_set->flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER;
	irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
	irq_set->start = 0;

	ret = ioctl(internal->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
	if (ret) {
		DRV_LOG(ERR, "Error disabling MSI-X interrupts: %s",
				strerror(errno));
		return -1;
	}

	return 0;
}
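
/*
 * Relay thread used when guest kick eventfds cannot be mapped directly to
 * the device: epoll on every kickfd and forward each kick to the matching
 * hardware notify register.
 */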
static void *
notify_relay(void *arg)
{
	int i, kickfd, epfd, nfds = 0;
	uint32_t qid, q_num;
	struct epoll_event events[IFCVF_MAX_QUEUES * 2];
	struct epoll_event ev;
	uint64_t buf;
	int nbytes;
	struct rte_vhost_vring vring;
	struct ifcvf_internal *internal = (struct ifcvf_internal *)arg;
	struct ifcvf_hw *hw = &internal->hw;

	q_num = rte_vhost_get_vring_num(internal->vid);

	epfd = epoll_create(IFCVF_MAX_QUEUES * 2);
	if (epfd < 0) {
		DRV_LOG(ERR, "failed to create epoll instance.");
		return NULL;
	}
	internal->epfd = epfd;

	for (qid = 0; qid < q_num; qid++) {
		ev.events = EPOLLIN | EPOLLPRI;
		rte_vhost_get_vhost_vring(internal->vid, qid, &vring);
		ev.data.u64 = qid | (uint64_t)vring.kickfd << 32;
		if (epoll_ctl(epfd, EPOLL_CTL_ADD, vring.kickfd, &ev) < 0) {
			DRV_LOG(ERR, "epoll add error: %s", strerror(errno));
			return NULL;
		}
	}

	for (;;) {
		nfds = epoll_wait(epfd, events, q_num, -1);
		if (nfds < 0) {
			if (errno == EINTR)
				continue;
			DRV_LOG(ERR, "epoll_wait failed: %s", strerror(errno));
			return NULL;
		}

		for (i = 0; i < nfds; i++) {
			qid = events[i].data.u32;
			kickfd = (uint32_t)(events[i].data.u64 >> 32);
			do {
				nbytes = read(kickfd, &buf, 8);
				if (nbytes < 0) {
					if (errno == EINTR ||
					    errno == EWOULDBLOCK ||
					    errno == EAGAIN)
						continue;
					DRV_LOG(INFO, "Error reading "
						"kickfd: %s",
						strerror(errno));
				}
				break;
			} while (1);

			ifcvf_notify_queue(hw, qid);
		}
	}

	return NULL;
}

static int
setup_notify_relay(struct ifcvf_internal *internal)
{
	int ret;

	ret = pthread_create(&internal->tid, NULL, notify_relay,
			(void *)internal);
	if (ret) {
		DRV_LOG(ERR, "failed to create notify relay pthread.");
		return -1;
	}
	return 0;
}

static int
unset_notify_relay(struct ifcvf_internal *internal)
{
	void *status;

	if (internal->tid) {
		pthread_cancel(internal->tid);
		pthread_join(internal->tid, &status);
	}
	internal->tid = 0;

	if (internal->epfd >= 0)
		close(internal->epfd);
	internal->epfd = -1;

	return 0;
}
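
/*
 * Single start/stop state machine for the datapath, serialized by the
 * device lock: bring everything up once the driver is started and a vhost
 * device is attached, tear it down when either goes away.
 */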
static int
update_datapath(struct ifcvf_internal *internal)
{
	int ret;

	rte_spinlock_lock(&internal->lock);

	if (!rte_atomic32_read(&internal->running) &&
	    (rte_atomic32_read(&internal->started) &&
	     rte_atomic32_read(&internal->dev_attached))) {
		ret = ifcvf_dma_map(internal, 1);
		if (ret)
			goto err;

		ret = vdpa_enable_vfio_intr(internal);
		if (ret)
			goto err;

		ret = vdpa_ifcvf_start(internal);
		if (ret)
			goto err;

		ret = setup_notify_relay(internal);
		if (ret)
			goto err;

		rte_atomic32_set(&internal->running, 1);
	} else if (rte_atomic32_read(&internal->running) &&
		   (!rte_atomic32_read(&internal->started) ||
		    !rte_atomic32_read(&internal->dev_attached))) {
		ret = unset_notify_relay(internal);
		if (ret)
			goto err;

		vdpa_ifcvf_stop(internal);

		ret = vdpa_disable_vfio_intr(internal);
		if (ret)
			goto err;

		ret = ifcvf_dma_map(internal, 0);
		if (ret)
			goto err;

		rte_atomic32_set(&internal->running, 0);
	}

	rte_spinlock_unlock(&internal->lock);
	return 0;
err:
	rte_spinlock_unlock(&internal->lock);
	return ret;
}
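
/* Callbacks below are invoked by the vhost-user backend via ifcvf_ops. */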

static int
ifcvf_dev_config(int vid)
{
	int did;
	struct internal_list *list;
	struct ifcvf_internal *internal;

	did = rte_vhost_get_vdpa_device_id(vid);
	list = find_internal_resource_by_did(did);
	if (list == NULL) {
		DRV_LOG(ERR, "Invalid device id: %d", did);
		return -1;
	}

	internal = list->internal;
	internal->vid = vid;
	rte_atomic32_set(&internal->dev_attached, 1);
	update_datapath(internal);

	if (rte_vhost_host_notifier_ctrl(vid, true) != 0)
		DRV_LOG(NOTICE, "vDPA (%d): software relay is used.", did);

	return 0;
}

static int
ifcvf_dev_close(int vid)
{
	int did;
	struct internal_list *list;
	struct ifcvf_internal *internal;

	did = rte_vhost_get_vdpa_device_id(vid);
	list = find_internal_resource_by_did(did);
	if (list == NULL) {
		DRV_LOG(ERR, "Invalid device id: %d", did);
		return -1;
	}

	internal = list->internal;
	rte_atomic32_set(&internal->dev_attached, 0);
	update_datapath(internal);

	return 0;
}
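
/*
 * If the frontend negotiated dirty-page logging, map the log buffer into
 * the device's IOMMU domain at the fixed IFCVF_LOG_BASE IOVA and enable
 * logging in hardware.
 */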
static int
ifcvf_set_features(int vid)
{
	uint64_t features;
	int did;
	struct internal_list *list;
	struct ifcvf_internal *internal;
	uint64_t log_base, log_size;

	did = rte_vhost_get_vdpa_device_id(vid);
	list = find_internal_resource_by_did(did);
	if (list == NULL) {
		DRV_LOG(ERR, "Invalid device id: %d", did);
		return -1;
	}

	internal = list->internal;
	rte_vhost_get_negotiated_features(vid, &features);

	if (RTE_VHOST_NEED_LOG(features)) {
		rte_vhost_get_log_base(vid, &log_base, &log_size);
		rte_vfio_container_dma_map(internal->vfio_container_fd,
				log_base, IFCVF_LOG_BASE, log_size);
		ifcvf_enable_logging(&internal->hw, IFCVF_LOG_BASE, log_size);
	}

	return 0;
}

static int
ifcvf_get_vfio_group_fd(int vid)
{
	int did;
	struct internal_list *list;

	did = rte_vhost_get_vdpa_device_id(vid);
	list = find_internal_resource_by_did(did);
	if (list == NULL) {
		DRV_LOG(ERR, "Invalid device id: %d", did);
		return -1;
	}

	return list->internal->vfio_group_fd;
}

static int
ifcvf_get_vfio_device_fd(int vid)
{
	int did;
	struct internal_list *list;

	did = rte_vhost_get_vdpa_device_id(vid);
	list = find_internal_resource_by_did(did);
	if (list == NULL) {
		DRV_LOG(ERR, "Invalid device id: %d", did);
		return -1;
	}

	return list->internal->vfio_dev_fd;
}

static int
ifcvf_get_notify_area(int vid, int qid, uint64_t *offset, uint64_t *size)
{
	int did;
	struct internal_list *list;
	struct ifcvf_internal *internal;
	struct vfio_region_info reg = { .argsz = sizeof(reg) };
	int ret;

	did = rte_vhost_get_vdpa_device_id(vid);
	list = find_internal_resource_by_did(did);
	if (list == NULL) {
		DRV_LOG(ERR, "Invalid device id: %d", did);
		return -1;
	}

	internal = list->internal;

	reg.index = ifcvf_get_notify_region(&internal->hw);
	ret = ioctl(internal->vfio_dev_fd, VFIO_DEVICE_GET_REGION_INFO, &reg);
	if (ret) {
		DRV_LOG(ERR, "Failed to get device region info: %s",
				strerror(errno));
		return -1;
	}

	*offset = ifcvf_get_queue_notify_off(&internal->hw, qid) + reg.offset;
	*size = 0x1000;

	return 0;
}

static int
ifcvf_get_queue_num(int did, uint32_t *queue_num)
{
	struct internal_list *list;

	list = find_internal_resource_by_did(did);
	if (list == NULL) {
		DRV_LOG(ERR, "Invalid device id: %d", did);
		return -1;
	}

	*queue_num = list->internal->max_queues;

	return 0;
}

static int
ifcvf_get_vdpa_features(int did, uint64_t *features)
{
	struct internal_list *list;

	list = find_internal_resource_by_did(did);
	if (list == NULL) {
		DRV_LOG(ERR, "Invalid device id: %d", did);
		return -1;
	}

	*features = list->internal->features;

	return 0;
}

#define VDPA_SUPPORTED_PROTOCOL_FEATURES \
		(1ULL << VHOST_USER_PROTOCOL_F_REPLY_ACK | \
		 1ULL << VHOST_USER_PROTOCOL_F_SLAVE_REQ | \
		 1ULL << VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD | \
		 1ULL << VHOST_USER_PROTOCOL_F_HOST_NOTIFIER | \
		 1ULL << VHOST_USER_PROTOCOL_F_LOG_SHMFD)

static int
ifcvf_get_protocol_features(int did __rte_unused, uint64_t *features)
{
	*features = VDPA_SUPPORTED_PROTOCOL_FEATURES;
	return 0;
}
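
/* vDPA ops exposed to the vhost library; unimplemented hooks stay NULL. */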
static struct rte_vdpa_dev_ops ifcvf_ops = {
	.get_queue_num = ifcvf_get_queue_num,
	.get_features = ifcvf_get_vdpa_features,
	.get_protocol_features = ifcvf_get_protocol_features,
	.dev_conf = ifcvf_dev_config,
	.dev_close = ifcvf_dev_close,
	.set_vring_state = NULL,
	.set_features = ifcvf_set_features,
	.migration_done = NULL,
	.get_vfio_group_fd = ifcvf_get_vfio_group_fd,
	.get_vfio_device_fd = ifcvf_get_vfio_device_fd,
	.get_notify_area = ifcvf_get_notify_area,
};

static inline int
open_int(const char *key __rte_unused, const char *value, void *extra_args)
{
	uint16_t *n = extra_args;

	if (value == NULL || extra_args == NULL)
		return -EINVAL;

	*n = (uint16_t)strtoul(value, NULL, 0);
	if (*n == USHRT_MAX && errno == ERANGE)
		return -1;

	return 0;
}
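
/*
 * The driver claims a VF only when "vdpa=1" appears in its devargs, for
 * example (illustrative): -w 0000:06:00.3,vdpa=1,sw-live-migration=1
 */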
static int
ifcvf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
		struct rte_pci_device *pci_dev)
{
	uint64_t features;
	struct ifcvf_internal *internal = NULL;
	struct internal_list *list = NULL;
	int vdpa_mode = 0;
	int sw_fallback_lm = 0;
	struct rte_kvargs *kvlist = NULL;
	int ret = 0;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	if (pci_dev->device.devargs == NULL)
		return 1;

	kvlist = rte_kvargs_parse(pci_dev->device.devargs->args,
			ifcvf_valid_arguments);
	if (kvlist == NULL)
		return 1;

	/* probe only when vdpa mode is specified */
	if (rte_kvargs_count(kvlist, IFCVF_VDPA_MODE) == 0) {
		rte_kvargs_free(kvlist);
		return 1;
	}

	ret = rte_kvargs_process(kvlist, IFCVF_VDPA_MODE, &open_int,
			&vdpa_mode);
	if (ret < 0 || vdpa_mode == 0) {
		rte_kvargs_free(kvlist);
		return 1;
	}

	list = rte_zmalloc("ifcvf", sizeof(*list), 0);
	if (list == NULL)
		goto error;

	internal = rte_zmalloc("ifcvf", sizeof(*internal), 0);
	if (internal == NULL)
		goto error;

	internal->pdev = pci_dev;
	rte_spinlock_init(&internal->lock);

	if (ifcvf_vfio_setup(internal) < 0) {
		DRV_LOG(ERR, "failed to setup device %s", pci_dev->name);
		goto error;
	}

	if (ifcvf_init_hw(&internal->hw, internal->pdev) < 0) {
		DRV_LOG(ERR, "failed to init device %s", pci_dev->name);
		goto error;
	}

	internal->max_queues = IFCVF_MAX_QUEUES;
	features = ifcvf_get_features(&internal->hw);
	internal->features = (features &
		~(1ULL << VIRTIO_F_IOMMU_PLATFORM)) |
		(1ULL << VIRTIO_NET_F_GUEST_ANNOUNCE) |
		(1ULL << VIRTIO_NET_F_CTRL_VQ) |
		(1ULL << VIRTIO_NET_F_STATUS) |
		(1ULL << VHOST_USER_F_PROTOCOL_FEATURES) |
		(1ULL << VHOST_F_LOG_ALL);

	internal->dev_addr.pci_addr = pci_dev->addr;
	internal->dev_addr.type = PCI_ADDR;
	list->internal = internal;

	if (rte_kvargs_count(kvlist, IFCVF_SW_FALLBACK_LM)) {
		ret = rte_kvargs_process(kvlist, IFCVF_SW_FALLBACK_LM,
				&open_int, &sw_fallback_lm);
		if (ret < 0)
			goto error;
	}
	internal->sw_lm = sw_fallback_lm;

	internal->did = rte_vdpa_register_device(&internal->dev_addr,
				&ifcvf_ops);
	if (internal->did < 0) {
		DRV_LOG(ERR, "failed to register device %s", pci_dev->name);
		goto error;
	}

	pthread_mutex_lock(&internal_list_lock);
	TAILQ_INSERT_TAIL(&internal_list, list, next);
	pthread_mutex_unlock(&internal_list_lock);

	rte_atomic32_set(&internal->started, 1);
	update_datapath(internal);

	rte_kvargs_free(kvlist);
	return 0;

error:
	rte_kvargs_free(kvlist);
	rte_free(list);
	rte_free(internal);
	return -1;
}
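
/*
 * Tear down in reverse order of probe: stop the datapath, release the
 * VFIO resources, unregister from the vDPA core and drop the list entry.
 */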
static int
ifcvf_pci_remove(struct rte_pci_device *pci_dev)
{
	struct ifcvf_internal *internal;
	struct internal_list *list;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	list = find_internal_resource_by_dev(pci_dev);
	if (list == NULL) {
		DRV_LOG(ERR, "Invalid device: %s", pci_dev->name);
		return -1;
	}

	internal = list->internal;
	rte_atomic32_set(&internal->started, 0);
	update_datapath(internal);

	rte_pci_unmap_device(internal->pdev);
	rte_vfio_container_destroy(internal->vfio_container_fd);
	rte_vdpa_unregister_device(internal->did);

	pthread_mutex_lock(&internal_list_lock);
	TAILQ_REMOVE(&internal_list, list, next);
	pthread_mutex_unlock(&internal_list_lock);

	rte_free(list);
	rte_free(internal);

	return 0;
}

/*
 * IFCVF has the same vendor ID and device ID as a virtio net PCI device,
 * and is distinguished by its specific subsystem vendor and device IDs.
 */
static const struct rte_pci_id pci_id_ifcvf_map[] = {
	{ .class_id = RTE_CLASS_ANY_ID,
	  .vendor_id = IFCVF_VENDOR_ID,
	  .device_id = IFCVF_DEVICE_ID,
	  .subsystem_vendor_id = IFCVF_SUBSYS_VENDOR_ID,
	  .subsystem_device_id = IFCVF_SUBSYS_DEVICE_ID,
	},

	{ .vendor_id = 0, /* sentinel */
	},
};

static struct rte_pci_driver rte_ifcvf_vdpa = {
	.id_table = pci_id_ifcvf_map,
	.drv_flags = 0,
	.probe = ifcvf_pci_probe,
	.remove = ifcvf_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_ifcvf, rte_ifcvf_vdpa);
RTE_PMD_REGISTER_PCI_TABLE(net_ifcvf, pci_id_ifcvf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_ifcvf, "* vfio-pci");

RTE_INIT(ifcvf_vdpa_init_log)
{
	ifcvf_vdpa_logtype = rte_log_register("pmd.net.ifcvf_vdpa");
	if (ifcvf_vdpa_logtype >= 0)
		rte_log_set_level(ifcvf_vdpa_logtype, RTE_LOG_NOTICE);
}