/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation
 */

#include <sys/epoll.h>
#include <linux/virtio_net.h>

#include <rte_eal_paging.h>
#include <rte_malloc.h>
#include <rte_memory.h>
#include <rte_bus_pci.h>
#include <rte_vhost.h>
#include <vdpa_driver.h>
#include <rte_spinlock.h>
#include <rte_kvargs.h>
#include <rte_devargs.h>

#include "base/ifcvf.h"

RTE_LOG_REGISTER(ifcvf_vdpa_logtype, pmd.vdpa.ifcvf, NOTICE);
#define DRV_LOG(level, fmt, args...) \
	rte_log(RTE_LOG_ ## level, ifcvf_vdpa_logtype, \
		"IFCVF %s(): " fmt "\n", __func__, ##args)
#define IFCVF_USED_RING_LEN(size) \
	((size) * sizeof(struct vring_used_elem) + sizeof(uint16_t) * 3)

#define IFCVF_VDPA_MODE		"vdpa"
#define IFCVF_SW_FALLBACK_LM	"sw-live-migration"

#define THREAD_NAME_LEN	16
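
/*
 * Devargs accepted by this driver; an illustrative invocation (the PCI
 * address is an example) would be:
 *   -a 0000:06:00.3,vdpa=1,sw-live-migration=1
 */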
static const char * const ifcvf_valid_arguments[] = {
	IFCVF_VDPA_MODE,
	IFCVF_SW_FALLBACK_LM,
	NULL
};

struct ifcvf_internal {
	struct rte_pci_device *pdev;
	int vfio_container_fd;
	pthread_t tid;	/* thread for notify relay */
	struct rte_vdpa_device *vdev;
	rte_atomic32_t started;
	rte_atomic32_t dev_attached;
	rte_atomic32_t running;
	bool sw_fallback_running;
	/* mediated vring for sw fallback */
	struct vring m_vring[IFCVF_MAX_QUEUES * 2];
	/* eventfd for used ring interrupt */
	int intr_fd[IFCVF_MAX_QUEUES * 2];
};

struct internal_list {
	TAILQ_ENTRY(internal_list) next;
	struct ifcvf_internal *internal;
};

/* vDPA device info includes device features and device operation. */
struct rte_vdpa_dev_info {
	uint64_t features;
	struct rte_vdpa_dev_ops *ops;
};

TAILQ_HEAD(internal_list_head, internal_list);
static struct internal_list_head internal_list =
	TAILQ_HEAD_INITIALIZER(internal_list);

static pthread_mutex_t internal_list_lock = PTHREAD_MUTEX_INITIALIZER;

static void update_used_ring(struct ifcvf_internal *internal, uint16_t qid);
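
/*
 * Probed devices are kept on a global list so that vhost callbacks,
 * which only carry a vid or a device handle, can recover the driver
 * context; lookups take the list lock and match by vdev or PCI address.
 */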
static struct internal_list *
find_internal_resource_by_vdev(struct rte_vdpa_device *vdev)
{
	struct internal_list *list;

	pthread_mutex_lock(&internal_list_lock);

	TAILQ_FOREACH(list, &internal_list, next) {
		if (vdev == list->internal->vdev) {

	pthread_mutex_unlock(&internal_list_lock);

static struct internal_list *
find_internal_resource_by_dev(struct rte_pci_device *pdev)
{
	struct internal_list *list;

	pthread_mutex_lock(&internal_list_lock);

	TAILQ_FOREACH(list, &internal_list, next) {
		if (!rte_pci_addr_cmp(&pdev->addr,
				&list->internal->pdev->addr)) {

	pthread_mutex_unlock(&internal_list_lock);
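
/*
 * Attach the VF to its own VFIO container: resolve the IOMMU group
 * from sysfs, create a container, bind the group to it, then map the
 * PCI device so its BARs and interrupt handle become usable.
 */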
ifcvf_vfio_setup(struct ifcvf_internal *internal)
{
	struct rte_pci_device *dev = internal->pdev;
	char devname[RTE_DEV_NAME_MAX_LEN] = {0};

	internal->vfio_dev_fd = -1;
	internal->vfio_group_fd = -1;
	internal->vfio_container_fd = -1;

	rte_pci_device_name(&dev->addr, devname, RTE_DEV_NAME_MAX_LEN);
	ret = rte_vfio_get_group_num(rte_pci_get_sysfs_path(), devname,
			&iommu_group_num);
		DRV_LOG(ERR, "%s failed to get IOMMU group", devname);

	internal->vfio_container_fd = rte_vfio_container_create();
	if (internal->vfio_container_fd < 0)

	internal->vfio_group_fd = rte_vfio_container_group_bind(
			internal->vfio_container_fd, iommu_group_num);
	if (internal->vfio_group_fd < 0)

	if (rte_pci_map_device(dev))

	internal->vfio_dev_fd = rte_intr_dev_fd_get(dev->intr_handle);

	for (i = 0; i < RTE_MIN(PCI_MAX_RESOURCE, IFCVF_PCI_MAX_RESOURCE);
			i++) {
		internal->hw.mem_resource[i].addr =
			internal->pdev->mem_resource[i].addr;
		internal->hw.mem_resource[i].phys_addr =
			internal->pdev->mem_resource[i].phys_addr;
		internal->hw.mem_resource[i].len =
			internal->pdev->mem_resource[i].len;

	rte_vfio_container_destroy(internal->vfio_container_fd);
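
/*
 * Map (or unmap) every guest memory region into the VFIO container,
 * using the guest physical address as IOVA so the device can DMA with
 * the addresses the guest's virtio driver writes into the rings.
 */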
ifcvf_dma_map(struct ifcvf_internal *internal, bool do_map)
{
	struct rte_vhost_memory *mem = NULL;
	int vfio_container_fd;

	ret = rte_vhost_get_mem_table(internal->vid, &mem);
		DRV_LOG(ERR, "failed to get VM memory layout.");

	vfio_container_fd = internal->vfio_container_fd;

	for (i = 0; i < mem->nregions; i++) {
		struct rte_vhost_mem_region *reg;

		reg = &mem->regions[i];
		DRV_LOG(INFO, "%s, region %u: HVA 0x%" PRIx64 ", "
			"GPA 0x%" PRIx64 ", size 0x%" PRIx64 ".",
			do_map ? "DMA map" : "DMA unmap", i,
			reg->host_user_addr, reg->guest_phys_addr, reg->size);

		if (do_map) {
			ret = rte_vfio_container_dma_map(vfio_container_fd,
				reg->host_user_addr, reg->guest_phys_addr,
				reg->size);
				DRV_LOG(ERR, "DMA map failed.");
		} else {
			ret = rte_vfio_container_dma_unmap(vfio_container_fd,
				reg->host_user_addr, reg->guest_phys_addr,
				reg->size);
				DRV_LOG(ERR, "DMA unmap failed.");
hva_to_gpa(int vid, uint64_t hva)
{
	struct rte_vhost_memory *mem = NULL;
	struct rte_vhost_mem_region *reg;

	if (rte_vhost_get_mem_table(vid, &mem) < 0)

	for (i = 0; i < mem->nregions; i++) {
		reg = &mem->regions[i];

		if (hva >= reg->host_user_addr &&
				hva < reg->host_user_addr + reg->size) {
			gpa = hva - reg->host_user_addr + reg->guest_phys_addr;
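
/*
 * Start the hardware datapath: convert each vring's desc/avail/used
 * addresses from HVA to GPA, program them together with the saved ring
 * indexes into the VF, then start the device.
 */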
vdpa_ifcvf_start(struct ifcvf_internal *internal)
{
	struct ifcvf_hw *hw = &internal->hw;
	struct rte_vhost_vring vq;

	nr_vring = rte_vhost_get_vring_num(vid);
	rte_vhost_get_negotiated_features(vid, &hw->req_features);

	for (i = 0; i < nr_vring; i++) {
		rte_vhost_get_vhost_vring(vid, i, &vq);
		gpa = hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.desc);
			DRV_LOG(ERR, "Failed to get GPA for descriptor ring.");
		hw->vring[i].desc = gpa;

		gpa = hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.avail);
			DRV_LOG(ERR, "Failed to get GPA for available ring.");
		hw->vring[i].avail = gpa;

		gpa = hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.used);
			DRV_LOG(ERR, "Failed to get GPA for used ring.");
		hw->vring[i].used = gpa;

		hw->vring[i].size = vq.size;
		rte_vhost_get_vring_base(vid, i, &hw->vring[i].last_avail_idx,
				&hw->vring[i].last_used_idx);
	}

	return ifcvf_start_hw(&internal->hw);
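
/*
 * Stop the hardware datapath and push the final ring indexes back to
 * vhost; when dirty logging was negotiated, also disable logging,
 * unmap the log buffer, and mark the used rings dirty in software.
 */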
vdpa_ifcvf_stop(struct ifcvf_internal *internal)
{
	struct ifcvf_hw *hw = &internal->hw;
	uint64_t features = 0;
	uint64_t log_base = 0, log_size = 0;

	for (i = 0; i < hw->nr_vring; i++)
		rte_vhost_set_vring_base(vid, i, hw->vring[i].last_avail_idx,
				hw->vring[i].last_used_idx);

	rte_vhost_get_negotiated_features(vid, &features);
	if (RTE_VHOST_NEED_LOG(features)) {
		ifcvf_disable_logging(hw);
		rte_vhost_get_log_base(internal->vid, &log_base, &log_size);
		rte_vfio_container_dma_unmap(internal->vfio_container_fd,
				log_base, IFCVF_LOG_BASE, log_size);
		/*
		 * IFCVF marks dirty memory pages only for the packet buffer;
		 * software marks the used ring dirty after the device stops.
		 */
		for (i = 0; i < hw->nr_vring; i++) {
			len = IFCVF_USED_RING_LEN(hw->vring[i].size);
			rte_vhost_log_used_vring(vid, i, 0, len);
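
/*
 * Buffer for VFIO_DEVICE_SET_IRQS: one eventfd per vring plus one for
 * the device's config vector, laid out after struct vfio_irq_set.
 */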
#define MSIX_IRQ_SET_BUF_LEN (sizeof(struct vfio_irq_set) + \
		sizeof(int) * (IFCVF_MAX_QUEUES * 2 + 1))

vdpa_enable_vfio_intr(struct ifcvf_internal *internal, bool m_rx)
{
	uint32_t i, nr_vring;
	char irq_set_buf[MSIX_IRQ_SET_BUF_LEN];
	struct vfio_irq_set *irq_set;
	struct rte_vhost_vring vring;

	nr_vring = rte_vhost_get_vring_num(internal->vid);

	irq_set = (struct vfio_irq_set *)irq_set_buf;
	irq_set->argsz = sizeof(irq_set_buf);
	irq_set->count = nr_vring + 1;
	irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
			 VFIO_IRQ_SET_ACTION_TRIGGER;
	irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;

	fd_ptr = (int *)&irq_set->data;
	fd_ptr[RTE_INTR_VEC_ZERO_OFFSET] =
		rte_intr_fd_get(internal->pdev->intr_handle);

	for (i = 0; i < nr_vring; i++)
		internal->intr_fd[i] = -1;

	for (i = 0; i < nr_vring; i++) {
		rte_vhost_get_vhost_vring(internal->vid, i, &vring);
		fd_ptr[RTE_INTR_VEC_RXTX_OFFSET + i] = vring.callfd;
		if ((i & 1) == 0 && m_rx) {
			fd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
				DRV_LOG(ERR, "can't setup eventfd: %s",
					strerror(errno));
			internal->intr_fd[i] = fd;
			fd_ptr[RTE_INTR_VEC_RXTX_OFFSET + i] = fd;

	ret = ioctl(internal->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
		DRV_LOG(ERR, "Error enabling MSI-X interrupts: %s",
			strerror(errno));

vdpa_disable_vfio_intr(struct ifcvf_internal *internal)
{
	uint32_t i, nr_vring;
	char irq_set_buf[MSIX_IRQ_SET_BUF_LEN];
	struct vfio_irq_set *irq_set;

	irq_set = (struct vfio_irq_set *)irq_set_buf;
	irq_set->argsz = sizeof(irq_set_buf);
	irq_set->flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER;
	irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;

	nr_vring = rte_vhost_get_vring_num(internal->vid);
	for (i = 0; i < nr_vring; i++) {
		if (internal->intr_fd[i] >= 0)
			close(internal->intr_fd[i]);
		internal->intr_fd[i] = -1;

	ret = ioctl(internal->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
		DRV_LOG(ERR, "Error disabling MSI-X interrupts: %s",
			strerror(errno));
notify_relay(void *arg)
{
	int i, kickfd, epfd, nfds = 0;
	struct epoll_event events[IFCVF_MAX_QUEUES * 2];
	struct epoll_event ev;
	struct rte_vhost_vring vring;
	struct ifcvf_internal *internal = (struct ifcvf_internal *)arg;
	struct ifcvf_hw *hw = &internal->hw;

	q_num = rte_vhost_get_vring_num(internal->vid);

	epfd = epoll_create(IFCVF_MAX_QUEUES * 2);
		DRV_LOG(ERR, "failed to create epoll instance.");
	internal->epfd = epfd;

	for (qid = 0; qid < q_num; qid++) {
		ev.events = EPOLLIN | EPOLLPRI;
		rte_vhost_get_vhost_vring(internal->vid, qid, &vring);
		ev.data.u64 = qid | (uint64_t)vring.kickfd << 32;
		if (epoll_ctl(epfd, EPOLL_CTL_ADD, vring.kickfd, &ev) < 0) {
			DRV_LOG(ERR, "epoll add error: %s", strerror(errno));

		nfds = epoll_wait(epfd, events, q_num, -1);
			DRV_LOG(ERR, "epoll_wait failed");

		for (i = 0; i < nfds; i++) {
			qid = events[i].data.u32;
			kickfd = (uint32_t)(events[i].data.u64 >> 32);
			nbytes = read(kickfd, &buf, 8);
				if (errno == EINTR ||
				    errno == EWOULDBLOCK ||
				    errno == EAGAIN)
					continue;
				DRV_LOG(INFO, "Error reading "
					"kickfd: %s", strerror(errno));

			ifcvf_notify_queue(hw, qid);

setup_notify_relay(struct ifcvf_internal *internal)
{
	char name[THREAD_NAME_LEN];

	snprintf(name, sizeof(name), "ifc-notify-%d", internal->vid);
	ret = rte_ctrl_thread_create(&internal->tid, name, NULL, notify_relay,
			(void *)internal);
		DRV_LOG(ERR, "failed to create notify relay pthread.");

unset_notify_relay(struct ifcvf_internal *internal)
{
	pthread_cancel(internal->tid);
	pthread_join(internal->tid, &status);

	if (internal->epfd >= 0)
		close(internal->epfd);
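
/*
 * Reconcile the datapath with the state flags under the lock: bring it
 * up (DMA map, interrupts, HW start, notify relay) once both "started"
 * and "dev_attached" are set, and tear it down in reverse otherwise.
 */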
update_datapath(struct ifcvf_internal *internal)
{
	rte_spinlock_lock(&internal->lock);

	if (!rte_atomic32_read(&internal->running) &&
	    (rte_atomic32_read(&internal->started) &&
	     rte_atomic32_read(&internal->dev_attached))) {
		ret = ifcvf_dma_map(internal, true);

		ret = vdpa_enable_vfio_intr(internal, false);

		ret = vdpa_ifcvf_start(internal);

		ret = setup_notify_relay(internal);

		rte_atomic32_set(&internal->running, 1);
	} else if (rte_atomic32_read(&internal->running) &&
		   (!rte_atomic32_read(&internal->started) ||
		    !rte_atomic32_read(&internal->dev_attached))) {
		ret = unset_notify_relay(internal);

		vdpa_ifcvf_stop(internal);

		ret = vdpa_disable_vfio_intr(internal);

		ret = ifcvf_dma_map(internal, false);

		rte_atomic32_set(&internal->running, 0);
	}
	rte_spinlock_unlock(&internal->lock);
	return 0;

err:
	rte_spinlock_unlock(&internal->lock);
	return ret;
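
/*
 * Start the software live-migration fallback: allocate page-aligned
 * mediated vrings, map them at the fixed IFCVF_MEDIATED_VRING IOVA,
 * and point the RX queues' used rings at the mediated copies so the
 * relay can log dirty pages; TX queues keep using the guest rings.
 */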
m_ifcvf_start(struct ifcvf_internal *internal)
{
	struct ifcvf_hw *hw = &internal->hw;
	uint32_t i, nr_vring;
	struct rte_vhost_vring vq;
	uint64_t m_vring_iova = IFCVF_MEDIATED_VRING;

	memset(&vq, 0, sizeof(vq));

	nr_vring = rte_vhost_get_vring_num(vid);
	rte_vhost_get_negotiated_features(vid, &hw->req_features);

	for (i = 0; i < nr_vring; i++) {
		rte_vhost_get_vhost_vring(vid, i, &vq);

		size = RTE_ALIGN_CEIL(vring_size(vq.size, rte_mem_page_size()),
				rte_mem_page_size());
		vring_buf = rte_zmalloc("ifcvf", size, rte_mem_page_size());
		vring_init(&internal->m_vring[i], vq.size, vring_buf,
				rte_mem_page_size());

		ret = rte_vfio_container_dma_map(internal->vfio_container_fd,
			(uint64_t)(uintptr_t)vring_buf, m_vring_iova, size);
			DRV_LOG(ERR, "mediated vring DMA map failed.");

		gpa = hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.desc);
			DRV_LOG(ERR, "Failed to get GPA for descriptor ring.");
		hw->vring[i].desc = gpa;

		gpa = hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.avail);
			DRV_LOG(ERR, "Failed to get GPA for available ring.");
		hw->vring[i].avail = gpa;

		/* Direct I/O for Tx queue, relay for Rx queue */
		if (i & 1) {
			gpa = hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.used);
				DRV_LOG(ERR, "Failed to get GPA for used ring.");
			hw->vring[i].used = gpa;
		} else {
			hw->vring[i].used = m_vring_iova +
				(char *)internal->m_vring[i].used -
				(char *)internal->m_vring[i].desc;
		}

		hw->vring[i].size = vq.size;

		rte_vhost_get_vring_base(vid, i,
				&internal->m_vring[i].avail->idx,
				&internal->m_vring[i].used->idx);

		rte_vhost_get_vring_base(vid, i, &hw->vring[i].last_avail_idx,
				&hw->vring[i].last_used_idx);

		m_vring_iova += size;
	}
	hw->nr_vring = nr_vring;

	return ifcvf_start_hw(&internal->hw);

error:
	for (i = 0; i < nr_vring; i++)
		rte_free(internal->m_vring[i].desc);

m_ifcvf_stop(struct ifcvf_internal *internal)
{
	struct rte_vhost_vring vq;
	struct ifcvf_hw *hw = &internal->hw;
	uint64_t m_vring_iova = IFCVF_MEDIATED_VRING;

	for (i = 0; i < hw->nr_vring; i++) {
		/* synchronize remaining new used entries if any */
		if ((i & 1) == 0)
			update_used_ring(internal, i);

		rte_vhost_get_vhost_vring(vid, i, &vq);
		len = IFCVF_USED_RING_LEN(vq.size);
		rte_vhost_log_used_vring(vid, i, 0, len);

		size = RTE_ALIGN_CEIL(vring_size(vq.size, rte_mem_page_size()),
				rte_mem_page_size());
		rte_vfio_container_dma_unmap(internal->vfio_container_fd,
			(uint64_t)(uintptr_t)internal->m_vring[i].desc,
			m_vring_iova, size);

		rte_vhost_set_vring_base(vid, i, hw->vring[i].last_avail_idx,
				hw->vring[i].last_used_idx);
		rte_free(internal->m_vring[i].desc);
		m_vring_iova += size;
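
/* Relay newly used descriptors from the mediated ring into the guest's
 * used ring and signal the guest through its callfd.
 */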
update_used_ring(struct ifcvf_internal *internal, uint16_t qid)
{
	rte_vdpa_relay_vring_used(internal->vid, qid, &internal->m_vring[qid]);
	rte_vhost_vring_call(internal->vid, qid);
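
/*
 * Vring-relay thread for the software fallback: kickfd events are
 * forwarded to the device, while relay-eventfd events (tagged in the
 * low bit) sync used entries back to the guest and raise its callfd.
 */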
vring_relay(void *arg)
{
	int i, vid, epfd, fd, nfds;
	struct ifcvf_internal *internal = (struct ifcvf_internal *)arg;
	struct rte_vhost_vring vring;
	struct epoll_event events[IFCVF_MAX_QUEUES * 4];
	struct epoll_event ev;

	q_num = rte_vhost_get_vring_num(vid);

	/* add notify fd and interrupt fd to epoll */
	epfd = epoll_create(IFCVF_MAX_QUEUES * 2);
		DRV_LOG(ERR, "failed to create epoll instance.");
	internal->epfd = epfd;

	for (qid = 0; qid < q_num; qid++) {
		ev.events = EPOLLIN | EPOLLPRI;
		rte_vhost_get_vhost_vring(vid, qid, &vring);
		ev.data.u64 = qid << 1 | (uint64_t)vring.kickfd << 32;
		if (epoll_ctl(epfd, EPOLL_CTL_ADD, vring.kickfd, &ev) < 0) {
			DRV_LOG(ERR, "epoll add error: %s", strerror(errno));

	for (qid = 0; qid < q_num; qid += 2) {
		ev.events = EPOLLIN | EPOLLPRI;
		/* the low bit flags this entry as an interrupt fd */
		ev.data.u64 = 1 | qid << 1 |
			(uint64_t)internal->intr_fd[qid] << 32;
		if (epoll_ctl(epfd, EPOLL_CTL_ADD, internal->intr_fd[qid], &ev)
				< 0) {
			DRV_LOG(ERR, "epoll add error: %s", strerror(errno));

		update_used_ring(internal, qid);
	}

	/* start relay with a first kick */
	for (qid = 0; qid < q_num; qid++)
		ifcvf_notify_queue(&internal->hw, qid);

	/* listen to the events and react accordingly */
		nfds = epoll_wait(epfd, events, q_num * 2, -1);
			DRV_LOG(ERR, "epoll_wait failed");

		for (i = 0; i < nfds; i++) {
			fd = (uint32_t)(events[i].data.u64 >> 32);
				nbytes = read(fd, &buf, 8);
				if (errno == EINTR ||
				    errno == EWOULDBLOCK ||
				    errno == EAGAIN)
					continue;
				DRV_LOG(INFO, "Error reading "
					"kickfd: %s", strerror(errno));

			qid = events[i].data.u32 >> 1;

			if (events[i].data.u32 & 1)
				update_used_ring(internal, qid);
			else
				ifcvf_notify_queue(&internal->hw, qid);

setup_vring_relay(struct ifcvf_internal *internal)
{
	char name[THREAD_NAME_LEN];

	snprintf(name, sizeof(name), "ifc-vring-%d", internal->vid);
	ret = rte_ctrl_thread_create(&internal->tid, name, NULL, vring_relay,
			(void *)internal);
		DRV_LOG(ERR, "failed to create ring relay pthread.");

unset_vring_relay(struct ifcvf_internal *internal)
{
	pthread_cancel(internal->tid);
	pthread_join(internal->tid, &status);

	if (internal->epfd >= 0)
		close(internal->epfd);
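
/*
 * Switch a running device from the hardware datapath to the software
 * fallback: quiesce direct I/O, re-arm interrupts in relay mode, start
 * the mediated rings and the vring-relay thread, then re-enable the
 * host notifier; the trailing calls unwind on failure.
 */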
ifcvf_sw_fallback_switchover(struct ifcvf_internal *internal)
{
	int vid = internal->vid;

	/* stop the direct IO data path */
	unset_notify_relay(internal);
	vdpa_ifcvf_stop(internal);
	vdpa_disable_vfio_intr(internal);

	ret = rte_vhost_host_notifier_ctrl(vid, RTE_VHOST_QUEUE_ALL, false);
	if (ret && ret != -ENOTSUP)

	/* set up interrupt for interrupt relay */
	ret = vdpa_enable_vfio_intr(internal, true);

	ret = m_ifcvf_start(internal);

	/* set up vring relay thread */
	ret = setup_vring_relay(internal);

	rte_vhost_host_notifier_ctrl(vid, RTE_VHOST_QUEUE_ALL, true);

	internal->sw_fallback_running = true;

	m_ifcvf_stop(internal);

	vdpa_disable_vfio_intr(internal);

	ifcvf_dma_map(internal, false);

ifcvf_dev_config(int vid)
{
	struct rte_vdpa_device *vdev;
	struct internal_list *list;
	struct ifcvf_internal *internal;

	vdev = rte_vhost_get_vdpa_device(vid);
	list = find_internal_resource_by_vdev(vdev);
		DRV_LOG(ERR, "Invalid vDPA device: %p", vdev);

	internal = list->internal;

	rte_atomic32_set(&internal->dev_attached, 1);
	update_datapath(internal);

	if (rte_vhost_host_notifier_ctrl(vid, RTE_VHOST_QUEUE_ALL, true) != 0)
		DRV_LOG(NOTICE, "vDPA (%s): software relay is used.",
			vdev->device->name);

	internal->configured = 1;

ifcvf_dev_close(int vid)
{
	struct rte_vdpa_device *vdev;
	struct internal_list *list;
	struct ifcvf_internal *internal;

	vdev = rte_vhost_get_vdpa_device(vid);
	list = find_internal_resource_by_vdev(vdev);
		DRV_LOG(ERR, "Invalid vDPA device: %p", vdev);

	internal = list->internal;

	if (internal->sw_fallback_running) {
		/* unset ring relay */
		unset_vring_relay(internal);

		m_ifcvf_stop(internal);

		/* remove interrupt setting */
		vdpa_disable_vfio_intr(internal);

		/* unset DMA map for guest memory */
		ifcvf_dma_map(internal, false);

		internal->sw_fallback_running = false;
	} else {
		rte_atomic32_set(&internal->dev_attached, 0);
		update_datapath(internal);
	}

	internal->configured = 0;

ifcvf_set_features(int vid)
{
	uint64_t features = 0;
	struct rte_vdpa_device *vdev;
	struct internal_list *list;
	struct ifcvf_internal *internal;
	uint64_t log_base = 0, log_size = 0;

	vdev = rte_vhost_get_vdpa_device(vid);
	list = find_internal_resource_by_vdev(vdev);
		DRV_LOG(ERR, "Invalid vDPA device: %p", vdev);

	internal = list->internal;
	rte_vhost_get_negotiated_features(vid, &features);

	if (!RTE_VHOST_NEED_LOG(features))

	if (internal->sw_lm) {
		ifcvf_sw_fallback_switchover(internal);
	} else {
		rte_vhost_get_log_base(vid, &log_base, &log_size);
		rte_vfio_container_dma_map(internal->vfio_container_fd,
				log_base, IFCVF_LOG_BASE, log_size);
		ifcvf_enable_logging(&internal->hw, IFCVF_LOG_BASE, log_size);
	}

ifcvf_get_vfio_group_fd(int vid)
{
	struct rte_vdpa_device *vdev;
	struct internal_list *list;

	vdev = rte_vhost_get_vdpa_device(vid);
	list = find_internal_resource_by_vdev(vdev);
		DRV_LOG(ERR, "Invalid vDPA device: %p", vdev);

	return list->internal->vfio_group_fd;

ifcvf_get_vfio_device_fd(int vid)
{
	struct rte_vdpa_device *vdev;
	struct internal_list *list;

	vdev = rte_vhost_get_vdpa_device(vid);
	list = find_internal_resource_by_vdev(vdev);
		DRV_LOG(ERR, "Invalid vDPA device: %p", vdev);

	return list->internal->vfio_dev_fd;
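
/*
 * Report the VFIO offset and size of this queue's notify address so
 * vhost can mmap it for the guest; doorbell writes then reach the
 * device directly instead of being relayed through the kickfd.
 */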
ifcvf_get_notify_area(int vid, int qid, uint64_t *offset, uint64_t *size)
{
	struct rte_vdpa_device *vdev;
	struct internal_list *list;
	struct ifcvf_internal *internal;
	struct vfio_region_info reg = { .argsz = sizeof(reg) };

	vdev = rte_vhost_get_vdpa_device(vid);
	list = find_internal_resource_by_vdev(vdev);
		DRV_LOG(ERR, "Invalid vDPA device: %p", vdev);

	internal = list->internal;

	reg.index = ifcvf_get_notify_region(&internal->hw);
	ret = ioctl(internal->vfio_dev_fd, VFIO_DEVICE_GET_REGION_INFO, &reg);
		DRV_LOG(ERR, "Failed to get device region info: %s",
			strerror(errno));

	*offset = ifcvf_get_queue_notify_off(&internal->hw, qid) + reg.offset;

ifcvf_get_queue_num(struct rte_vdpa_device *vdev, uint32_t *queue_num)
{
	struct internal_list *list;

	list = find_internal_resource_by_vdev(vdev);
		DRV_LOG(ERR, "Invalid vDPA device: %p", vdev);

	*queue_num = list->internal->max_queues;

ifcvf_get_vdpa_features(struct rte_vdpa_device *vdev, uint64_t *features)
{
	struct internal_list *list;

	list = find_internal_resource_by_vdev(vdev);
		DRV_LOG(ERR, "Invalid vDPA device: %p", vdev);

	*features = list->internal->features;

#define VDPA_SUPPORTED_PROTOCOL_FEATURES \
		(1ULL << VHOST_USER_PROTOCOL_F_REPLY_ACK | \
		 1ULL << VHOST_USER_PROTOCOL_F_SLAVE_REQ | \
		 1ULL << VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD | \
		 1ULL << VHOST_USER_PROTOCOL_F_HOST_NOTIFIER | \
		 1ULL << VHOST_USER_PROTOCOL_F_LOG_SHMFD | \
		 1ULL << VHOST_USER_PROTOCOL_F_STATUS)

ifcvf_get_protocol_features(struct rte_vdpa_device *vdev, uint64_t *features)
{
	*features = VDPA_SUPPORTED_PROTOCOL_FEATURES;
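
/*
 * Enable or disable one vring through the common config space; when
 * the state of a configured device changes, the MSI-X bindings are
 * redone so the interrupt setup matches the active queues.
 */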
ifcvf_set_vring_state(int vid, int vring, int state)
{
	struct rte_vdpa_device *vdev;
	struct internal_list *list;
	struct ifcvf_internal *internal;
	struct ifcvf_hw *hw;
	struct ifcvf_pci_common_cfg *cfg;

	vdev = rte_vhost_get_vdpa_device(vid);
	list = find_internal_resource_by_vdev(vdev);
		DRV_LOG(ERR, "Invalid vDPA device: %p", vdev);

	internal = list->internal;
	if (vring < 0 || vring >= internal->max_queues * 2) {
		DRV_LOG(ERR, "Vring index %d is out of range", vring);

	if (!internal->configured)

	cfg = hw->common_cfg;
	IFCVF_WRITE_REG16(vring, &cfg->queue_select);
	IFCVF_WRITE_REG16(!!state, &cfg->queue_enable);

	if (!state && hw->vring[vring].enable) {
		ret = vdpa_disable_vfio_intr(internal);

	if (state && !hw->vring[vring].enable) {
		ret = vdpa_enable_vfio_intr(internal, false);

	hw->vring[vring].enable = !!state;

static struct rte_vdpa_dev_ops ifcvf_net_ops = {
	.get_queue_num = ifcvf_get_queue_num,
	.get_features = ifcvf_get_vdpa_features,
	.get_protocol_features = ifcvf_get_protocol_features,
	.dev_conf = ifcvf_dev_config,
	.dev_close = ifcvf_dev_close,
	.set_vring_state = ifcvf_set_vring_state,
	.set_features = ifcvf_set_features,
	.migration_done = NULL,
	.get_vfio_group_fd = ifcvf_get_vfio_group_fd,
	.get_vfio_device_fd = ifcvf_get_vfio_device_fd,
	.get_notify_area = ifcvf_get_notify_area,
};

open_int(const char *key __rte_unused, const char *value, void *extra_args)
{
	uint16_t *n = extra_args;

	if (value == NULL || extra_args == NULL)

	*n = (uint16_t)strtoul(value, NULL, 0);
	if (*n == USHRT_MAX && errno == ERANGE)
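
/*
 * Map the PCI device ID to a virtio device type: 0x1000-0x103f are
 * transitional IDs typed by the subsystem device ID, 0x1040-0x107f
 * are modern IDs offset by 0x1040, per the virtio 1.x spec.
 */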
ifcvf_pci_get_device_type(struct rte_pci_device *pci_dev)
{
	uint16_t pci_device_id = pci_dev->id.device_id;

	if (pci_device_id < 0x1000 || pci_device_id > 0x107f) {
		DRV_LOG(ERR, "Probed device is not a virtio device");

	if (pci_device_id < 0x1040) {
		/* Transitional devices: use the PCI subsystem device ID as
		 * the virtio device ID, same as the legacy driver always did.
		 */
		device_id = pci_dev->id.subsystem_device_id;
	} else {
		/* Modern devices: simply use the PCI device ID,
		 * but start from 0x1040.
		 */
		device_id = pci_device_id - 0x1040;
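
/*
 * Per-type feature supplements and ops; these bits are OR'ed on top of
 * the feature set read from the VF itself.
 */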
struct rte_vdpa_dev_info dev_info[] = {
	{
		.features = (1ULL << VIRTIO_NET_F_GUEST_ANNOUNCE) |
			    (1ULL << VIRTIO_NET_F_CTRL_VQ) |
			    (1ULL << VIRTIO_NET_F_STATUS) |
			    (1ULL << VHOST_USER_F_PROTOCOL_FEATURES) |
			    (1ULL << VHOST_F_LOG_ALL),
		.ops = &ifcvf_net_ops,
	},
	{
		.features = (1ULL << VHOST_USER_F_PROTOCOL_FEATURES) |
			    (1ULL << VHOST_F_LOG_ALL),
	},
};
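
/*
 * Probe only devices that carry the "vdpa=1" devarg: set up VFIO and
 * the hardware context, derive the device type and features, register
 * with the vhost vDPA framework, and mark the driver started so the
 * datapath comes up once a frontend attaches.
 */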
ifcvf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
		struct rte_pci_device *pci_dev)
{
	struct ifcvf_internal *internal = NULL;
	struct internal_list *list = NULL;
	int sw_fallback_lm = 0;
	struct rte_kvargs *kvlist = NULL;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)

	if (!pci_dev->device.devargs)

	kvlist = rte_kvargs_parse(pci_dev->device.devargs->args,
			ifcvf_valid_arguments);

	/* probe only when vdpa mode is specified */
	if (rte_kvargs_count(kvlist, IFCVF_VDPA_MODE) == 0) {
		rte_kvargs_free(kvlist);

	ret = rte_kvargs_process(kvlist, IFCVF_VDPA_MODE, &open_int,
			&vdpa_mode);
	if (ret < 0 || vdpa_mode == 0) {
		rte_kvargs_free(kvlist);

	list = rte_zmalloc("ifcvf", sizeof(*list), 0);

	internal = rte_zmalloc("ifcvf", sizeof(*internal), 0);
	if (internal == NULL)

	internal->pdev = pci_dev;
	rte_spinlock_init(&internal->lock);

	if (ifcvf_vfio_setup(internal) < 0) {
		DRV_LOG(ERR, "failed to setup device %s", pci_dev->name);

	if (ifcvf_init_hw(&internal->hw, internal->pdev) < 0) {
		DRV_LOG(ERR, "failed to init device %s", pci_dev->name);

	internal->configured = 0;
	internal->max_queues = IFCVF_MAX_QUEUES;
	features = ifcvf_get_features(&internal->hw);

	device_id = ifcvf_pci_get_device_type(pci_dev);
	if (device_id < 0) {
		DRV_LOG(ERR, "failed to get device %s type", pci_dev->name);

	if (device_id == VIRTIO_ID_NET) {
		internal->hw.device_type = IFCVF_NET;
		internal->features = features &
			~(1ULL << VIRTIO_F_IOMMU_PLATFORM);
		internal->features |= dev_info[IFCVF_NET].features;
	} else if (device_id == VIRTIO_ID_BLOCK) {
		internal->hw.device_type = IFCVF_BLK;
		internal->features = features &
			~(1ULL << VIRTIO_F_IOMMU_PLATFORM);
		internal->features |= dev_info[IFCVF_BLK].features;
	}

	list->internal = internal;

	if (rte_kvargs_count(kvlist, IFCVF_SW_FALLBACK_LM)) {
		ret = rte_kvargs_process(kvlist, IFCVF_SW_FALLBACK_LM,
				&open_int, &sw_fallback_lm);
	}
	internal->sw_lm = sw_fallback_lm;

	internal->vdev = rte_vdpa_register_device(&pci_dev->device,
			dev_info[internal->hw.device_type].ops);
	if (internal->vdev == NULL) {
		DRV_LOG(ERR, "failed to register device %s", pci_dev->name);

	pthread_mutex_lock(&internal_list_lock);
	TAILQ_INSERT_TAIL(&internal_list, list, next);
	pthread_mutex_unlock(&internal_list_lock);

	rte_atomic32_set(&internal->started, 1);
	update_datapath(internal);

	rte_kvargs_free(kvlist);
	return 0;

error:
	rte_kvargs_free(kvlist);

ifcvf_pci_remove(struct rte_pci_device *pci_dev)
{
	struct ifcvf_internal *internal;
	struct internal_list *list;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)

	list = find_internal_resource_by_dev(pci_dev);
		DRV_LOG(ERR, "Invalid device: %s", pci_dev->name);

	internal = list->internal;
	rte_atomic32_set(&internal->started, 0);
	update_datapath(internal);

	rte_pci_unmap_device(internal->pdev);
	rte_vfio_container_destroy(internal->vfio_container_fd);
	rte_vdpa_unregister_device(internal->vdev);

	pthread_mutex_lock(&internal_list_lock);
	TAILQ_REMOVE(&internal_list, list, next);
	pthread_mutex_unlock(&internal_list_lock);

/*
 * IFCVF has the same vendor ID and device ID as a virtio net PCI
 * device, and is distinguished by its specific subsystem vendor ID
 * and device ID.
 */
static const struct rte_pci_id pci_id_ifcvf_map[] = {
	{ .class_id = RTE_CLASS_ANY_ID,
	  .vendor_id = IFCVF_VENDOR_ID,
	  .device_id = IFCVF_NET_DEVICE_ID,
	  .subsystem_vendor_id = IFCVF_SUBSYS_VENDOR_ID,
	  .subsystem_device_id = IFCVF_SUBSYS_DEVICE_ID,
	},

	{ .class_id = RTE_CLASS_ANY_ID,
	  .vendor_id = IFCVF_VENDOR_ID,
	  .device_id = IFCVF_BLK_TRANSITIONAL_DEVICE_ID,
	  .subsystem_vendor_id = IFCVF_SUBSYS_VENDOR_ID,
	  .subsystem_device_id = IFCVF_BLK_DEVICE_ID,
	},

	{ .class_id = RTE_CLASS_ANY_ID,
	  .vendor_id = IFCVF_VENDOR_ID,
	  .device_id = IFCVF_BLK_MODERN_DEVICE_ID,
	  .subsystem_vendor_id = IFCVF_SUBSYS_VENDOR_ID,
	  .subsystem_device_id = IFCVF_BLK_DEVICE_ID,
	},

	{ .vendor_id = 0, /* sentinel */
	},
};

static struct rte_pci_driver rte_ifcvf_vdpa = {
	.id_table = pci_id_ifcvf_map,
	.probe = ifcvf_pci_probe,
	.remove = ifcvf_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_ifcvf, rte_ifcvf_vdpa);
RTE_PMD_REGISTER_PCI_TABLE(net_ifcvf, pci_id_ifcvf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_ifcvf, "* vfio-pci");