1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Intel Corporation
10 #include <sys/epoll.h>
11 #include <linux/virtio_net.h>
14 #include <rte_malloc.h>
15 #include <rte_memory.h>
16 #include <rte_bus_pci.h>
17 #include <rte_vhost.h>
19 #include <rte_vdpa_dev.h>
21 #include <rte_spinlock.h>
23 #include <rte_kvargs.h>
24 #include <rte_devargs.h>
26 #include "base/ifcvf.h"
28 RTE_LOG_REGISTER(ifcvf_vdpa_logtype, pmd.net.ifcvf_vdpa, NOTICE);
29 #define DRV_LOG(level, fmt, args...) \
30 rte_log(RTE_LOG_ ## level, ifcvf_vdpa_logtype, \
31 "IFCVF %s(): " fmt "\n", __func__, ##args)
34 #define PAGE_SIZE 4096
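/*
 * Used ring length for 'size' entries: the three uint16_t account for the
 * flags and idx fields plus the trailing avail_event word of the used ring.
 */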
37 #define IFCVF_USED_RING_LEN(size) \
38 ((size) * sizeof(struct vring_used_elem) + sizeof(uint16_t) * 3)
40 #define IFCVF_VDPA_MODE "vdpa"
41 #define IFCVF_SW_FALLBACK_LM "sw-live-migration"
43 static const char * const ifcvf_valid_arguments[] = {
49 struct ifcvf_internal {
50 struct rte_pci_device *pdev;
53 int vfio_container_fd;
56 pthread_t tid; /* thread for notify relay */
59 struct rte_vdpa_device *vdev;
62 rte_atomic32_t started;
63 rte_atomic32_t dev_attached;
64 rte_atomic32_t running;
67 bool sw_fallback_running;
68 /* mediated vring for sw fallback */
69 struct vring m_vring[IFCVF_MAX_QUEUES * 2];
70 /* eventfd for used ring interrupt */
71 int intr_fd[IFCVF_MAX_QUEUES * 2];
74 struct internal_list {
75 TAILQ_ENTRY(internal_list) next;
76 struct ifcvf_internal *internal;
79 TAILQ_HEAD(internal_list_head, internal_list);
80 static struct internal_list_head internal_list =
81 TAILQ_HEAD_INITIALIZER(internal_list);
83 static pthread_mutex_t internal_list_lock = PTHREAD_MUTEX_INITIALIZER;
85 static void update_used_ring(struct ifcvf_internal *internal, uint16_t qid);
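/*
 * Look up the per-device context by its vDPA device handle.
 * The internal list lock is taken inside.
 */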
87 static struct internal_list *
88 find_internal_resource_by_vdev(struct rte_vdpa_device *vdev)
91 struct internal_list *list;
93 pthread_mutex_lock(&internal_list_lock);
95 TAILQ_FOREACH(list, &internal_list, next) {
96 if (vdev == list->internal->vdev) {
102 pthread_mutex_unlock(&internal_list_lock);
110 static struct internal_list *
111 find_internal_resource_by_dev(struct rte_pci_device *pdev)
114 struct internal_list *list;
116 pthread_mutex_lock(&internal_list_lock);
118 TAILQ_FOREACH(list, &internal_list, next) {
119 if (!rte_pci_addr_cmp(&pdev->addr,
120 &list->internal->pdev->addr)) {
126 pthread_mutex_unlock(&internal_list_lock);
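/*
 * Bind the device to its own VFIO container and group, map its BARs and
 * copy the BAR addresses/lengths into the HW layer for register access.
 */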
135 ifcvf_vfio_setup(struct ifcvf_internal *internal)
137 struct rte_pci_device *dev = internal->pdev;
138 char devname[RTE_DEV_NAME_MAX_LEN] = {0};
142 internal->vfio_dev_fd = -1;
143 internal->vfio_group_fd = -1;
144 internal->vfio_container_fd = -1;
146 rte_pci_device_name(&dev->addr, devname, RTE_DEV_NAME_MAX_LEN);
147 ret = rte_vfio_get_group_num(rte_pci_get_sysfs_path(), devname,
150 DRV_LOG(ERR, "%s failed to get IOMMU group", devname);
154 internal->vfio_container_fd = rte_vfio_container_create();
155 if (internal->vfio_container_fd < 0)
158 internal->vfio_group_fd = rte_vfio_container_group_bind(
159 internal->vfio_container_fd, iommu_group_num);
160 if (internal->vfio_group_fd < 0)
163 if (rte_pci_map_device(dev))
166 internal->vfio_dev_fd = dev->intr_handle.vfio_dev_fd;
168 for (i = 0; i < RTE_MIN(PCI_MAX_RESOURCE, IFCVF_PCI_MAX_RESOURCE);
170 internal->hw.mem_resource[i].addr =
171 internal->pdev->mem_resource[i].addr;
172 internal->hw.mem_resource[i].phys_addr =
173 internal->pdev->mem_resource[i].phys_addr;
174 internal->hw.mem_resource[i].len =
175 internal->pdev->mem_resource[i].len;
181 rte_vfio_container_destroy(internal->vfio_container_fd);
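/*
 * Map (do_map != 0) or unmap every region of the guest memory table in the
 * VFIO container, using the guest physical address as IOVA so the device
 * can DMA directly into guest buffers.
 */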
186 ifcvf_dma_map(struct ifcvf_internal *internal, int do_map)
190 struct rte_vhost_memory *mem = NULL;
191 int vfio_container_fd;
193 ret = rte_vhost_get_mem_table(internal->vid, &mem);
195 DRV_LOG(ERR, "failed to get VM memory layout.");
199 vfio_container_fd = internal->vfio_container_fd;
201 for (i = 0; i < mem->nregions; i++) {
202 struct rte_vhost_mem_region *reg;
204 reg = &mem->regions[i];
205 DRV_LOG(INFO, "%s, region %u: HVA 0x%" PRIx64 ", "
206 "GPA 0x%" PRIx64 ", size 0x%" PRIx64 ".",
207 do_map ? "DMA map" : "DMA unmap", i,
208 reg->host_user_addr, reg->guest_phys_addr, reg->size);
211 ret = rte_vfio_container_dma_map(vfio_container_fd,
212 reg->host_user_addr, reg->guest_phys_addr,
215 DRV_LOG(ERR, "DMA map failed.");
219 ret = rte_vfio_container_dma_unmap(vfio_container_fd,
220 reg->host_user_addr, reg->guest_phys_addr,
223 DRV_LOG(ERR, "DMA unmap failed.");
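/* Translate a host virtual address to a guest physical address using the vhost memory table. */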
236 hva_to_gpa(int vid, uint64_t hva)
238 struct rte_vhost_memory *mem = NULL;
239 struct rte_vhost_mem_region *reg;
243 if (rte_vhost_get_mem_table(vid, &mem) < 0)
246 for (i = 0; i < mem->nregions; i++) {
247 reg = &mem->regions[i];
249 if (hva >= reg->host_user_addr &&
250 hva < reg->host_user_addr + reg->size) {
251 gpa = hva - reg->host_user_addr + reg->guest_phys_addr;
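/*
 * Program the negotiated features and the GPA of each vring into the HW,
 * restore the last avail/used indexes, then start the HW datapath.
 */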
263 vdpa_ifcvf_start(struct ifcvf_internal *internal)
265 struct ifcvf_hw *hw = &internal->hw;
268 struct rte_vhost_vring vq;
272 nr_vring = rte_vhost_get_vring_num(vid);
273 rte_vhost_get_negotiated_features(vid, &hw->req_features);
275 for (i = 0; i < nr_vring; i++) {
276 rte_vhost_get_vhost_vring(vid, i, &vq);
277 gpa = hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.desc);
279 DRV_LOG(ERR, "Failed to get GPA for descriptor ring.");
282 hw->vring[i].desc = gpa;
284 gpa = hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.avail);
286 DRV_LOG(ERR, "Failed to get GPA for available ring.");
289 hw->vring[i].avail = gpa;
291 gpa = hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.used);
293 DRV_LOG(ERR, "Failed to get GPA for used ring.");
296 hw->vring[i].used = gpa;
298 hw->vring[i].size = vq.size;
299 rte_vhost_get_vring_base(vid, i, &hw->vring[i].last_avail_idx,
300 &hw->vring[i].last_used_idx);
304 return ifcvf_start_hw(&internal->hw);
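/*
 * Stop the HW datapath and save the vring indexes back to vhost. When dirty
 * logging was negotiated, also disable device logging, unmap the log buffer
 * and mark the used rings dirty on the device's behalf.
 */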
308 vdpa_ifcvf_stop(struct ifcvf_internal *internal)
310 struct ifcvf_hw *hw = &internal->hw;
313 uint64_t features = 0;
314 uint64_t log_base = 0, log_size = 0;
320 for (i = 0; i < hw->nr_vring; i++)
321 rte_vhost_set_vring_base(vid, i, hw->vring[i].last_avail_idx,
322 hw->vring[i].last_used_idx);
327 rte_vhost_get_negotiated_features(vid, &features);
328 if (RTE_VHOST_NEED_LOG(features)) {
329 ifcvf_disable_logging(hw);
330 rte_vhost_get_log_base(internal->vid, &log_base, &log_size);
331 rte_vfio_container_dma_unmap(internal->vfio_container_fd,
332 log_base, IFCVF_LOG_BASE, log_size);
334 * IFCVF only marks dirty pages for the packet buffers; SW marks
335 * the used rings as dirty on its behalf after the device stops.
337 for (i = 0; i < hw->nr_vring; i++) {
338 len = IFCVF_USED_RING_LEN(hw->vring[i].size);
339 rte_vhost_log_used_vring(vid, i, 0, len);
344 #define MSIX_IRQ_SET_BUF_LEN (sizeof(struct vfio_irq_set) + \
345 sizeof(int) * (IFCVF_MAX_QUEUES * 2 + 1))
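/*
 * Enable MSI-X through VFIO: the buffer holds one eventfd per vring plus one
 * for the device's own (config) interrupt in vector 0. Each vring vector gets
 * the vhost callfd; with m_rx set, RX vectors get private eventfds instead so
 * the vring relay thread can intercept used-ring interrupts.
 */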
347 vdpa_enable_vfio_intr(struct ifcvf_internal *internal, bool m_rx)
350 uint32_t i, nr_vring;
351 char irq_set_buf[MSIX_IRQ_SET_BUF_LEN];
352 struct vfio_irq_set *irq_set;
354 struct rte_vhost_vring vring;
359 nr_vring = rte_vhost_get_vring_num(internal->vid);
361 irq_set = (struct vfio_irq_set *)irq_set_buf;
362 irq_set->argsz = sizeof(irq_set_buf);
363 irq_set->count = nr_vring + 1;
364 irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
365 VFIO_IRQ_SET_ACTION_TRIGGER;
366 irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
368 fd_ptr = (int *)&irq_set->data;
369 fd_ptr[RTE_INTR_VEC_ZERO_OFFSET] = internal->pdev->intr_handle.fd;
371 for (i = 0; i < nr_vring; i++)
372 internal->intr_fd[i] = -1;
374 for (i = 0; i < nr_vring; i++) {
375 rte_vhost_get_vhost_vring(internal->vid, i, &vring);
376 fd_ptr[RTE_INTR_VEC_RXTX_OFFSET + i] = vring.callfd;
377 if ((i & 1) == 0 && m_rx == true) {
378 fd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
380 DRV_LOG(ERR, "can't setup eventfd: %s",
384 internal->intr_fd[i] = fd;
385 fd_ptr[RTE_INTR_VEC_RXTX_OFFSET + i] = fd;
389 ret = ioctl(internal->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
391 DRV_LOG(ERR, "Error enabling MSI-X interrupts: %s",
400 vdpa_disable_vfio_intr(struct ifcvf_internal *internal)
403 uint32_t i, nr_vring;
404 char irq_set_buf[MSIX_IRQ_SET_BUF_LEN];
405 struct vfio_irq_set *irq_set;
407 irq_set = (struct vfio_irq_set *)irq_set_buf;
408 irq_set->argsz = sizeof(irq_set_buf);
410 irq_set->flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER;
411 irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
414 nr_vring = rte_vhost_get_vring_num(internal->vid);
415 for (i = 0; i < nr_vring; i++) {
416 if (internal->intr_fd[i] >= 0)
417 close(internal->intr_fd[i]);
418 internal->intr_fd[i] = -1;
421 ret = ioctl(internal->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
423 DRV_LOG(ERR, "Error disabling MSI-X interrupts: %s",
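/*
 * Notify relay thread: poll every vring kickfd with epoll and forward each
 * guest kick to the device through ifcvf_notify_queue().
 */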
432 notify_relay(void *arg)
434 int i, kickfd, epfd, nfds = 0;
436 struct epoll_event events[IFCVF_MAX_QUEUES * 2];
437 struct epoll_event ev;
440 struct rte_vhost_vring vring;
441 struct ifcvf_internal *internal = (struct ifcvf_internal *)arg;
442 struct ifcvf_hw *hw = &internal->hw;
444 q_num = rte_vhost_get_vring_num(internal->vid);
446 epfd = epoll_create(IFCVF_MAX_QUEUES * 2);
448 DRV_LOG(ERR, "failed to create epoll instance.");
451 internal->epfd = epfd;
454 for (qid = 0; qid < q_num; qid++) {
455 ev.events = EPOLLIN | EPOLLPRI;
456 rte_vhost_get_vhost_vring(internal->vid, qid, &vring);
457 ev.data.u64 = qid | (uint64_t)vring.kickfd << 32;
458 if (epoll_ctl(epfd, EPOLL_CTL_ADD, vring.kickfd, &ev) < 0) {
459 DRV_LOG(ERR, "epoll add error: %s", strerror(errno));
465 nfds = epoll_wait(epfd, events, q_num, -1);
469 DRV_LOG(ERR, "epoll_wait failed: %s", strerror(errno));
473 for (i = 0; i < nfds; i++) {
474 qid = events[i].data.u32;
475 kickfd = (uint32_t)(events[i].data.u64 >> 32);
477 nbytes = read(kickfd, &buf, 8);
479 if (errno == EINTR ||
480 errno == EWOULDBLOCK ||
483 DRV_LOG(INFO, "Error reading "
490 ifcvf_notify_queue(hw, qid);
498 setup_notify_relay(struct ifcvf_internal *internal)
502 ret = pthread_create(&internal->tid, NULL, notify_relay,
505 DRV_LOG(ERR, "failed to create notify relay pthread.");
512 unset_notify_relay(struct ifcvf_internal *internal)
517 pthread_cancel(internal->tid);
518 pthread_join(internal->tid, &status);
522 if (internal->epfd >= 0)
523 close(internal->epfd);
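/*
 * Datapath state machine, serialized by internal->lock: bring the datapath up
 * once both 'started' and 'dev_attached' are set, and tear it down (relay
 * thread, HW, interrupts, DMA mappings) as soon as either flag clears.
 */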
530 update_datapath(struct ifcvf_internal *internal)
534 rte_spinlock_lock(&internal->lock);
536 if (!rte_atomic32_read(&internal->running) &&
537 (rte_atomic32_read(&internal->started) &&
538 rte_atomic32_read(&internal->dev_attached))) {
539 ret = ifcvf_dma_map(internal, 1);
543 ret = vdpa_enable_vfio_intr(internal, 0);
547 ret = vdpa_ifcvf_start(internal);
551 ret = setup_notify_relay(internal);
555 rte_atomic32_set(&internal->running, 1);
556 } else if (rte_atomic32_read(&internal->running) &&
557 (!rte_atomic32_read(&internal->started) ||
558 !rte_atomic32_read(&internal->dev_attached))) {
559 ret = unset_notify_relay(internal);
563 vdpa_ifcvf_stop(internal);
565 ret = vdpa_disable_vfio_intr(internal);
569 ret = ifcvf_dma_map(internal, 0);
573 rte_atomic32_set(&internal->running, 0);
576 rte_spinlock_unlock(&internal->lock);
579 rte_spinlock_unlock(&internal->lock);
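/*
 * Start the mediated datapath for SW live migration: allocate host-side
 * mediated vrings and map them at IFCVF_MEDIATED_VRING. TX queues keep using
 * the guest used ring directly, while RX used rings point at the mediated
 * copy so the relay thread can forward used entries and log dirty pages.
 */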
584 m_ifcvf_start(struct ifcvf_internal *internal)
586 struct ifcvf_hw *hw = &internal->hw;
587 uint32_t i, nr_vring;
589 struct rte_vhost_vring vq;
591 uint64_t m_vring_iova = IFCVF_MEDIATED_VRING;
595 memset(&vq, 0, sizeof(vq));
597 nr_vring = rte_vhost_get_vring_num(vid);
598 rte_vhost_get_negotiated_features(vid, &hw->req_features);
600 for (i = 0; i < nr_vring; i++) {
601 rte_vhost_get_vhost_vring(vid, i, &vq);
603 size = RTE_ALIGN_CEIL(vring_size(vq.size, PAGE_SIZE),
605 vring_buf = rte_zmalloc("ifcvf", size, PAGE_SIZE);
606 vring_init(&internal->m_vring[i], vq.size, vring_buf,
609 ret = rte_vfio_container_dma_map(internal->vfio_container_fd,
610 (uint64_t)(uintptr_t)vring_buf, m_vring_iova, size);
612 DRV_LOG(ERR, "mediated vring DMA map failed.");
616 gpa = hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.desc);
618 DRV_LOG(ERR, "Failed to get GPA for descriptor ring.");
621 hw->vring[i].desc = gpa;
623 gpa = hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.avail);
625 DRV_LOG(ERR, "Failed to get GPA for available ring.");
628 hw->vring[i].avail = gpa;
630 /* Direct I/O for Tx queue, relay for Rx queue */
632 gpa = hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.used);
634 DRV_LOG(ERR, "Failed to get GPA for used ring.");
637 hw->vring[i].used = gpa;
639 hw->vring[i].used = m_vring_iova +
640 (char *)internal->m_vring[i].used -
641 (char *)internal->m_vring[i].desc;
644 hw->vring[i].size = vq.size;
646 rte_vhost_get_vring_base(vid, i,
647 &internal->m_vring[i].avail->idx,
648 &internal->m_vring[i].used->idx);
650 rte_vhost_get_vring_base(vid, i, &hw->vring[i].last_avail_idx,
651 &hw->vring[i].last_used_idx);
653 m_vring_iova += size;
655 hw->nr_vring = nr_vring;
657 return ifcvf_start_hw(&internal->hw);
660 for (i = 0; i < nr_vring; i++)
661 if (internal->m_vring[i].desc)
662 rte_free(internal->m_vring[i].desc);
668 m_ifcvf_stop(struct ifcvf_internal *internal)
672 struct rte_vhost_vring vq;
673 struct ifcvf_hw *hw = &internal->hw;
674 uint64_t m_vring_iova = IFCVF_MEDIATED_VRING;
680 for (i = 0; i < hw->nr_vring; i++) {
681 /* synchronize remaining new used entries if any */
683 update_used_ring(internal, i);
685 rte_vhost_get_vhost_vring(vid, i, &vq);
686 len = IFCVF_USED_RING_LEN(vq.size);
687 rte_vhost_log_used_vring(vid, i, 0, len);
689 size = RTE_ALIGN_CEIL(vring_size(vq.size, PAGE_SIZE),
691 rte_vfio_container_dma_unmap(internal->vfio_container_fd,
692 (uint64_t)(uintptr_t)internal->m_vring[i].desc,
695 rte_vhost_set_vring_base(vid, i, hw->vring[i].last_avail_idx,
696 hw->vring[i].last_used_idx);
697 rte_free(internal->m_vring[i].desc);
698 m_vring_iova += size;
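/* Relay new entries from the mediated used ring to the guest used ring and call the guest. */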
705 update_used_ring(struct ifcvf_internal *internal, uint16_t qid)
707 rte_vdpa_relay_vring_used(internal->vid, qid, &internal->m_vring[qid]);
708 rte_vhost_vring_call(internal->vid, qid);
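/*
 * Vring relay thread for the SW fallback. The epoll data.u64 packs the fd in
 * the upper 32 bits, the queue id shifted left by one below it, and a flag in
 * bit 0: 1 for a device interrupt eventfd (relay used entries to the guest),
 * 0 for a guest kickfd (notify the device).
 */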
712 vring_relay(void *arg)
714 int i, vid, epfd, fd, nfds;
715 struct ifcvf_internal *internal = (struct ifcvf_internal *)arg;
716 struct rte_vhost_vring vring;
718 struct epoll_event events[IFCVF_MAX_QUEUES * 4];
719 struct epoll_event ev;
724 q_num = rte_vhost_get_vring_num(vid);
726 /* add notify fd and interrupt fd to epoll */
727 epfd = epoll_create(IFCVF_MAX_QUEUES * 2);
729 DRV_LOG(ERR, "failed to create epoll instance.");
732 internal->epfd = epfd;
735 for (qid = 0; qid < q_num; qid++) {
736 ev.events = EPOLLIN | EPOLLPRI;
737 rte_vhost_get_vhost_vring(vid, qid, &vring);
738 ev.data.u64 = qid << 1 | (uint64_t)vring.kickfd << 32;
739 if (epoll_ctl(epfd, EPOLL_CTL_ADD, vring.kickfd, &ev) < 0) {
740 DRV_LOG(ERR, "epoll add error: %s", strerror(errno));
745 for (qid = 0; qid < q_num; qid += 2) {
746 ev.events = EPOLLIN | EPOLLPRI;
747 /* set bit 0 to flag this entry as a device interrupt eventfd */
748 ev.data.u64 = 1 | qid << 1 |
749 (uint64_t)internal->intr_fd[qid] << 32;
750 if (epoll_ctl(epfd, EPOLL_CTL_ADD, internal->intr_fd[qid], &ev)
752 DRV_LOG(ERR, "epoll add error: %s", strerror(errno));
755 update_used_ring(internal, qid);
758 /* start relay with a first kick */
759 for (qid = 0; qid < q_num; qid++)
760 ifcvf_notify_queue(&internal->hw, qid);
762 /* listen to the events and react accordingly */
764 nfds = epoll_wait(epfd, events, q_num * 2, -1);
768 DRV_LOG(ERR, "epoll_wait failed: %s", strerror(errno));
772 for (i = 0; i < nfds; i++) {
773 fd = (uint32_t)(events[i].data.u64 >> 32);
775 nbytes = read(fd, &buf, 8);
777 if (errno == EINTR ||
778 errno == EWOULDBLOCK ||
781 DRV_LOG(INFO, "Error reading "
788 qid = events[i].data.u32 >> 1;
790 if (events[i].data.u32 & 1)
791 update_used_ring(internal, qid);
793 ifcvf_notify_queue(&internal->hw, qid);
801 setup_vring_relay(struct ifcvf_internal *internal)
805 ret = pthread_create(&internal->tid, NULL, vring_relay,
808 DRV_LOG(ERR, "failed to create ring relay pthread.");
815 unset_vring_relay(struct ifcvf_internal *internal)
820 pthread_cancel(internal->tid);
821 pthread_join(internal->tid, &status);
825 if (internal->epfd >= 0)
826 close(internal->epfd);
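/*
 * Switch from the direct HW datapath to the mediated SW datapath when live
 * migration needs dirty-page logging: stop the HW and the notify relay,
 * re-arm VFIO interrupts for the RX relay, start the mediated vrings and
 * launch the vring relay thread.
 */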
833 ifcvf_sw_fallback_switchover(struct ifcvf_internal *internal)
836 int vid = internal->vid;
838 /* stop the direct IO data path */
839 unset_notify_relay(internal);
840 vdpa_ifcvf_stop(internal);
841 vdpa_disable_vfio_intr(internal);
843 ret = rte_vhost_host_notifier_ctrl(vid, RTE_VHOST_QUEUE_ALL, false);
844 if (ret && ret != -ENOTSUP)
847 /* set up interrupt for interrupt relay */
848 ret = vdpa_enable_vfio_intr(internal, 1);
853 ret = m_ifcvf_start(internal);
857 /* set up vring relay thread */
858 ret = setup_vring_relay(internal);
862 rte_vhost_host_notifier_ctrl(vid, RTE_VHOST_QUEUE_ALL, true);
864 internal->sw_fallback_running = true;
869 m_ifcvf_stop(internal);
871 vdpa_disable_vfio_intr(internal);
873 ifcvf_dma_map(internal, 0);
879 ifcvf_dev_config(int vid)
881 struct rte_vdpa_device *vdev;
882 struct internal_list *list;
883 struct ifcvf_internal *internal;
885 vdev = rte_vhost_get_vdpa_device(vid);
886 list = find_internal_resource_by_vdev(vdev);
888 DRV_LOG(ERR, "Invalid vDPA device: %p", vdev);
892 internal = list->internal;
894 rte_atomic32_set(&internal->dev_attached, 1);
895 update_datapath(internal);
897 if (rte_vhost_host_notifier_ctrl(vid, RTE_VHOST_QUEUE_ALL, true) != 0)
898 DRV_LOG(NOTICE, "vDPA (%s): software relay is used.",
901 internal->configured = 1;
906 ifcvf_dev_close(int vid)
908 struct rte_vdpa_device *vdev;
909 struct internal_list *list;
910 struct ifcvf_internal *internal;
912 vdev = rte_vhost_get_vdpa_device(vid);
913 list = find_internal_resource_by_vdev(vdev);
915 DRV_LOG(ERR, "Invalid vDPA device: %p", vdev);
919 internal = list->internal;
921 if (internal->sw_fallback_running) {
922 /* unset ring relay */
923 unset_vring_relay(internal);
926 m_ifcvf_stop(internal);
928 /* remove interrupt setting */
929 vdpa_disable_vfio_intr(internal);
931 /* unset DMA map for guest memory */
932 ifcvf_dma_map(internal, 0);
934 internal->sw_fallback_running = false;
936 rte_atomic32_set(&internal->dev_attached, 0);
937 update_datapath(internal);
940 internal->configured = 0;
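/*
 * Called on feature (re)negotiation. If dirty logging is requested, either
 * switch over to the SW fallback datapath or, with HW assistance, map the
 * log buffer at IFCVF_LOG_BASE and enable device dirty-page logging.
 */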
945 ifcvf_set_features(int vid)
947 uint64_t features = 0;
948 struct rte_vdpa_device *vdev;
949 struct internal_list *list;
950 struct ifcvf_internal *internal;
951 uint64_t log_base = 0, log_size = 0;
953 vdev = rte_vhost_get_vdpa_device(vid);
954 list = find_internal_resource_by_vdev(vdev);
956 DRV_LOG(ERR, "Invalid vDPA device: %p", vdev);
960 internal = list->internal;
961 rte_vhost_get_negotiated_features(vid, &features);
963 if (!RTE_VHOST_NEED_LOG(features))
966 if (internal->sw_lm) {
967 ifcvf_sw_fallback_switchover(internal);
969 rte_vhost_get_log_base(vid, &log_base, &log_size);
970 rte_vfio_container_dma_map(internal->vfio_container_fd,
971 log_base, IFCVF_LOG_BASE, log_size);
972 ifcvf_enable_logging(&internal->hw, IFCVF_LOG_BASE, log_size);
979 ifcvf_get_vfio_group_fd(int vid)
981 struct rte_vdpa_device *vdev;
982 struct internal_list *list;
984 vdev = rte_vhost_get_vdpa_device(vid);
985 list = find_internal_resource_by_vdev(vdev);
987 DRV_LOG(ERR, "Invalid vDPA device: %p", vdev);
991 return list->internal->vfio_group_fd;
995 ifcvf_get_vfio_device_fd(int vid)
997 struct rte_vdpa_device *vdev;
998 struct internal_list *list;
1000 vdev = rte_vhost_get_vdpa_device(vid);
1001 list = find_internal_resource_by_vdev(vdev);
1003 DRV_LOG(ERR, "Invalid vDPA device: %p", vdev);
1007 return list->internal->vfio_dev_fd;
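/*
 * Report the VFIO region offset of this queue's notify address so the vhost
 * library can expose it to the guest as a host notifier.
 */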
1011 ifcvf_get_notify_area(int vid, int qid, uint64_t *offset, uint64_t *size)
1013 struct rte_vdpa_device *vdev;
1014 struct internal_list *list;
1015 struct ifcvf_internal *internal;
1016 struct vfio_region_info reg = { .argsz = sizeof(reg) };
1019 vdev = rte_vhost_get_vdpa_device(vid);
1020 list = find_internal_resource_by_vdev(vdev);
1022 DRV_LOG(ERR, "Invalid vDPA device: %p", vdev);
1026 internal = list->internal;
1028 reg.index = ifcvf_get_notify_region(&internal->hw);
1029 ret = ioctl(internal->vfio_dev_fd, VFIO_DEVICE_GET_REGION_INFO, &reg);
1031 DRV_LOG(ERR, "Failed to get device region info: %s",
1036 *offset = ifcvf_get_queue_notify_off(&internal->hw, qid) + reg.offset;
1043 ifcvf_get_queue_num(struct rte_vdpa_device *vdev, uint32_t *queue_num)
1045 struct internal_list *list;
1047 list = find_internal_resource_by_vdev(vdev);
1049 DRV_LOG(ERR, "Invalid vDPA device: %p", vdev);
1053 *queue_num = list->internal->max_queues;
1059 ifcvf_get_vdpa_features(struct rte_vdpa_device *vdev, uint64_t *features)
1061 struct internal_list *list;
1063 list = find_internal_resource_by_vdev(vdev);
1065 DRV_LOG(ERR, "Invalid vDPA device: %p", vdev);
1069 *features = list->internal->features;
1074 #define VDPA_SUPPORTED_PROTOCOL_FEATURES \
1075 (1ULL << VHOST_USER_PROTOCOL_F_REPLY_ACK | \
1076 1ULL << VHOST_USER_PROTOCOL_F_SLAVE_REQ | \
1077 1ULL << VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD | \
1078 1ULL << VHOST_USER_PROTOCOL_F_HOST_NOTIFIER | \
1079 1ULL << VHOST_USER_PROTOCOL_F_LOG_SHMFD | \
1080 1ULL << VHOST_USER_PROTOCOL_F_STATUS)
1082 ifcvf_get_protocol_features(struct rte_vdpa_device *vdev, uint64_t *features)
1086 *features = VDPA_SUPPORTED_PROTOCOL_FEATURES;
1091 ifcvf_set_vring_state(int vid, int vring, int state)
1093 struct rte_vdpa_device *vdev;
1094 struct internal_list *list;
1095 struct ifcvf_internal *internal;
1096 struct ifcvf_hw *hw;
1097 struct ifcvf_pci_common_cfg *cfg;
1100 vdev = rte_vhost_get_vdpa_device(vid);
1101 list = find_internal_resource_by_vdev(vdev);
1103 DRV_LOG(ERR, "Invalid vDPA device: %p", vdev);
1107 internal = list->internal;
1108 if (vring < 0 || vring >= internal->max_queues * 2) {
1109 DRV_LOG(ERR, "Vring index %d is out of range", vring);
1114 if (!internal->configured)
1117 cfg = hw->common_cfg;
1118 IFCVF_WRITE_REG16(vring, &cfg->queue_select);
1119 IFCVF_WRITE_REG16(!!state, &cfg->queue_enable);
1121 if (!state && hw->vring[vring].enable) {
1122 ret = vdpa_disable_vfio_intr(internal);
1127 if (state && !hw->vring[vring].enable) {
1128 ret = vdpa_enable_vfio_intr(internal, 0);
1134 hw->vring[vring].enable = !!state;
1138 static struct rte_vdpa_dev_ops ifcvf_ops = {
1139 .get_queue_num = ifcvf_get_queue_num,
1140 .get_features = ifcvf_get_vdpa_features,
1141 .get_protocol_features = ifcvf_get_protocol_features,
1142 .dev_conf = ifcvf_dev_config,
1143 .dev_close = ifcvf_dev_close,
1144 .set_vring_state = ifcvf_set_vring_state,
1145 .set_features = ifcvf_set_features,
1146 .migration_done = NULL,
1147 .get_vfio_group_fd = ifcvf_get_vfio_group_fd,
1148 .get_vfio_device_fd = ifcvf_get_vfio_device_fd,
1149 .get_notify_area = ifcvf_get_notify_area,
1153 open_int(const char *key __rte_unused, const char *value, void *extra_args)
1155 uint16_t *n = extra_args;
1157 if (value == NULL || extra_args == NULL)
1160 *n = (uint16_t)strtoul(value, NULL, 0);
1161 if (*n == USHRT_MAX && errno == ERANGE)
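/*
 * PCI probe: take the device only when the 'vdpa' devarg is non-zero, set up
 * VFIO and the HW layer, adjust the advertised feature bits, register with
 * the vDPA framework and kick the datapath state machine.
 */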
1168 ifcvf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
1169 struct rte_pci_device *pci_dev)
1172 struct ifcvf_internal *internal = NULL;
1173 struct internal_list *list = NULL;
1175 int sw_fallback_lm = 0;
1176 struct rte_kvargs *kvlist = NULL;
1179 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1182 if (!pci_dev->device.devargs)
1185 kvlist = rte_kvargs_parse(pci_dev->device.devargs->args,
1186 ifcvf_valid_arguments);
1190 /* probe only when vdpa mode is specified */
1191 if (rte_kvargs_count(kvlist, IFCVF_VDPA_MODE) == 0) {
1192 rte_kvargs_free(kvlist);
1196 ret = rte_kvargs_process(kvlist, IFCVF_VDPA_MODE, &open_int,
1198 if (ret < 0 || vdpa_mode == 0) {
1199 rte_kvargs_free(kvlist);
1203 list = rte_zmalloc("ifcvf", sizeof(*list), 0);
1207 internal = rte_zmalloc("ifcvf", sizeof(*internal), 0);
1208 if (internal == NULL)
1211 internal->pdev = pci_dev;
1212 rte_spinlock_init(&internal->lock);
1214 if (ifcvf_vfio_setup(internal) < 0) {
1215 DRV_LOG(ERR, "failed to setup device %s", pci_dev->name);
1219 if (ifcvf_init_hw(&internal->hw, internal->pdev) < 0) {
1220 DRV_LOG(ERR, "failed to init device %s", pci_dev->name);
1224 internal->configured = 0;
1225 internal->max_queues = IFCVF_MAX_QUEUES;
1226 features = ifcvf_get_features(&internal->hw);
1227 internal->features = (features &
1228 ~(1ULL << VIRTIO_F_IOMMU_PLATFORM)) |
1229 (1ULL << VIRTIO_NET_F_GUEST_ANNOUNCE) |
1230 (1ULL << VIRTIO_NET_F_CTRL_VQ) |
1231 (1ULL << VIRTIO_NET_F_STATUS) |
1232 (1ULL << VHOST_USER_F_PROTOCOL_FEATURES) |
1233 (1ULL << VHOST_F_LOG_ALL);
1235 list->internal = internal;
1237 if (rte_kvargs_count(kvlist, IFCVF_SW_FALLBACK_LM)) {
1238 ret = rte_kvargs_process(kvlist, IFCVF_SW_FALLBACK_LM,
1239 &open_int, &sw_fallback_lm);
1243 internal->sw_lm = sw_fallback_lm;
1245 internal->vdev = rte_vdpa_register_device(&pci_dev->device, &ifcvf_ops);
1246 if (internal->vdev == NULL) {
1247 DRV_LOG(ERR, "failed to register device %s", pci_dev->name);
1251 pthread_mutex_lock(&internal_list_lock);
1252 TAILQ_INSERT_TAIL(&internal_list, list, next);
1253 pthread_mutex_unlock(&internal_list_lock);
1255 rte_atomic32_set(&internal->started, 1);
1256 update_datapath(internal);
1258 rte_kvargs_free(kvlist);
1262 rte_kvargs_free(kvlist);
1269 ifcvf_pci_remove(struct rte_pci_device *pci_dev)
1271 struct ifcvf_internal *internal;
1272 struct internal_list *list;
1274 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1277 list = find_internal_resource_by_dev(pci_dev);
1279 DRV_LOG(ERR, "Invalid device: %s", pci_dev->name);
1283 internal = list->internal;
1284 rte_atomic32_set(&internal->started, 0);
1285 update_datapath(internal);
1287 rte_pci_unmap_device(internal->pdev);
1288 rte_vfio_container_destroy(internal->vfio_container_fd);
1289 rte_vdpa_unregister_device(internal->vdev);
1291 pthread_mutex_lock(&internal_list_lock);
1292 TAILQ_REMOVE(&internal_list, list, next);
1293 pthread_mutex_unlock(&internal_list_lock);
1302 * IFCVF uses the same vendor ID and device ID as the virtio-net PCI
1303 * device, and is identified by its own subsystem vendor and device IDs.
1305 static const struct rte_pci_id pci_id_ifcvf_map[] = {
1306 { .class_id = RTE_CLASS_ANY_ID,
1307 .vendor_id = IFCVF_VENDOR_ID,
1308 .device_id = IFCVF_DEVICE_ID,
1309 .subsystem_vendor_id = IFCVF_SUBSYS_VENDOR_ID,
1310 .subsystem_device_id = IFCVF_SUBSYS_DEVICE_ID,
1313 { .vendor_id = 0, /* sentinel */
1317 static struct rte_pci_driver rte_ifcvf_vdpa = {
1318 .id_table = pci_id_ifcvf_map,
1320 .probe = ifcvf_pci_probe,
1321 .remove = ifcvf_pci_remove,
1324 RTE_PMD_REGISTER_PCI(net_ifcvf, rte_ifcvf_vdpa);
1325 RTE_PMD_REGISTER_PCI_TABLE(net_ifcvf, pci_id_ifcvf_map);
1326 RTE_PMD_REGISTER_KMOD_DEP(net_ifcvf, "* vfio-pci");