1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Intel Corporation
10 #include <sys/epoll.h>
11 #include <linux/virtio_net.h>
14 #include <rte_eal_paging.h>
15 #include <rte_malloc.h>
16 #include <rte_memory.h>
17 #include <rte_bus_pci.h>
18 #include <rte_vhost.h>
20 #include <rte_vdpa_dev.h>
22 #include <rte_spinlock.h>
24 #include <rte_kvargs.h>
25 #include <rte_devargs.h>
27 #include "base/ifcvf.h"
29 RTE_LOG_REGISTER(ifcvf_vdpa_logtype, pmd.vdpa.ifcvf, NOTICE);
30 #define DRV_LOG(level, fmt, args...) \
31 rte_log(RTE_LOG_ ## level, ifcvf_vdpa_logtype, \
32 "IFCVF %s(): " fmt "\n", __func__, ##args)
34 #define IFCVF_USED_RING_LEN(size) \
35 ((size) * sizeof(struct vring_used_elem) + sizeof(uint16_t) * 3)
37 #define IFCVF_VDPA_MODE "vdpa"
38 #define IFCVF_SW_FALLBACK_LM "sw-live-migration"
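/*
 * Devargs keys accepted by this driver: the device is only taken over when
 * "vdpa" is set to a non-zero value, and "sw-live-migration" selects the
 * software relay fallback for dirty-page logging (see ifcvf_pci_probe()).
 */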
40 static const char * const ifcvf_valid_arguments[] = {
46 struct ifcvf_internal {
47 struct rte_pci_device *pdev;
50 int vfio_container_fd;
53 pthread_t tid; /* thread for notify relay */
56 struct rte_vdpa_device *vdev;
59 rte_atomic32_t started;
60 rte_atomic32_t dev_attached;
61 rte_atomic32_t running;
64 bool sw_fallback_running;
65 /* mediated vring for sw fallback */
66 struct vring m_vring[IFCVF_MAX_QUEUES * 2];
67 /* eventfd for used ring interrupt */
68 int intr_fd[IFCVF_MAX_QUEUES * 2];
71 struct internal_list {
72 TAILQ_ENTRY(internal_list) next;
73 struct ifcvf_internal *internal;
76 TAILQ_HEAD(internal_list_head, internal_list);
77 static struct internal_list_head internal_list =
78 TAILQ_HEAD_INITIALIZER(internal_list);
80 static pthread_mutex_t internal_list_lock = PTHREAD_MUTEX_INITIALIZER;
82 static void update_used_ring(struct ifcvf_internal *internal, uint16_t qid);
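/*
 * Find the per-device state backing a given vDPA device handle.
 * The list walk is protected by internal_list_lock.
 */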
84 static struct internal_list *
85 find_internal_resource_by_vdev(struct rte_vdpa_device *vdev)
88 struct internal_list *list;
90 pthread_mutex_lock(&internal_list_lock);
92 TAILQ_FOREACH(list, &internal_list, next) {
93 if (vdev == list->internal->vdev) {
99 pthread_mutex_unlock(&internal_list_lock);
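/* Same lookup, but keyed on the PCI address; used at remove time. */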
107 static struct internal_list *
108 find_internal_resource_by_dev(struct rte_pci_device *pdev)
111 struct internal_list *list;
113 pthread_mutex_lock(&internal_list_lock);
115 TAILQ_FOREACH(list, &internal_list, next) {
116 if (!rte_pci_addr_cmp(&pdev->addr,
117 &list->internal->pdev->addr)) {
123 pthread_mutex_unlock(&internal_list_lock);
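/*
 * Put the VF into a dedicated VFIO container: look up its IOMMU group,
 * bind the group to a freshly created container, map the PCI device and
 * mirror its BAR resources into the ifcvf_hw descriptor.
 */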
132 ifcvf_vfio_setup(struct ifcvf_internal *internal)
134 struct rte_pci_device *dev = internal->pdev;
135 char devname[RTE_DEV_NAME_MAX_LEN] = {0};
139 internal->vfio_dev_fd = -1;
140 internal->vfio_group_fd = -1;
141 internal->vfio_container_fd = -1;
143 rte_pci_device_name(&dev->addr, devname, RTE_DEV_NAME_MAX_LEN);
144 ret = rte_vfio_get_group_num(rte_pci_get_sysfs_path(), devname,
147 DRV_LOG(ERR, "failed to get IOMMU group of %s", devname);
151 internal->vfio_container_fd = rte_vfio_container_create();
152 if (internal->vfio_container_fd < 0)
155 internal->vfio_group_fd = rte_vfio_container_group_bind(
156 internal->vfio_container_fd, iommu_group_num);
157 if (internal->vfio_group_fd < 0)
160 if (rte_pci_map_device(dev))
163 internal->vfio_dev_fd = dev->intr_handle.vfio_dev_fd;
165 for (i = 0; i < RTE_MIN(PCI_MAX_RESOURCE, IFCVF_PCI_MAX_RESOURCE);
167 internal->hw.mem_resource[i].addr =
168 internal->pdev->mem_resource[i].addr;
169 internal->hw.mem_resource[i].phys_addr =
170 internal->pdev->mem_resource[i].phys_addr;
171 internal->hw.mem_resource[i].len =
172 internal->pdev->mem_resource[i].len;
178 rte_vfio_container_destroy(internal->vfio_container_fd);
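/*
 * DMA-map (do_map != 0) or unmap every region of the guest memory table
 * into the VFIO container, so the VF can address guest buffers by GPA.
 */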
183 ifcvf_dma_map(struct ifcvf_internal *internal, int do_map)
187 struct rte_vhost_memory *mem = NULL;
188 int vfio_container_fd;
190 ret = rte_vhost_get_mem_table(internal->vid, &mem);
192 DRV_LOG(ERR, "failed to get VM memory layout.");
196 vfio_container_fd = internal->vfio_container_fd;
198 for (i = 0; i < mem->nregions; i++) {
199 struct rte_vhost_mem_region *reg;
201 reg = &mem->regions[i];
202 DRV_LOG(INFO, "%s, region %u: HVA 0x%" PRIx64 ", "
203 "GPA 0x%" PRIx64 ", size 0x%" PRIx64 ".",
204 do_map ? "DMA map" : "DMA unmap", i,
205 reg->host_user_addr, reg->guest_phys_addr, reg->size);
208 ret = rte_vfio_container_dma_map(vfio_container_fd,
209 reg->host_user_addr, reg->guest_phys_addr,
212 DRV_LOG(ERR, "DMA map failed.");
216 ret = rte_vfio_container_dma_unmap(vfio_container_fd,
217 reg->host_user_addr, reg->guest_phys_addr,
220 DRV_LOG(ERR, "DMA unmap failed.");
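/*
 * Translate a host virtual address from the vhost memory table into the
 * guest physical address the hardware must use; callers treat a zero
 * result as "not found".
 */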
233 hva_to_gpa(int vid, uint64_t hva)
235 struct rte_vhost_memory *mem = NULL;
236 struct rte_vhost_mem_region *reg;
240 if (rte_vhost_get_mem_table(vid, &mem) < 0)
243 for (i = 0; i < mem->nregions; i++) {
244 reg = &mem->regions[i];
246 if (hva >= reg->host_user_addr &&
247 hva < reg->host_user_addr + reg->size) {
248 gpa = hva - reg->host_user_addr + reg->guest_phys_addr;
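/*
 * Hardware datapath start: fetch the negotiated features and, for every
 * vring, the GPAs of the descriptor/avail/used rings plus the last
 * avail/used indexes, program them into the VF and start it.
 */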
260 vdpa_ifcvf_start(struct ifcvf_internal *internal)
262 struct ifcvf_hw *hw = &internal->hw;
265 struct rte_vhost_vring vq;
269 nr_vring = rte_vhost_get_vring_num(vid);
270 rte_vhost_get_negotiated_features(vid, &hw->req_features);
272 for (i = 0; i < nr_vring; i++) {
273 rte_vhost_get_vhost_vring(vid, i, &vq);
274 gpa = hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.desc);
276 DRV_LOG(ERR, "Failed to get GPA for descriptor ring.");
279 hw->vring[i].desc = gpa;
281 gpa = hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.avail);
283 DRV_LOG(ERR, "Failed to get GPA for available ring.");
286 hw->vring[i].avail = gpa;
288 gpa = hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.used);
290 DRV_LOG(ERR, "Failed to get GPA for used ring.");
293 hw->vring[i].used = gpa;
295 hw->vring[i].size = vq.size;
296 rte_vhost_get_vring_base(vid, i, &hw->vring[i].last_avail_idx,
297 &hw->vring[i].last_used_idx);
301 return ifcvf_start_hw(&internal->hw);
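/*
 * Hardware datapath stop: halt the VF, push the final ring indexes back to
 * vhost and, when dirty logging was negotiated, stop logging, unmap the log
 * buffer and mark the used rings dirty on behalf of the hardware.
 */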
305 vdpa_ifcvf_stop(struct ifcvf_internal *internal)
307 struct ifcvf_hw *hw = &internal->hw;
310 uint64_t features = 0;
311 uint64_t log_base = 0, log_size = 0;
317 for (i = 0; i < hw->nr_vring; i++)
318 rte_vhost_set_vring_base(vid, i, hw->vring[i].last_avail_idx,
319 hw->vring[i].last_used_idx);
324 rte_vhost_get_negotiated_features(vid, &features);
325 if (RTE_VHOST_NEED_LOG(features)) {
326 ifcvf_disable_logging(hw);
327 rte_vhost_get_log_base(internal->vid, &log_base, &log_size);
328 rte_vfio_container_dma_unmap(internal->vfio_container_fd,
329 log_base, IFCVF_LOG_BASE, log_size);
331	 * IFCVF only marks packet buffer pages dirty in hardware, so
332	 * software marks the used rings dirty here after the device stops.
334 for (i = 0; i < hw->nr_vring; i++) {
335 len = IFCVF_USED_RING_LEN(hw->vring[i].size);
336 rte_vhost_log_used_vring(vid, i, 0, len);
341 #define MSIX_IRQ_SET_BUF_LEN (sizeof(struct vfio_irq_set) + \
342 sizeof(int) * (IFCVF_MAX_QUEUES * 2 + 1))
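/*
 * Program the MSI-X vectors through VFIO: vector 0 carries the device
 * (config) interrupt, vectors 1..n carry each vring's callfd.  With m_rx
 * set, RX vrings instead get a driver-owned eventfd, so the relay thread
 * can observe used-ring interrupts before forwarding them to the guest.
 */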
344 vdpa_enable_vfio_intr(struct ifcvf_internal *internal, bool m_rx)
347 uint32_t i, nr_vring;
348 char irq_set_buf[MSIX_IRQ_SET_BUF_LEN];
349 struct vfio_irq_set *irq_set;
351 struct rte_vhost_vring vring;
356 nr_vring = rte_vhost_get_vring_num(internal->vid);
358 irq_set = (struct vfio_irq_set *)irq_set_buf;
359 irq_set->argsz = sizeof(irq_set_buf);
360 irq_set->count = nr_vring + 1;
361 irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
362 VFIO_IRQ_SET_ACTION_TRIGGER;
363 irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
365 fd_ptr = (int *)&irq_set->data;
366 fd_ptr[RTE_INTR_VEC_ZERO_OFFSET] = internal->pdev->intr_handle.fd;
368 for (i = 0; i < nr_vring; i++)
369 internal->intr_fd[i] = -1;
371 for (i = 0; i < nr_vring; i++) {
372 rte_vhost_get_vhost_vring(internal->vid, i, &vring);
373 fd_ptr[RTE_INTR_VEC_RXTX_OFFSET + i] = vring.callfd;
374 if ((i & 1) == 0 && m_rx == true) {
375 fd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
377 DRV_LOG(ERR, "can't setup eventfd: %s",
381 internal->intr_fd[i] = fd;
382 fd_ptr[RTE_INTR_VEC_RXTX_OFFSET + i] = fd;
386 ret = ioctl(internal->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
388 DRV_LOG(ERR, "Error enabling MSI-X interrupts: %s",
397 vdpa_disable_vfio_intr(struct ifcvf_internal *internal)
400 uint32_t i, nr_vring;
401 char irq_set_buf[MSIX_IRQ_SET_BUF_LEN];
402 struct vfio_irq_set *irq_set;
404 irq_set = (struct vfio_irq_set *)irq_set_buf;
405 irq_set->argsz = sizeof(irq_set_buf);
407 irq_set->flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER;
408 irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
411 nr_vring = rte_vhost_get_vring_num(internal->vid);
412 for (i = 0; i < nr_vring; i++) {
413 if (internal->intr_fd[i] >= 0)
414 close(internal->intr_fd[i]);
415 internal->intr_fd[i] = -1;
418 ret = ioctl(internal->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
420 DRV_LOG(ERR, "Error disabling MSI-X interrupts: %s",
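/*
 * Notify relay thread for the direct I/O path: wait on every vring's
 * kickfd with epoll and turn each guest kick into a doorbell write via
 * ifcvf_notify_queue().
 */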
429 notify_relay(void *arg)
431 int i, kickfd, epfd, nfds = 0;
433 struct epoll_event events[IFCVF_MAX_QUEUES * 2];
434 struct epoll_event ev;
437 struct rte_vhost_vring vring;
438 struct ifcvf_internal *internal = (struct ifcvf_internal *)arg;
439 struct ifcvf_hw *hw = &internal->hw;
441 q_num = rte_vhost_get_vring_num(internal->vid);
443 epfd = epoll_create(IFCVF_MAX_QUEUES * 2);
445 DRV_LOG(ERR, "failed to create epoll instance.");
448 internal->epfd = epfd;
451 for (qid = 0; qid < q_num; qid++) {
452 ev.events = EPOLLIN | EPOLLPRI;
453 rte_vhost_get_vhost_vring(internal->vid, qid, &vring);
454 ev.data.u64 = qid | (uint64_t)vring.kickfd << 32;
455 if (epoll_ctl(epfd, EPOLL_CTL_ADD, vring.kickfd, &ev) < 0) {
456 DRV_LOG(ERR, "epoll add error: %s", strerror(errno));
462 nfds = epoll_wait(epfd, events, q_num, -1);
466 DRV_LOG(ERR, "epoll_wait failed: %s", strerror(errno));
470 for (i = 0; i < nfds; i++) {
471 qid = events[i].data.u32;
472 kickfd = (uint32_t)(events[i].data.u64 >> 32);
474 nbytes = read(kickfd, &buf, 8);
476 if (errno == EINTR ||
477 errno == EWOULDBLOCK ||
480 DRV_LOG(INFO, "Error reading "
487 ifcvf_notify_queue(hw, qid);
495 setup_notify_relay(struct ifcvf_internal *internal)
499 ret = pthread_create(&internal->tid, NULL, notify_relay,
502 DRV_LOG(ERR, "failed to create notify relay pthread.");
509 unset_notify_relay(struct ifcvf_internal *internal)
514 pthread_cancel(internal->tid);
515 pthread_join(internal->tid, &status);
519 if (internal->epfd >= 0)
520 close(internal->epfd);
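/*
 * Central datapath state machine, serialized by internal->lock: when the
 * device is both started and attached, map guest memory, enable MSI-X,
 * start the hardware and the notify relay; otherwise tear everything down
 * in the reverse order.
 */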
527 update_datapath(struct ifcvf_internal *internal)
531 rte_spinlock_lock(&internal->lock);
533 if (!rte_atomic32_read(&internal->running) &&
534 (rte_atomic32_read(&internal->started) &&
535 rte_atomic32_read(&internal->dev_attached))) {
536 ret = ifcvf_dma_map(internal, 1);
540 ret = vdpa_enable_vfio_intr(internal, 0);
544 ret = vdpa_ifcvf_start(internal);
548 ret = setup_notify_relay(internal);
552 rte_atomic32_set(&internal->running, 1);
553 } else if (rte_atomic32_read(&internal->running) &&
554 (!rte_atomic32_read(&internal->started) ||
555 !rte_atomic32_read(&internal->dev_attached))) {
556 ret = unset_notify_relay(internal);
560 vdpa_ifcvf_stop(internal);
562 ret = vdpa_disable_vfio_intr(internal);
566 ret = ifcvf_dma_map(internal, 0);
570 rte_atomic32_set(&internal->running, 0);
573 rte_spinlock_unlock(&internal->lock);
576 rte_spinlock_unlock(&internal->lock);
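/*
 * Software fallback start: allocate a mediated (shadow) vring per queue,
 * DMA-map it at the IFCVF_MEDIATED_VRING IOVA, and point the hardware's
 * used ring of RX queues at the shadow copy so the driver can relay used
 * entries (and log dirty pages) to the real guest ring.
 */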
581 m_ifcvf_start(struct ifcvf_internal *internal)
583 struct ifcvf_hw *hw = &internal->hw;
584 uint32_t i, nr_vring;
586 struct rte_vhost_vring vq;
588 uint64_t m_vring_iova = IFCVF_MEDIATED_VRING;
592 memset(&vq, 0, sizeof(vq));
594 nr_vring = rte_vhost_get_vring_num(vid);
595 rte_vhost_get_negotiated_features(vid, &hw->req_features);
597 for (i = 0; i < nr_vring; i++) {
598 rte_vhost_get_vhost_vring(vid, i, &vq);
600 size = RTE_ALIGN_CEIL(vring_size(vq.size, rte_mem_page_size()),
601 rte_mem_page_size());
602 vring_buf = rte_zmalloc("ifcvf", size, rte_mem_page_size());
603 vring_init(&internal->m_vring[i], vq.size, vring_buf,
604 rte_mem_page_size());
606 ret = rte_vfio_container_dma_map(internal->vfio_container_fd,
607 (uint64_t)(uintptr_t)vring_buf, m_vring_iova, size);
609 DRV_LOG(ERR, "mediated vring DMA map failed.");
613 gpa = hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.desc);
615 DRV_LOG(ERR, "Failed to get GPA for descriptor ring.");
618 hw->vring[i].desc = gpa;
620 gpa = hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.avail);
622 DRV_LOG(ERR, "Failed to get GPA for available ring.");
625 hw->vring[i].avail = gpa;
627 /* Direct I/O for Tx queue, relay for Rx queue */
629 gpa = hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.used);
631 DRV_LOG(ERR, "Failed to get GPA for used ring.");
634 hw->vring[i].used = gpa;
636 hw->vring[i].used = m_vring_iova +
637 (char *)internal->m_vring[i].used -
638 (char *)internal->m_vring[i].desc;
641 hw->vring[i].size = vq.size;
643 rte_vhost_get_vring_base(vid, i,
644 &internal->m_vring[i].avail->idx,
645 &internal->m_vring[i].used->idx);
647 rte_vhost_get_vring_base(vid, i, &hw->vring[i].last_avail_idx,
648 &hw->vring[i].last_used_idx);
650 m_vring_iova += size;
652 hw->nr_vring = nr_vring;
654 return ifcvf_start_hw(&internal->hw);
657 for (i = 0; i < nr_vring; i++)
658 if (internal->m_vring[i].desc)
659 rte_free(internal->m_vring[i].desc);
665 m_ifcvf_stop(struct ifcvf_internal *internal)
669 struct rte_vhost_vring vq;
670 struct ifcvf_hw *hw = &internal->hw;
671 uint64_t m_vring_iova = IFCVF_MEDIATED_VRING;
677 for (i = 0; i < hw->nr_vring; i++) {
678 /* synchronize remaining new used entries if any */
680 update_used_ring(internal, i);
682 rte_vhost_get_vhost_vring(vid, i, &vq);
683 len = IFCVF_USED_RING_LEN(vq.size);
684 rte_vhost_log_used_vring(vid, i, 0, len);
686 size = RTE_ALIGN_CEIL(vring_size(vq.size, rte_mem_page_size()),
687 rte_mem_page_size());
688 rte_vfio_container_dma_unmap(internal->vfio_container_fd,
689 (uint64_t)(uintptr_t)internal->m_vring[i].desc,
692 rte_vhost_set_vring_base(vid, i, hw->vring[i].last_avail_idx,
693 hw->vring[i].last_used_idx);
694 rte_free(internal->m_vring[i].desc);
695 m_vring_iova += size;
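/* Relay freshly used descriptors from the shadow ring to the guest ring and kick the guest. */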
702 update_used_ring(struct ifcvf_internal *internal, uint16_t qid)
704 rte_vdpa_relay_vring_used(internal->vid, qid, &internal->m_vring[qid]);
705 rte_vhost_vring_call(internal->vid, qid);
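/*
 * Relay thread for the software fallback: epoll on both the guest kickfds
 * and the per-RX-queue interrupt eventfds.  Bit 0 of the epoll user data
 * distinguishes the two: interrupt events trigger a used-ring sync, kicks
 * are forwarded to the device doorbell.
 */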
709 vring_relay(void *arg)
711 int i, vid, epfd, fd, nfds;
712 struct ifcvf_internal *internal = (struct ifcvf_internal *)arg;
713 struct rte_vhost_vring vring;
715 struct epoll_event events[IFCVF_MAX_QUEUES * 4];
716 struct epoll_event ev;
721 q_num = rte_vhost_get_vring_num(vid);
723 /* add notify fd and interrupt fd to epoll */
724 epfd = epoll_create(IFCVF_MAX_QUEUES * 2);
726 DRV_LOG(ERR, "failed to create epoll instance.");
729 internal->epfd = epfd;
732 for (qid = 0; qid < q_num; qid++) {
733 ev.events = EPOLLIN | EPOLLPRI;
734 rte_vhost_get_vhost_vring(vid, qid, &vring);
735 ev.data.u64 = qid << 1 | (uint64_t)vring.kickfd << 32;
736 if (epoll_ctl(epfd, EPOLL_CTL_ADD, vring.kickfd, &ev) < 0) {
737 DRV_LOG(ERR, "epoll add error: %s", strerror(errno));
742 for (qid = 0; qid < q_num; qid += 2) {
743 ev.events = EPOLLIN | EPOLLPRI;
744 /* set bit 0 to flag this entry as an interrupt eventfd */
745 ev.data.u64 = 1 | qid << 1 |
746 (uint64_t)internal->intr_fd[qid] << 32;
747 if (epoll_ctl(epfd, EPOLL_CTL_ADD, internal->intr_fd[qid], &ev)
749 DRV_LOG(ERR, "epoll add error: %s", strerror(errno));
752 update_used_ring(internal, qid);
755 /* start relay with a first kick */
756 for (qid = 0; qid < q_num; qid++)
757 ifcvf_notify_queue(&internal->hw, qid);
759 /* listen to the events and react accordingly */
761 nfds = epoll_wait(epfd, events, q_num * 2, -1);
765 DRV_LOG(ERR, "epoll_wait failed: %s", strerror(errno));
769 for (i = 0; i < nfds; i++) {
770 fd = (uint32_t)(events[i].data.u64 >> 32);
772 nbytes = read(fd, &buf, 8);
774 if (errno == EINTR ||
775 errno == EWOULDBLOCK ||
778 DRV_LOG(INFO, "Error reading "
785 qid = events[i].data.u32 >> 1;
787 if (events[i].data.u32 & 1)
788 update_used_ring(internal, qid);
790 ifcvf_notify_queue(&internal->hw, qid);
798 setup_vring_relay(struct ifcvf_internal *internal)
802 ret = pthread_create(&internal->tid, NULL, vring_relay,
805 DRV_LOG(ERR, "failed to create ring relay pthread.");
812 unset_vring_relay(struct ifcvf_internal *internal)
817 pthread_cancel(internal->tid);
818 pthread_join(internal->tid, &status);
822 if (internal->epfd >= 0)
823 close(internal->epfd);
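/*
 * Live-migration switchover: stop the direct hardware datapath, re-arm the
 * interrupts in relay mode and restart the device on the mediated vrings
 * with the vring relay thread in the middle.
 */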
830 ifcvf_sw_fallback_switchover(struct ifcvf_internal *internal)
833 int vid = internal->vid;
835 /* stop the direct IO data path */
836 unset_notify_relay(internal);
837 vdpa_ifcvf_stop(internal);
838 vdpa_disable_vfio_intr(internal);
840 ret = rte_vhost_host_notifier_ctrl(vid, RTE_VHOST_QUEUE_ALL, false);
841 if (ret && ret != -ENOTSUP)
844 /* set up interrupt for interrupt relay */
845 ret = vdpa_enable_vfio_intr(internal, 1);
850 ret = m_ifcvf_start(internal);
854 /* set up vring relay thread */
855 ret = setup_vring_relay(internal);
859 rte_vhost_host_notifier_ctrl(vid, RTE_VHOST_QUEUE_ALL, true);
861 internal->sw_fallback_running = true;
866 m_ifcvf_stop(internal);
868 vdpa_disable_vfio_intr(internal);
870 ifcvf_dma_map(internal, 0);
876 ifcvf_dev_config(int vid)
878 struct rte_vdpa_device *vdev;
879 struct internal_list *list;
880 struct ifcvf_internal *internal;
882 vdev = rte_vhost_get_vdpa_device(vid);
883 list = find_internal_resource_by_vdev(vdev);
885 DRV_LOG(ERR, "Invalid vDPA device: %p", vdev);
889 internal = list->internal;
891 rte_atomic32_set(&internal->dev_attached, 1);
892 update_datapath(internal);
894 if (rte_vhost_host_notifier_ctrl(vid, RTE_VHOST_QUEUE_ALL, true) != 0)
895 DRV_LOG(NOTICE, "vDPA (%s): software relay is used.",
898 internal->configured = 1;
903 ifcvf_dev_close(int vid)
905 struct rte_vdpa_device *vdev;
906 struct internal_list *list;
907 struct ifcvf_internal *internal;
909 vdev = rte_vhost_get_vdpa_device(vid);
910 list = find_internal_resource_by_vdev(vdev);
912 DRV_LOG(ERR, "Invalid vDPA device: %p", vdev);
916 internal = list->internal;
918 if (internal->sw_fallback_running) {
919 /* unset ring relay */
920 unset_vring_relay(internal);
923 m_ifcvf_stop(internal);
925 /* remove interrupt setting */
926 vdpa_disable_vfio_intr(internal);
928 /* unset DMA map for guest memory */
929 ifcvf_dma_map(internal, 0);
931 internal->sw_fallback_running = false;
933 rte_atomic32_set(&internal->dev_attached, 0);
934 update_datapath(internal);
937 internal->configured = 0;
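/*
 * Called by vhost when the negotiated features change.  If dirty logging is
 * requested, either switch to the software fallback (sw_lm) or map the log
 * buffer at IFCVF_LOG_BASE and let the hardware log dirty pages.
 */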
942 ifcvf_set_features(int vid)
944 uint64_t features = 0;
945 struct rte_vdpa_device *vdev;
946 struct internal_list *list;
947 struct ifcvf_internal *internal;
948 uint64_t log_base = 0, log_size = 0;
950 vdev = rte_vhost_get_vdpa_device(vid);
951 list = find_internal_resource_by_vdev(vdev);
953 DRV_LOG(ERR, "Invalid vDPA device: %p", vdev);
957 internal = list->internal;
958 rte_vhost_get_negotiated_features(vid, &features);
960 if (!RTE_VHOST_NEED_LOG(features))
963 if (internal->sw_lm) {
964 ifcvf_sw_fallback_switchover(internal);
966 rte_vhost_get_log_base(vid, &log_base, &log_size);
967 rte_vfio_container_dma_map(internal->vfio_container_fd,
968 log_base, IFCVF_LOG_BASE, log_size);
969 ifcvf_enable_logging(&internal->hw, IFCVF_LOG_BASE, log_size);
976 ifcvf_get_vfio_group_fd(int vid)
978 struct rte_vdpa_device *vdev;
979 struct internal_list *list;
981 vdev = rte_vhost_get_vdpa_device(vid);
982 list = find_internal_resource_by_vdev(vdev);
984 DRV_LOG(ERR, "Invalid vDPA device: %p", vdev);
988 return list->internal->vfio_group_fd;
992 ifcvf_get_vfio_device_fd(int vid)
994 struct rte_vdpa_device *vdev;
995 struct internal_list *list;
997 vdev = rte_vhost_get_vdpa_device(vid);
998 list = find_internal_resource_by_vdev(vdev);
1000 DRV_LOG(ERR, "Invalid vDPA device: %p", vdev);
1004 return list->internal->vfio_dev_fd;
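/*
 * Report the VFIO region offset and size of this queue's notify (doorbell)
 * area so vhost can let the guest kick the hardware directly.
 */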
1008 ifcvf_get_notify_area(int vid, int qid, uint64_t *offset, uint64_t *size)
1010 struct rte_vdpa_device *vdev;
1011 struct internal_list *list;
1012 struct ifcvf_internal *internal;
1013 struct vfio_region_info reg = { .argsz = sizeof(reg) };
1016 vdev = rte_vhost_get_vdpa_device(vid);
1017 list = find_internal_resource_by_vdev(vdev);
1019 DRV_LOG(ERR, "Invalid vDPA device: %p", vdev);
1023 internal = list->internal;
1025 reg.index = ifcvf_get_notify_region(&internal->hw);
1026 ret = ioctl(internal->vfio_dev_fd, VFIO_DEVICE_GET_REGION_INFO, &reg);
1028 DRV_LOG(ERR, "Failed to get device region info: %s",
1033 *offset = ifcvf_get_queue_notify_off(&internal->hw, qid) + reg.offset;
1040 ifcvf_get_queue_num(struct rte_vdpa_device *vdev, uint32_t *queue_num)
1042 struct internal_list *list;
1044 list = find_internal_resource_by_vdev(vdev);
1046 DRV_LOG(ERR, "Invalid vDPA device: %p", vdev);
1050 *queue_num = list->internal->max_queues;
1056 ifcvf_get_vdpa_features(struct rte_vdpa_device *vdev, uint64_t *features)
1058 struct internal_list *list;
1060 list = find_internal_resource_by_vdev(vdev);
1062 DRV_LOG(ERR, "Invalid vDPA device: %p", vdev);
1066 *features = list->internal->features;
1071 #define VDPA_SUPPORTED_PROTOCOL_FEATURES \
1072 (1ULL << VHOST_USER_PROTOCOL_F_REPLY_ACK | \
1073 1ULL << VHOST_USER_PROTOCOL_F_SLAVE_REQ | \
1074 1ULL << VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD | \
1075 1ULL << VHOST_USER_PROTOCOL_F_HOST_NOTIFIER | \
1076 1ULL << VHOST_USER_PROTOCOL_F_LOG_SHMFD | \
1077 1ULL << VHOST_USER_PROTOCOL_F_STATUS)
1079 ifcvf_get_protocol_features(struct rte_vdpa_device *vdev, uint64_t *features)
1083 *features = VDPA_SUPPORTED_PROTOCOL_FEATURES;
1088 ifcvf_set_vring_state(int vid, int vring, int state)
1090 struct rte_vdpa_device *vdev;
1091 struct internal_list *list;
1092 struct ifcvf_internal *internal;
1093 struct ifcvf_hw *hw;
1094 struct ifcvf_pci_common_cfg *cfg;
1097 vdev = rte_vhost_get_vdpa_device(vid);
1098 list = find_internal_resource_by_vdev(vdev);
1100 DRV_LOG(ERR, "Invalid vDPA device: %p", vdev);
1104 internal = list->internal;
1105 if (vring < 0 || vring >= internal->max_queues * 2) {
1106 DRV_LOG(ERR, "Invalid vring index %d", vring);
1111 if (!internal->configured)
1114 cfg = hw->common_cfg;
1115 IFCVF_WRITE_REG16(vring, &cfg->queue_select);
1116 IFCVF_WRITE_REG16(!!state, &cfg->queue_enable);
1118 if (!state && hw->vring[vring].enable) {
1119 ret = vdpa_disable_vfio_intr(internal);
1124 if (state && !hw->vring[vring].enable) {
1125 ret = vdpa_enable_vfio_intr(internal, 0);
1131 hw->vring[vring].enable = !!state;
1135 static struct rte_vdpa_dev_ops ifcvf_ops = {
1136 .get_queue_num = ifcvf_get_queue_num,
1137 .get_features = ifcvf_get_vdpa_features,
1138 .get_protocol_features = ifcvf_get_protocol_features,
1139 .dev_conf = ifcvf_dev_config,
1140 .dev_close = ifcvf_dev_close,
1141 .set_vring_state = ifcvf_set_vring_state,
1142 .set_features = ifcvf_set_features,
1143 .migration_done = NULL,
1144 .get_vfio_group_fd = ifcvf_get_vfio_group_fd,
1145 .get_vfio_device_fd = ifcvf_get_vfio_device_fd,
1146 .get_notify_area = ifcvf_get_notify_area,
1150 open_int(const char *key __rte_unused, const char *value, void *extra_args)
1152 uint16_t *n = extra_args;
1154 if (value == NULL || extra_args == NULL)
1157 *n = (uint16_t)strtoul(value, NULL, 0);
1158 if (*n == USHRT_MAX && errno == ERANGE)
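/*
 * PCI probe: only in the primary process and only when the "vdpa" devarg is
 * present and non-zero.  Sets up VFIO, initializes the hardware, advertises
 * the supported virtio/vhost features and registers the vDPA device before
 * kicking the datapath state machine.
 */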
1165 ifcvf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
1166 struct rte_pci_device *pci_dev)
1169 struct ifcvf_internal *internal = NULL;
1170 struct internal_list *list = NULL;
1172 int sw_fallback_lm = 0;
1173 struct rte_kvargs *kvlist = NULL;
1176 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1179 if (!pci_dev->device.devargs)
1182 kvlist = rte_kvargs_parse(pci_dev->device.devargs->args,
1183 ifcvf_valid_arguments);
1187 /* probe only when vdpa mode is specified */
1188 if (rte_kvargs_count(kvlist, IFCVF_VDPA_MODE) == 0) {
1189 rte_kvargs_free(kvlist);
1193 ret = rte_kvargs_process(kvlist, IFCVF_VDPA_MODE, &open_int,
1195 if (ret < 0 || vdpa_mode == 0) {
1196 rte_kvargs_free(kvlist);
1200 list = rte_zmalloc("ifcvf", sizeof(*list), 0);
1204 internal = rte_zmalloc("ifcvf", sizeof(*internal), 0);
1205 if (internal == NULL)
1208 internal->pdev = pci_dev;
1209 rte_spinlock_init(&internal->lock);
1211 if (ifcvf_vfio_setup(internal) < 0) {
1212 DRV_LOG(ERR, "failed to setup device %s", pci_dev->name);
1216 if (ifcvf_init_hw(&internal->hw, internal->pdev) < 0) {
1217 DRV_LOG(ERR, "failed to init device %s", pci_dev->name);
1221 internal->configured = 0;
1222 internal->max_queues = IFCVF_MAX_QUEUES;
1223 features = ifcvf_get_features(&internal->hw);
1224 internal->features = (features &
1225 ~(1ULL << VIRTIO_F_IOMMU_PLATFORM)) |
1226 (1ULL << VIRTIO_NET_F_GUEST_ANNOUNCE) |
1227 (1ULL << VIRTIO_NET_F_CTRL_VQ) |
1228 (1ULL << VIRTIO_NET_F_STATUS) |
1229 (1ULL << VHOST_USER_F_PROTOCOL_FEATURES) |
1230 (1ULL << VHOST_F_LOG_ALL);
1232 list->internal = internal;
1234 if (rte_kvargs_count(kvlist, IFCVF_SW_FALLBACK_LM)) {
1235 ret = rte_kvargs_process(kvlist, IFCVF_SW_FALLBACK_LM,
1236 &open_int, &sw_fallback_lm);
1240 internal->sw_lm = sw_fallback_lm;
1242 internal->vdev = rte_vdpa_register_device(&pci_dev->device, &ifcvf_ops);
1243 if (internal->vdev == NULL) {
1244 DRV_LOG(ERR, "failed to register device %s", pci_dev->name);
1248 pthread_mutex_lock(&internal_list_lock);
1249 TAILQ_INSERT_TAIL(&internal_list, list, next);
1250 pthread_mutex_unlock(&internal_list_lock);
1252 rte_atomic32_set(&internal->started, 1);
1253 update_datapath(internal);
1255 rte_kvargs_free(kvlist);
1259 rte_kvargs_free(kvlist);
1266 ifcvf_pci_remove(struct rte_pci_device *pci_dev)
1268 struct ifcvf_internal *internal;
1269 struct internal_list *list;
1271 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1274 list = find_internal_resource_by_dev(pci_dev);
1276 DRV_LOG(ERR, "Invalid device: %s", pci_dev->name);
1280 internal = list->internal;
1281 rte_atomic32_set(&internal->started, 0);
1282 update_datapath(internal);
1284 rte_pci_unmap_device(internal->pdev);
1285 rte_vfio_container_destroy(internal->vfio_container_fd);
1286 rte_vdpa_unregister_device(internal->vdev);
1288 pthread_mutex_lock(&internal_list_lock);
1289 TAILQ_REMOVE(&internal_list, list, next);
1290 pthread_mutex_unlock(&internal_list_lock);
1299 * IFCVF has the same vendor ID and device ID as virtio net PCI
1300 * device, with its specific subsystem vendor ID and device ID.
1302 static const struct rte_pci_id pci_id_ifcvf_map[] = {
1303 { .class_id = RTE_CLASS_ANY_ID,
1304 .vendor_id = IFCVF_VENDOR_ID,
1305 .device_id = IFCVF_DEVICE_ID,
1306 .subsystem_vendor_id = IFCVF_SUBSYS_VENDOR_ID,
1307 .subsystem_device_id = IFCVF_SUBSYS_DEVICE_ID,
1310 { .vendor_id = 0, /* sentinel */
1314 static struct rte_pci_driver rte_ifcvf_vdpa = {
1315 .id_table = pci_id_ifcvf_map,
1317 .probe = ifcvf_pci_probe,
1318 .remove = ifcvf_pci_remove,
1321 RTE_PMD_REGISTER_PCI(net_ifcvf, rte_ifcvf_vdpa);
1322 RTE_PMD_REGISTER_PCI_TABLE(net_ifcvf, pci_id_ifcvf_map);
1323 RTE_PMD_REGISTER_KMOD_DEP(net_ifcvf, "* vfio-pci");
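/*
 * Usage sketch (assumption, not part of this file): bind the VF to vfio-pci
 * and hand it to a vhost application with the vdpa devarg, e.g. something
 * like "-a 0000:01:00.5,vdpa=1,sw-live-migration=1"; without vdpa=1 the
 * probe above leaves the device alone.
 */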