/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation
 */

#include <unistd.h>
#include <errno.h>
#include <limits.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/epoll.h>
#include <sys/eventfd.h>
#include <linux/virtio_net.h>

#include <rte_malloc.h>
#include <rte_memory.h>
#include <rte_bus_pci.h>
#include <rte_vhost.h>
#include <rte_vdpa.h>
#include <rte_vfio.h>
#include <rte_spinlock.h>
#include <rte_log.h>
#include <rte_kvargs.h>
#include <rte_devargs.h>

#include "base/ifcvf.h"

#define DRV_LOG(level, fmt, args...) \
	rte_log(RTE_LOG_ ## level, ifcvf_vdpa_logtype, \
		"IFCVF %s(): " fmt "\n", __func__, ##args)

#ifndef PAGE_SIZE
#define PAGE_SIZE 4096
#endif

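/*
 * Byte length of a used ring with @size entries: flags (u16) + idx (u16)
 * + one struct vring_used_elem per descriptor + the trailing avail_event
 * field (u16) -- hence the "sizeof(uint16_t) * 3" below.
 */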
#define IFCVF_USED_RING_LEN(size) \
	((size) * sizeof(struct vring_used_elem) + sizeof(uint16_t) * 3)

#define IFCVF_VDPA_MODE		"vdpa"
#define IFCVF_SW_FALLBACK_LM	"sw-live-migration"

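/*
 * Illustrative usage (hypothetical PCI address): pass the VF to a DPDK
 * application with devargs such as
 *     -w 06:00.3,vdpa=1,sw-live-migration=1
 * "vdpa=1" selects vDPA mode (the driver probes only when it is set);
 * "sw-live-migration=1" enables the software relay fallback below.
 */
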
static const char * const ifcvf_valid_arguments[] = {
	IFCVF_VDPA_MODE,
	IFCVF_SW_FALLBACK_LM,
	NULL
};

static int ifcvf_vdpa_logtype;

struct ifcvf_internal {
	struct rte_vdpa_dev_addr dev_addr;
	struct rte_pci_device *pdev;
	struct ifcvf_hw hw;
	int vfio_container_fd;
	int vfio_group_fd;
	int vfio_dev_fd;
	pthread_t tid;	/* thread for notify relay */
	int epfd;
	int vid;
	int did;
	uint16_t max_queues;
	uint64_t features;
	rte_atomic32_t started;
	rte_atomic32_t dev_attached;
	rte_atomic32_t running;
	rte_spinlock_t lock;
	bool sw_lm;
	bool sw_fallback_running;
	/* mediated vring for sw fallback */
	struct vring m_vring[IFCVF_MAX_QUEUES * 2];
	/* eventfd for used ring interrupt */
	int intr_fd[IFCVF_MAX_QUEUES * 2];
};

struct internal_list {
	TAILQ_ENTRY(internal_list) next;
	struct ifcvf_internal *internal;
};

TAILQ_HEAD(internal_list_head, internal_list);
static struct internal_list_head internal_list =
	TAILQ_HEAD_INITIALIZER(internal_list);

static pthread_mutex_t internal_list_lock = PTHREAD_MUTEX_INITIALIZER;

static void update_used_ring(struct ifcvf_internal *internal, uint16_t qid);

static struct internal_list *
find_internal_resource_by_did(int did)
{
	int found = 0;
	struct internal_list *list;

	pthread_mutex_lock(&internal_list_lock);

	TAILQ_FOREACH(list, &internal_list, next) {
		if (did == list->internal->did) {
			found = 1;
			break;
		}
	}

	pthread_mutex_unlock(&internal_list_lock);

	if (!found)
		return NULL;

	return list;
}

static struct internal_list *
find_internal_resource_by_dev(struct rte_pci_device *pdev)
{
	int found = 0;
	struct internal_list *list;

	pthread_mutex_lock(&internal_list_lock);

	TAILQ_FOREACH(list, &internal_list, next) {
		if (pdev == list->internal->pdev) {
			found = 1;
			break;
		}
	}

	pthread_mutex_unlock(&internal_list_lock);

	if (!found)
		return NULL;

	return list;
}

static int
ifcvf_vfio_setup(struct ifcvf_internal *internal)
{
	struct rte_pci_device *dev = internal->pdev;
	char devname[RTE_DEV_NAME_MAX_LEN] = {0};
	int iommu_group_num;
	int i;

	internal->vfio_dev_fd = -1;
	internal->vfio_group_fd = -1;
	internal->vfio_container_fd = -1;

	rte_pci_device_name(&dev->addr, devname, RTE_DEV_NAME_MAX_LEN);
	rte_vfio_get_group_num(rte_pci_get_sysfs_path(), devname,
			&iommu_group_num);

	internal->vfio_container_fd = rte_vfio_container_create();
	if (internal->vfio_container_fd < 0)
		return -1;

	internal->vfio_group_fd = rte_vfio_container_group_bind(
			internal->vfio_container_fd, iommu_group_num);
	if (internal->vfio_group_fd < 0)
		goto err;

	if (rte_pci_map_device(dev))
		goto err;

	internal->vfio_dev_fd = dev->intr_handle.vfio_dev_fd;

	for (i = 0; i < RTE_MIN(PCI_MAX_RESOURCE, IFCVF_PCI_MAX_RESOURCE);
			i++) {
		internal->hw.mem_resource[i].addr =
			internal->pdev->mem_resource[i].addr;
		internal->hw.mem_resource[i].phys_addr =
			internal->pdev->mem_resource[i].phys_addr;
		internal->hw.mem_resource[i].len =
			internal->pdev->mem_resource[i].len;
	}

	return 0;

err:
	rte_vfio_container_destroy(internal->vfio_container_fd);
	return -1;
}

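/*
 * Map (do_map == 1) or unmap (do_map == 0) every region of the guest's
 * memory table into the VFIO container. Guest physical addresses are
 * used as IOVAs, so the ring and buffer addresses programmed into the
 * VF need no further translation.
 */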
static int
ifcvf_dma_map(struct ifcvf_internal *internal, int do_map)
{
	uint32_t i;
	int ret;
	struct rte_vhost_memory *mem = NULL;
	int vfio_container_fd;

	ret = rte_vhost_get_mem_table(internal->vid, &mem);
	if (ret < 0) {
		DRV_LOG(ERR, "failed to get VM memory layout.");
		goto exit;
	}

	vfio_container_fd = internal->vfio_container_fd;

	for (i = 0; i < mem->nregions; i++) {
		struct rte_vhost_mem_region *reg;

		reg = &mem->regions[i];
		DRV_LOG(INFO, "%s, region %u: HVA 0x%" PRIx64 ", "
			"GPA 0x%" PRIx64 ", size 0x%" PRIx64 ".",
			do_map ? "DMA map" : "DMA unmap", i,
			reg->host_user_addr, reg->guest_phys_addr, reg->size);

		if (do_map) {
			ret = rte_vfio_container_dma_map(vfio_container_fd,
				reg->host_user_addr, reg->guest_phys_addr,
				reg->size);
			if (ret < 0) {
				DRV_LOG(ERR, "DMA map failed.");
				goto exit;
			}
		} else {
			ret = rte_vfio_container_dma_unmap(vfio_container_fd,
				reg->host_user_addr, reg->guest_phys_addr,
				reg->size);
			if (ret < 0) {
				DRV_LOG(ERR, "DMA unmap failed.");
				goto exit;
			}
		}
	}

exit:
	if (mem)
		free(mem);
	return ret;
}

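/*
 * Translate a host virtual address from the vhost memory table into a
 * guest physical address. Returns 0 when no region covers the HVA,
 * which callers treat as failure.
 */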
static uint64_t
hva_to_gpa(int vid, uint64_t hva)
{
	struct rte_vhost_memory *mem = NULL;
	struct rte_vhost_mem_region *reg;
	uint32_t i;
	uint64_t gpa = 0;

	if (rte_vhost_get_mem_table(vid, &mem) < 0)
		goto exit;

	for (i = 0; i < mem->nregions; i++) {
		reg = &mem->regions[i];

		if (hva >= reg->host_user_addr &&
				hva < reg->host_user_addr + reg->size) {
			gpa = hva - reg->host_user_addr + reg->guest_phys_addr;
			break;
		}
	}

exit:
	if (mem)
		free(mem);
	return gpa;
}

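/*
 * Program the guest's vring addresses and the last_avail/last_used
 * indexes into the VF, then kick off the hardware datapath.
 */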
static int
vdpa_ifcvf_start(struct ifcvf_internal *internal)
{
	struct ifcvf_hw *hw = &internal->hw;
	int i, nr_vring;
	int vid;
	struct rte_vhost_vring vq;
	uint64_t gpa;

	vid = internal->vid;
	nr_vring = rte_vhost_get_vring_num(vid);
	rte_vhost_get_negotiated_features(vid, &hw->req_features);

	for (i = 0; i < nr_vring; i++) {
		rte_vhost_get_vhost_vring(vid, i, &vq);
		gpa = hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.desc);
		if (gpa == 0) {
			DRV_LOG(ERR, "Fail to get GPA for descriptor ring.");
			return -1;
		}
		hw->vring[i].desc = gpa;

		gpa = hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.avail);
		if (gpa == 0) {
			DRV_LOG(ERR, "Fail to get GPA for available ring.");
			return -1;
		}
		hw->vring[i].avail = gpa;

		gpa = hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.used);
		if (gpa == 0) {
			DRV_LOG(ERR, "Fail to get GPA for used ring.");
			return -1;
		}
		hw->vring[i].used = gpa;

		hw->vring[i].size = vq.size;
		rte_vhost_get_vring_base(vid, i, &hw->vring[i].last_avail_idx,
				&hw->vring[i].last_used_idx);
	}
	hw->nr_vring = i;

	return ifcvf_start_hw(&internal->hw);
}

static void
vdpa_ifcvf_stop(struct ifcvf_internal *internal)
{
	struct ifcvf_hw *hw = &internal->hw;
	uint32_t i;
	int vid;
	uint64_t features = 0;
	uint64_t log_base = 0, log_size = 0;
	uint64_t len;

	vid = internal->vid;
	ifcvf_stop_hw(hw);

	for (i = 0; i < hw->nr_vring; i++)
		rte_vhost_set_vring_base(vid, i, hw->vring[i].last_avail_idx,
				hw->vring[i].last_used_idx);

	if (internal->sw_lm)
		return;

	rte_vhost_get_negotiated_features(vid, &features);
	if (RTE_VHOST_NEED_LOG(features)) {
		ifcvf_disable_logging(hw);
		rte_vhost_get_log_base(internal->vid, &log_base, &log_size);
		rte_vfio_container_dma_unmap(internal->vfio_container_fd,
				log_base, IFCVF_LOG_BASE, log_size);
		/*
		 * IFCVF marks dirty memory pages for only packet buffers,
		 * SW helps to mark the used ring as dirty after device stops.
		 */
		for (i = 0; i < hw->nr_vring; i++) {
			len = IFCVF_USED_RING_LEN(hw->vring[i].size);
			rte_vhost_log_used_vring(vid, i, 0, len);
		}
	}
}

#define MSIX_IRQ_SET_BUF_LEN (sizeof(struct vfio_irq_set) + \
		sizeof(int) * (IFCVF_MAX_QUEUES * 2 + 1))

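/*
 * MSI-X vector layout: vector 0 carries the device's config interrupt
 * (intr_handle.fd); vectors 1..nr_vring are the per-queue interrupts.
 * Normally each queue vector triggers the guest's callfd directly.
 * When m_rx is true (software live-migration fallback), even-numbered
 * (Rx) queues get a driver-owned eventfd instead, so the vring relay
 * thread can intercept used-ring updates before calling the guest.
 */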
static int
vdpa_enable_vfio_intr(struct ifcvf_internal *internal, bool m_rx)
{
	int ret;
	uint32_t i, nr_vring;
	char irq_set_buf[MSIX_IRQ_SET_BUF_LEN];
	struct vfio_irq_set *irq_set;
	int *fd_ptr;
	struct rte_vhost_vring vring;
	int fd;

	nr_vring = rte_vhost_get_vring_num(internal->vid);

	irq_set = (struct vfio_irq_set *)irq_set_buf;
	irq_set->argsz = sizeof(irq_set_buf);
	irq_set->count = nr_vring + 1;
	irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
			 VFIO_IRQ_SET_ACTION_TRIGGER;
	irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
	irq_set->start = 0;
	fd_ptr = (int *)&irq_set->data;
	fd_ptr[RTE_INTR_VEC_ZERO_OFFSET] = internal->pdev->intr_handle.fd;

	for (i = 0; i < nr_vring; i++)
		internal->intr_fd[i] = -1;

	for (i = 0; i < nr_vring; i++) {
		rte_vhost_get_vhost_vring(internal->vid, i, &vring);
		fd_ptr[RTE_INTR_VEC_RXTX_OFFSET + i] = vring.callfd;
		if ((i & 1) == 0 && m_rx == true) {
			fd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
			if (fd < 0) {
				DRV_LOG(ERR, "can't setup eventfd: %s",
					strerror(errno));
				return -1;
			}
			internal->intr_fd[i] = fd;
			fd_ptr[RTE_INTR_VEC_RXTX_OFFSET + i] = fd;
		}
	}

	ret = ioctl(internal->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
	if (ret) {
		DRV_LOG(ERR, "Error enabling MSI-X interrupts: %s",
				strerror(errno));
		return -1;
	}

	return 0;
}

static int
vdpa_disable_vfio_intr(struct ifcvf_internal *internal)
{
	int ret;
	uint32_t i, nr_vring;
	char irq_set_buf[MSIX_IRQ_SET_BUF_LEN];
	struct vfio_irq_set *irq_set;

	irq_set = (struct vfio_irq_set *)irq_set_buf;
	irq_set->argsz = sizeof(irq_set_buf);
	irq_set->count = 0;
	irq_set->flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER;
	irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
	irq_set->start = 0;

	nr_vring = rte_vhost_get_vring_num(internal->vid);
	for (i = 0; i < nr_vring; i++) {
		if (internal->intr_fd[i] >= 0)
			close(internal->intr_fd[i]);
		internal->intr_fd[i] = -1;
	}

	ret = ioctl(internal->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
	if (ret) {
		DRV_LOG(ERR, "Error disabling MSI-X interrupts: %s",
				strerror(errno));
		return -1;
	}

	return 0;
}

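/*
 * Notify relay thread: used when the guest's kick eventfds cannot be
 * wired to the VF notify registers via host notifiers. It epolls every
 * kickfd and converts each guest kick into a doorbell write through
 * ifcvf_notify_queue().
 */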
static void *
notify_relay(void *arg)
{
	int i, kickfd, epfd, nfds = 0;
	uint32_t qid, q_num;
	struct epoll_event events[IFCVF_MAX_QUEUES * 2];
	struct epoll_event ev;
	uint64_t buf;
	int nbytes;
	struct rte_vhost_vring vring;
	struct ifcvf_internal *internal = (struct ifcvf_internal *)arg;
	struct ifcvf_hw *hw = &internal->hw;

	q_num = rte_vhost_get_vring_num(internal->vid);

	epfd = epoll_create(IFCVF_MAX_QUEUES * 2);
	if (epfd < 0) {
		DRV_LOG(ERR, "failed to create epoll instance.");
		return NULL;
	}
	internal->epfd = epfd;

	for (qid = 0; qid < q_num; qid++) {
		ev.events = EPOLLIN | EPOLLPRI;
		rte_vhost_get_vhost_vring(internal->vid, qid, &vring);
		ev.data.u64 = qid | (uint64_t)vring.kickfd << 32;
		if (epoll_ctl(epfd, EPOLL_CTL_ADD, vring.kickfd, &ev) < 0) {
			DRV_LOG(ERR, "epoll add error: %s", strerror(errno));
			return NULL;
		}
	}

	for (;;) {
		nfds = epoll_wait(epfd, events, q_num, -1);
		if (nfds < 0) {
			if (errno == EINTR)
				continue;
			DRV_LOG(ERR, "epoll_wait returned failure.");
			return NULL;
		}

		for (i = 0; i < nfds; i++) {
			qid = events[i].data.u32;
			kickfd = (uint32_t)(events[i].data.u64 >> 32);
			do {
				nbytes = read(kickfd, &buf, 8);
				if (nbytes < 0) {
					if (errno == EINTR ||
					    errno == EWOULDBLOCK ||
					    errno == EAGAIN)
						continue;
					DRV_LOG(INFO, "Error reading "
						"kickfd: %s",
						strerror(errno));
				}
				break;
			} while (1);

			ifcvf_notify_queue(hw, qid);
		}
	}

	return NULL;
}

static int
setup_notify_relay(struct ifcvf_internal *internal)
{
	int ret;

	ret = pthread_create(&internal->tid, NULL, notify_relay,
			(void *)internal);
	if (ret) {
		DRV_LOG(ERR, "failed to create notify relay pthread.");
		return -1;
	}
	return 0;
}

static int
unset_notify_relay(struct ifcvf_internal *internal)
{
	void *status;

	if (internal->tid) {
		pthread_cancel(internal->tid);
		pthread_join(internal->tid, &status);
	}
	internal->tid = 0;

	if (internal->epfd >= 0)
		close(internal->epfd);
	internal->epfd = -1;

	return 0;
}

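/*
 * Reconcile the datapath with the started/dev_attached/running flags,
 * under internal->lock:
 *   - not running, but started and attached -> bring the VF up
 *     (DMA map, enable interrupts, start HW, spawn notify relay);
 *   - running, but stopped or detached -> tear it down in reverse order.
 */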
static int
update_datapath(struct ifcvf_internal *internal)
{
	int ret;

	rte_spinlock_lock(&internal->lock);

	if (!rte_atomic32_read(&internal->running) &&
	    (rte_atomic32_read(&internal->started) &&
	     rte_atomic32_read(&internal->dev_attached))) {
		ret = ifcvf_dma_map(internal, 1);
		if (ret)
			goto err;

		ret = vdpa_enable_vfio_intr(internal, 0);
		if (ret)
			goto err;

		ret = vdpa_ifcvf_start(internal);
		if (ret)
			goto err;

		ret = setup_notify_relay(internal);
		if (ret)
			goto err;

		rte_atomic32_set(&internal->running, 1);
	} else if (rte_atomic32_read(&internal->running) &&
		   (!rte_atomic32_read(&internal->started) ||
		    !rte_atomic32_read(&internal->dev_attached))) {
		ret = unset_notify_relay(internal);
		if (ret)
			goto err;

		vdpa_ifcvf_stop(internal);

		ret = vdpa_disable_vfio_intr(internal);
		if (ret)
			goto err;

		ret = ifcvf_dma_map(internal, 0);
		if (ret)
			goto err;

		rte_atomic32_set(&internal->running, 0);
	}

	rte_spinlock_unlock(&internal->lock);
	return 0;
err:
	rte_spinlock_unlock(&internal->lock);
	return ret;
}

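/*
 * Software live-migration fallback: start the VF on mediated vrings
 * allocated in host memory (mapped at the IFCVF_MEDIATED_VRING IOVA).
 * Tx queues (odd index) still use the guest's used ring directly;
 * Rx queues use the mediated used ring so the relay thread can log
 * dirty pages while copying used entries back to the guest.
 */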
static int
m_ifcvf_start(struct ifcvf_internal *internal)
{
	struct ifcvf_hw *hw = &internal->hw;
	uint32_t i, nr_vring;
	int vid, ret;
	struct rte_vhost_vring vq;
	void *vring_buf;
	uint64_t m_vring_iova = IFCVF_MEDIATED_VRING;
	uint64_t size;
	uint64_t gpa;

	vid = internal->vid;
	nr_vring = rte_vhost_get_vring_num(vid);
	rte_vhost_get_negotiated_features(vid, &hw->req_features);

	for (i = 0; i < nr_vring; i++) {
		rte_vhost_get_vhost_vring(vid, i, &vq);

		size = RTE_ALIGN_CEIL(vring_size(vq.size, PAGE_SIZE),
				PAGE_SIZE);
		vring_buf = rte_zmalloc("ifcvf", size, PAGE_SIZE);
		vring_init(&internal->m_vring[i], vq.size, vring_buf,
				PAGE_SIZE);

		ret = rte_vfio_container_dma_map(internal->vfio_container_fd,
			(uint64_t)(uintptr_t)vring_buf, m_vring_iova, size);
		if (ret < 0) {
			DRV_LOG(ERR, "mediated vring DMA map failed.");
			goto error;
		}

		gpa = hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.desc);
		if (gpa == 0) {
			DRV_LOG(ERR, "Fail to get GPA for descriptor ring.");
			goto error;
		}
		hw->vring[i].desc = gpa;

		gpa = hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.avail);
		if (gpa == 0) {
			DRV_LOG(ERR, "Fail to get GPA for available ring.");
			goto error;
		}
		hw->vring[i].avail = gpa;

		/* Direct I/O for Tx queue, relay for Rx queue */
		if (i & 1) {
			gpa = hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.used);
			if (gpa == 0) {
				DRV_LOG(ERR, "Fail to get GPA for used ring.");
				goto error;
			}
			hw->vring[i].used = gpa;
		} else {
			hw->vring[i].used = m_vring_iova +
				(char *)internal->m_vring[i].used -
				(char *)internal->m_vring[i].desc;
		}

		hw->vring[i].size = vq.size;

		rte_vhost_get_vring_base(vid, i,
				&internal->m_vring[i].avail->idx,
				&internal->m_vring[i].used->idx);

		rte_vhost_get_vring_base(vid, i, &hw->vring[i].last_avail_idx,
				&hw->vring[i].last_used_idx);

		m_vring_iova += size;
	}
	hw->nr_vring = nr_vring;

	return ifcvf_start_hw(&internal->hw);

error:
	for (i = 0; i < nr_vring; i++)
		if (internal->m_vring[i].desc)
			rte_free(internal->m_vring[i].desc);

	return -1;
}

static int
m_ifcvf_stop(struct ifcvf_internal *internal)
{
	int vid;
	uint32_t i;
	struct rte_vhost_vring vq;
	struct ifcvf_hw *hw = &internal->hw;
	uint64_t m_vring_iova = IFCVF_MEDIATED_VRING;
	uint64_t size, len;

	vid = internal->vid;
	ifcvf_stop_hw(hw);

	for (i = 0; i < hw->nr_vring; i++) {
		/* synchronize remaining new used entries if any */
		if ((i & 1) == 0)
			update_used_ring(internal, i);

		rte_vhost_get_vhost_vring(vid, i, &vq);
		len = IFCVF_USED_RING_LEN(vq.size);
		rte_vhost_log_used_vring(vid, i, 0, len);

		size = RTE_ALIGN_CEIL(vring_size(vq.size, PAGE_SIZE),
				PAGE_SIZE);
		rte_vfio_container_dma_unmap(internal->vfio_container_fd,
			(uint64_t)(uintptr_t)internal->m_vring[i].desc,
			m_vring_iova, size);

		rte_vhost_set_vring_base(vid, i, hw->vring[i].last_avail_idx,
				hw->vring[i].last_used_idx);
		rte_free(internal->m_vring[i].desc);
		m_vring_iova += size;
	}

	return 0;
}

static void
update_used_ring(struct ifcvf_internal *internal, uint16_t qid)
{
	rte_vdpa_relay_vring_used(internal->vid, qid, &internal->m_vring[qid]);
	rte_vhost_vring_call(internal->vid, qid);
}

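/*
 * Vring relay thread for the sw fallback: the epoll data encodes the
 * event source in a u64 -- bit 0 distinguishes a used-ring interrupt (1)
 * from a guest kick (0), bits 1+ carry the queue id, and the upper
 * 32 bits hold the fd to drain.
 */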
static void *
vring_relay(void *arg)
{
	int i, vid, epfd, fd, nfds;
	struct ifcvf_internal *internal = (struct ifcvf_internal *)arg;
	struct rte_vhost_vring vring;
	uint16_t qid, q_num;
	struct epoll_event events[IFCVF_MAX_QUEUES * 4];
	struct epoll_event ev;
	int nbytes;
	uint64_t buf;

	vid = internal->vid;
	q_num = rte_vhost_get_vring_num(vid);

	/* add notify fd and interrupt fd to epoll */
	epfd = epoll_create(IFCVF_MAX_QUEUES * 2);
	if (epfd < 0) {
		DRV_LOG(ERR, "failed to create epoll instance.");
		return NULL;
	}
	internal->epfd = epfd;

	for (qid = 0; qid < q_num; qid++) {
		ev.events = EPOLLIN | EPOLLPRI;
		rte_vhost_get_vhost_vring(vid, qid, &vring);
		ev.data.u64 = qid << 1 | (uint64_t)vring.kickfd << 32;
		if (epoll_ctl(epfd, EPOLL_CTL_ADD, vring.kickfd, &ev) < 0) {
			DRV_LOG(ERR, "epoll add error: %s", strerror(errno));
			return NULL;
		}
	}

	for (qid = 0; qid < q_num; qid += 2) {
		ev.events = EPOLLIN | EPOLLPRI;
		/* leave a flag to mark it's for interrupt */
		ev.data.u64 = 1 | qid << 1 |
			(uint64_t)internal->intr_fd[qid] << 32;
		if (epoll_ctl(epfd, EPOLL_CTL_ADD, internal->intr_fd[qid], &ev)
				< 0) {
			DRV_LOG(ERR, "epoll add error: %s", strerror(errno));
			return NULL;
		}
		update_used_ring(internal, qid);
	}

	/* start relay with a first kick */
	for (qid = 0; qid < q_num; qid++)
		ifcvf_notify_queue(&internal->hw, qid);

	/* listen to the events and react accordingly */
	for (;;) {
		nfds = epoll_wait(epfd, events, q_num * 2, -1);
		if (nfds < 0) {
			if (errno == EINTR)
				continue;
			DRV_LOG(ERR, "epoll_wait returned failure.");
			return NULL;
		}

		for (i = 0; i < nfds; i++) {
			fd = (uint32_t)(events[i].data.u64 >> 32);
			do {
				/* drain the eventfd counter */
				nbytes = read(fd, &buf, 8);
				if (nbytes < 0) {
					if (errno == EINTR ||
					    errno == EWOULDBLOCK ||
					    errno == EAGAIN)
						continue;
					DRV_LOG(INFO, "Error reading "
						"eventfd: %s",
						strerror(errno));
				}
				break;
			} while (1);

			qid = events[i].data.u32 >> 1;

			if (events[i].data.u32 & 1)
				update_used_ring(internal, qid);
			else
				ifcvf_notify_queue(&internal->hw, qid);
		}
	}

	return NULL;
}

static int
setup_vring_relay(struct ifcvf_internal *internal)
{
	int ret;

	ret = pthread_create(&internal->tid, NULL, vring_relay,
			(void *)internal);
	if (ret) {
		DRV_LOG(ERR, "failed to create ring relay pthread.");
		return -1;
	}
	return 0;
}

static int
unset_vring_relay(struct ifcvf_internal *internal)
{
	void *status;

	if (internal->tid) {
		pthread_cancel(internal->tid);
		pthread_join(internal->tid, &status);
	}
	internal->tid = 0;

	if (internal->epfd >= 0)
		close(internal->epfd);
	internal->epfd = -1;

	return 0;
}

static int
ifcvf_sw_fallback_switchover(struct ifcvf_internal *internal)
{
	int ret;
	int vid = internal->vid;

	/* stop the direct IO data path */
	unset_notify_relay(internal);
	vdpa_ifcvf_stop(internal);
	vdpa_disable_vfio_intr(internal);

	ret = rte_vhost_host_notifier_ctrl(vid, false);
	if (ret && ret != -ENOTSUP)
		goto error;

	/* set up interrupt for interrupt relay */
	ret = vdpa_enable_vfio_intr(internal, 1);
	if (ret)
		goto unmap;

	/* config the VF */
	ret = m_ifcvf_start(internal);
	if (ret)
		goto unset_intr;

	/* set up vring relay thread */
	ret = setup_vring_relay(internal);
	if (ret)
		goto stop_vf;

	rte_vhost_host_notifier_ctrl(vid, true);

	internal->sw_fallback_running = true;

	return 0;

stop_vf:
	m_ifcvf_stop(internal);
unset_intr:
	vdpa_disable_vfio_intr(internal);
unmap:
	ifcvf_dma_map(internal, 0);
error:
	return -1;
}

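/*
 * vhost-user/vDPA callbacks, registered through ifcvf_ops below.
 * dev_conf is invoked once the vhost device is ready: it attaches the
 * VF to the datapath and tries to install host notifiers so the guest
 * can kick the hardware directly; on failure the software notify relay
 * stays in place.
 */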
static int
ifcvf_dev_config(int vid)
{
	int did;
	struct internal_list *list;
	struct ifcvf_internal *internal;

	did = rte_vhost_get_vdpa_device_id(vid);
	list = find_internal_resource_by_did(did);
	if (list == NULL) {
		DRV_LOG(ERR, "Invalid device id: %d", did);
		return -1;
	}

	internal = list->internal;
	internal->vid = vid;
	rte_atomic32_set(&internal->dev_attached, 1);
	update_datapath(internal);

	if (rte_vhost_host_notifier_ctrl(vid, true) != 0)
		DRV_LOG(NOTICE, "vDPA (%d): software relay is used.", did);

	return 0;
}

static int
ifcvf_dev_close(int vid)
{
	int did;
	struct internal_list *list;
	struct ifcvf_internal *internal;

	did = rte_vhost_get_vdpa_device_id(vid);
	list = find_internal_resource_by_did(did);
	if (list == NULL) {
		DRV_LOG(ERR, "Invalid device id: %d", did);
		return -1;
	}

	internal = list->internal;

	if (internal->sw_fallback_running) {
		/* unset ring relay */
		unset_vring_relay(internal);

		/* stop the direct IO data path */
		m_ifcvf_stop(internal);

		/* remove interrupt setting */
		vdpa_disable_vfio_intr(internal);

		/* unset DMA map for guest memory */
		ifcvf_dma_map(internal, 0);

		internal->sw_fallback_running = false;
	} else {
		rte_atomic32_set(&internal->dev_attached, 0);
		update_datapath(internal);
	}

	return 0;
}

static int
ifcvf_set_features(int vid)
{
	uint64_t features = 0;
	int did;
	struct internal_list *list;
	struct ifcvf_internal *internal;
	uint64_t log_base = 0, log_size = 0;

	did = rte_vhost_get_vdpa_device_id(vid);
	list = find_internal_resource_by_did(did);
	if (list == NULL) {
		DRV_LOG(ERR, "Invalid device id: %d", did);
		return -1;
	}

	internal = list->internal;
	rte_vhost_get_negotiated_features(vid, &features);

	if (!RTE_VHOST_NEED_LOG(features))
		return 0;

	if (internal->sw_lm) {
		ifcvf_sw_fallback_switchover(internal);
	} else {
		rte_vhost_get_log_base(vid, &log_base, &log_size);
		rte_vfio_container_dma_map(internal->vfio_container_fd,
				log_base, IFCVF_LOG_BASE, log_size);
		ifcvf_enable_logging(&internal->hw, IFCVF_LOG_BASE, log_size);
	}

	return 0;
}

static int
ifcvf_get_vfio_group_fd(int vid)
{
	int did;
	struct internal_list *list;

	did = rte_vhost_get_vdpa_device_id(vid);
	list = find_internal_resource_by_did(did);
	if (list == NULL) {
		DRV_LOG(ERR, "Invalid device id: %d", did);
		return -1;
	}

	return list->internal->vfio_group_fd;
}

static int
ifcvf_get_vfio_device_fd(int vid)
{
	int did;
	struct internal_list *list;

	did = rte_vhost_get_vdpa_device_id(vid);
	list = find_internal_resource_by_did(did);
	if (list == NULL) {
		DRV_LOG(ERR, "Invalid device id: %d", did);
		return -1;
	}

	return list->internal->vfio_dev_fd;
}

static int
ifcvf_get_notify_area(int vid, int qid, uint64_t *offset, uint64_t *size)
{
	int did;
	struct internal_list *list;
	struct ifcvf_internal *internal;
	struct vfio_region_info reg = { .argsz = sizeof(reg) };
	int ret;

	did = rte_vhost_get_vdpa_device_id(vid);
	list = find_internal_resource_by_did(did);
	if (list == NULL) {
		DRV_LOG(ERR, "Invalid device id: %d", did);
		return -1;
	}

	internal = list->internal;

	reg.index = ifcvf_get_notify_region(&internal->hw);
	ret = ioctl(internal->vfio_dev_fd, VFIO_DEVICE_GET_REGION_INFO, &reg);
	if (ret) {
		DRV_LOG(ERR, "Cannot get device region info: %s",
				strerror(errno));
		return -1;
	}

	*offset = ifcvf_get_queue_notify_off(&internal->hw, qid) + reg.offset;
	*size = 0x1000;

	return 0;
}

static int
ifcvf_get_queue_num(int did, uint32_t *queue_num)
{
	struct internal_list *list;

	list = find_internal_resource_by_did(did);
	if (list == NULL) {
		DRV_LOG(ERR, "Invalid device id: %d", did);
		return -1;
	}

	*queue_num = list->internal->max_queues;

	return 0;
}

static int
ifcvf_get_vdpa_features(int did, uint64_t *features)
{
	struct internal_list *list;

	list = find_internal_resource_by_did(did);
	if (list == NULL) {
		DRV_LOG(ERR, "Invalid device id: %d", did);
		return -1;
	}

	*features = list->internal->features;

	return 0;
}

#define VDPA_SUPPORTED_PROTOCOL_FEATURES \
		(1ULL << VHOST_USER_PROTOCOL_F_REPLY_ACK | \
		 1ULL << VHOST_USER_PROTOCOL_F_SLAVE_REQ | \
		 1ULL << VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD | \
		 1ULL << VHOST_USER_PROTOCOL_F_HOST_NOTIFIER | \
		 1ULL << VHOST_USER_PROTOCOL_F_LOG_SHMFD)
static int
ifcvf_get_protocol_features(int did __rte_unused, uint64_t *features)
{
	*features = VDPA_SUPPORTED_PROTOCOL_FEATURES;

	return 0;
}

static struct rte_vdpa_dev_ops ifcvf_ops = {
	.get_queue_num = ifcvf_get_queue_num,
	.get_features = ifcvf_get_vdpa_features,
	.get_protocol_features = ifcvf_get_protocol_features,
	.dev_conf = ifcvf_dev_config,
	.dev_close = ifcvf_dev_close,
	.set_vring_state = NULL,
	.set_features = ifcvf_set_features,
	.migration_done = NULL,
	.get_vfio_group_fd = ifcvf_get_vfio_group_fd,
	.get_vfio_device_fd = ifcvf_get_vfio_device_fd,
	.get_notify_area = ifcvf_get_notify_area,
};

static inline int
open_int(const char *key __rte_unused, const char *value, void *extra_args)
{
	uint16_t *n = extra_args;

	if (value == NULL || extra_args == NULL)
		return -EINVAL;

	*n = (uint16_t)strtoul(value, NULL, 0);
	if (*n == USHRT_MAX && errno == ERANGE)
		return -1;

	return 0;
}

static int
ifcvf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
		struct rte_pci_device *pci_dev)
{
	uint64_t features;
	struct ifcvf_internal *internal = NULL;
	struct internal_list *list = NULL;
	int vdpa_mode = 0;
	int sw_fallback_lm = 0;
	struct rte_kvargs *kvlist = NULL;
	int ret = 0;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	if (!pci_dev->device.devargs)
		return 1;

	kvlist = rte_kvargs_parse(pci_dev->device.devargs->args,
			ifcvf_valid_arguments);
	if (kvlist == NULL)
		return 1;

	/* probe only when vdpa mode is specified */
	if (rte_kvargs_count(kvlist, IFCVF_VDPA_MODE) == 0) {
		rte_kvargs_free(kvlist);
		return 1;
	}

	ret = rte_kvargs_process(kvlist, IFCVF_VDPA_MODE, &open_int,
			&vdpa_mode);
	if (ret < 0 || vdpa_mode == 0) {
		rte_kvargs_free(kvlist);
		return 1;
	}

	list = rte_zmalloc("ifcvf", sizeof(*list), 0);
	if (list == NULL)
		goto error;

	internal = rte_zmalloc("ifcvf", sizeof(*internal), 0);
	if (internal == NULL)
		goto error;

	internal->pdev = pci_dev;
	rte_spinlock_init(&internal->lock);

	if (ifcvf_vfio_setup(internal) < 0) {
		DRV_LOG(ERR, "failed to setup device %s", pci_dev->name);
		goto error;
	}

	if (ifcvf_init_hw(&internal->hw, internal->pdev) < 0) {
		DRV_LOG(ERR, "failed to init device %s", pci_dev->name);
		goto error;
	}

	internal->max_queues = IFCVF_MAX_QUEUES;
	features = ifcvf_get_features(&internal->hw);
	internal->features = (features &
		~(1ULL << VIRTIO_F_IOMMU_PLATFORM)) |
		(1ULL << VIRTIO_NET_F_GUEST_ANNOUNCE) |
		(1ULL << VIRTIO_NET_F_CTRL_VQ) |
		(1ULL << VIRTIO_NET_F_STATUS) |
		(1ULL << VHOST_USER_F_PROTOCOL_FEATURES) |
		(1ULL << VHOST_F_LOG_ALL);

	internal->dev_addr.pci_addr = pci_dev->addr;
	internal->dev_addr.type = PCI_ADDR;
	list->internal = internal;

	if (rte_kvargs_count(kvlist, IFCVF_SW_FALLBACK_LM)) {
		ret = rte_kvargs_process(kvlist, IFCVF_SW_FALLBACK_LM,
				&open_int, &sw_fallback_lm);
		if (ret < 0)
			goto error;
	}
	internal->sw_lm = sw_fallback_lm;

	internal->did = rte_vdpa_register_device(&internal->dev_addr,
				&ifcvf_ops);
	if (internal->did < 0) {
		DRV_LOG(ERR, "failed to register device %s", pci_dev->name);
		goto error;
	}

	pthread_mutex_lock(&internal_list_lock);
	TAILQ_INSERT_TAIL(&internal_list, list, next);
	pthread_mutex_unlock(&internal_list_lock);

	rte_atomic32_set(&internal->started, 1);
	update_datapath(internal);

	rte_kvargs_free(kvlist);
	return 0;

error:
	rte_kvargs_free(kvlist);
	rte_free(list);
	rte_free(internal);
	return -1;
}

static int
ifcvf_pci_remove(struct rte_pci_device *pci_dev)
{
	struct ifcvf_internal *internal;
	struct internal_list *list;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	list = find_internal_resource_by_dev(pci_dev);
	if (list == NULL) {
		DRV_LOG(ERR, "Invalid device: %s", pci_dev->name);
		return -1;
	}

	internal = list->internal;
	rte_atomic32_set(&internal->started, 0);
	update_datapath(internal);

	rte_pci_unmap_device(internal->pdev);
	rte_vfio_container_destroy(internal->vfio_container_fd);
	rte_vdpa_unregister_device(internal->did);

	pthread_mutex_lock(&internal_list_lock);
	TAILQ_REMOVE(&internal_list, list, next);
	pthread_mutex_unlock(&internal_list_lock);

	rte_free(list);
	rte_free(internal);

	return 0;
}

/*
 * IFCVF has the same vendor ID and device ID as the virtio net PCI
 * device, with its specific subsystem vendor ID and device ID.
 */
static const struct rte_pci_id pci_id_ifcvf_map[] = {
	{ .class_id = RTE_CLASS_ANY_ID,
	  .vendor_id = IFCVF_VENDOR_ID,
	  .device_id = IFCVF_DEVICE_ID,
	  .subsystem_vendor_id = IFCVF_SUBSYS_VENDOR_ID,
	  .subsystem_device_id = IFCVF_SUBSYS_DEVICE_ID,
	},

	{ .vendor_id = 0, /* sentinel */
	},
};

static struct rte_pci_driver rte_ifcvf_vdpa = {
	.id_table = pci_id_ifcvf_map,
	.drv_flags = 0,
	.probe = ifcvf_pci_probe,
	.remove = ifcvf_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_ifcvf, rte_ifcvf_vdpa);
RTE_PMD_REGISTER_PCI_TABLE(net_ifcvf, pci_id_ifcvf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_ifcvf, "* vfio-pci");

RTE_INIT(ifcvf_vdpa_init_log)
{
	ifcvf_vdpa_logtype = rte_log_register("pmd.net.ifcvf_vdpa");
	if (ifcvf_vdpa_logtype >= 0)
		rte_log_set_level(ifcvf_vdpa_logtype, RTE_LOG_NOTICE);
}