/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation
 */

#include <unistd.h>
#include <pthread.h>
#include <fcntl.h>
#include <string.h>
#include <stdlib.h>
#include <stdbool.h>
#include <errno.h>
#include <limits.h>
#include <inttypes.h>
#include <sys/ioctl.h>
#include <sys/epoll.h>
#include <sys/eventfd.h>
#include <linux/virtio_net.h>

#include <rte_malloc.h>
#include <rte_memory.h>
#include <rte_bus_pci.h>
#include <rte_vhost.h>
#include <rte_vdpa.h>
#include <rte_vfio.h>
#include <rte_spinlock.h>
#include <rte_log.h>
#include <rte_kvargs.h>
#include <rte_devargs.h>

#include "base/ifcvf.h"

#define DRV_LOG(level, fmt, args...) \
	rte_log(RTE_LOG_ ## level, ifcvf_vdpa_logtype, \
		"IFCVF %s(): " fmt "\n", __func__, ##args)

#define PAGE_SIZE 4096
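
/*
 * Length in bytes of a used ring of the given size: one
 * vring_used_elem per descriptor plus three uint16_t's (the flags
 * and idx fields and the trailing avail event index).
 */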
#define IFCVF_USED_RING_LEN(size) \
	((size) * sizeof(struct vring_used_elem) + sizeof(uint16_t) * 3)

#define IFCVF_VDPA_MODE		"vdpa"
#define IFCVF_SW_FALLBACK_LM	"sw-live-migration"

static const char * const ifcvf_valid_arguments[] = {
	IFCVF_VDPA_MODE,
	IFCVF_SW_FALLBACK_LM,
	NULL
};

static int ifcvf_vdpa_logtype;

struct ifcvf_internal {
	struct rte_vdpa_dev_addr dev_addr;
	struct rte_pci_device *pdev;
	struct ifcvf_hw hw;
	int vfio_container_fd;
	int vfio_group_fd;
	int vfio_dev_fd;
	pthread_t tid;	/* thread for notify relay */
	int epfd;
	int vid;
	int did;
	uint16_t max_queues;
	uint64_t features;
	rte_atomic32_t started;
	rte_atomic32_t dev_attached;
	rte_atomic32_t running;
	rte_spinlock_t lock;
	bool sw_lm;
	bool sw_fallback_running;
	/* mediated vring for sw fallback */
	struct vring m_vring[IFCVF_MAX_QUEUES * 2];
	/* eventfd for used ring interrupt */
	int intr_fd[IFCVF_MAX_QUEUES * 2];
};

struct internal_list {
	TAILQ_ENTRY(internal_list) next;
	struct ifcvf_internal *internal;
};

TAILQ_HEAD(internal_list_head, internal_list);
static struct internal_list_head internal_list =
	TAILQ_HEAD_INITIALIZER(internal_list);

static pthread_mutex_t internal_list_lock = PTHREAD_MUTEX_INITIALIZER;

static void update_used_ring(struct ifcvf_internal *internal, uint16_t qid);
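
/* Look up a registered device by its vDPA device id; returns NULL if absent. */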
static struct internal_list *
find_internal_resource_by_did(int did)
{
	int found = 0;
	struct internal_list *list;

	pthread_mutex_lock(&internal_list_lock);
	TAILQ_FOREACH(list, &internal_list, next) {
		if (did == list->internal->did) {
			found = 1;
			break;
		}
	}
	pthread_mutex_unlock(&internal_list_lock);

	return found ? list : NULL;
}
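
/* Look up a registered device by its PCI device handle. */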
static struct internal_list *
find_internal_resource_by_dev(struct rte_pci_device *pdev)
{
	int found = 0;
	struct internal_list *list;

	pthread_mutex_lock(&internal_list_lock);
	TAILQ_FOREACH(list, &internal_list, next) {
		if (pdev == list->internal->pdev) {
			found = 1;
			break;
		}
	}
	pthread_mutex_unlock(&internal_list_lock);

	return found ? list : NULL;
}
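
/*
 * Put the VF into its own VFIO container: create the container, bind
 * the device's IOMMU group to it, map the PCI device, and mirror the
 * BAR addresses into the HW layer.
 */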
static int
ifcvf_vfio_setup(struct ifcvf_internal *internal)
{
	struct rte_pci_device *dev = internal->pdev;
	char devname[RTE_DEV_NAME_MAX_LEN] = {0};
	int iommu_group_num;
	int i;

	internal->vfio_dev_fd = -1;
	internal->vfio_group_fd = -1;
	internal->vfio_container_fd = -1;

	rte_pci_device_name(&dev->addr, devname, RTE_DEV_NAME_MAX_LEN);
	rte_vfio_get_group_num(rte_pci_get_sysfs_path(), devname,
			&iommu_group_num);

	internal->vfio_container_fd = rte_vfio_container_create();
	if (internal->vfio_container_fd < 0)
		return -1;

	internal->vfio_group_fd = rte_vfio_container_group_bind(
			internal->vfio_container_fd, iommu_group_num);
	if (internal->vfio_group_fd < 0)
		goto err;

	if (rte_pci_map_device(dev))
		goto err;

	internal->vfio_dev_fd = dev->intr_handle.vfio_dev_fd;

	for (i = 0; i < RTE_MIN(PCI_MAX_RESOURCE, IFCVF_PCI_MAX_RESOURCE);
			i++) {
		internal->hw.mem_resource[i].addr =
			internal->pdev->mem_resource[i].addr;
		internal->hw.mem_resource[i].phys_addr =
			internal->pdev->mem_resource[i].phys_addr;
		internal->hw.mem_resource[i].len =
			internal->pdev->mem_resource[i].len;
	}

	return 0;

err:
	rte_vfio_container_destroy(internal->vfio_container_fd);
	return -1;
}
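
/*
 * Map (do_map != 0) or unmap the guest's memory regions in the VFIO
 * container, using guest physical addresses as IOVAs so the VF can
 * DMA into guest memory directly.
 */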
static int
ifcvf_dma_map(struct ifcvf_internal *internal, int do_map)
{
	uint32_t i;
	int ret;
	struct rte_vhost_memory *mem = NULL;
	int vfio_container_fd;

	ret = rte_vhost_get_mem_table(internal->vid, &mem);
	if (ret < 0) {
		DRV_LOG(ERR, "failed to get VM memory layout.");
		goto exit;
	}

	vfio_container_fd = internal->vfio_container_fd;

	for (i = 0; i < mem->nregions; i++) {
		struct rte_vhost_mem_region *reg;

		reg = &mem->regions[i];
		DRV_LOG(INFO, "%s, region %u: HVA 0x%" PRIx64 ", "
			"GPA 0x%" PRIx64 ", size 0x%" PRIx64 ".",
			do_map ? "DMA map" : "DMA unmap", i,
			reg->host_user_addr, reg->guest_phys_addr, reg->size);

		if (do_map) {
			ret = rte_vfio_container_dma_map(vfio_container_fd,
				reg->host_user_addr, reg->guest_phys_addr,
				reg->size);
			if (ret < 0) {
				DRV_LOG(ERR, "DMA map failed.");
				goto exit;
			}
		} else {
			ret = rte_vfio_container_dma_unmap(vfio_container_fd,
				reg->host_user_addr, reg->guest_phys_addr,
				reg->size);
			if (ret < 0) {
				DRV_LOG(ERR, "DMA unmap failed.");
				goto exit;
			}
		}
	}

exit:
	if (mem)
		free(mem);
	return ret;
}
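
/*
 * Translate a host virtual address into a guest physical address by
 * walking the VM's memory regions; returns 0 if no region covers it.
 */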
static uint64_t
hva_to_gpa(int vid, uint64_t hva)
{
	struct rte_vhost_memory *mem = NULL;
	struct rte_vhost_mem_region *reg;
	uint32_t i;
	uint64_t gpa = 0;

	if (rte_vhost_get_mem_table(vid, &mem) < 0)
		goto exit;

	for (i = 0; i < mem->nregions; i++) {
		reg = &mem->regions[i];

		if (hva >= reg->host_user_addr &&
				hva < reg->host_user_addr + reg->size) {
			gpa = hva - reg->host_user_addr + reg->guest_phys_addr;
			break;
		}
	}

exit:
	if (mem)
		free(mem);
	return gpa;
}
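
/*
 * Program the VF with the guest-physical vring addresses and the last
 * ring indexes, then start the hardware datapath.
 */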
static int
vdpa_ifcvf_start(struct ifcvf_internal *internal)
{
	struct ifcvf_hw *hw = &internal->hw;
	int i, nr_vring;
	int vid;
	struct rte_vhost_vring vq;
	uint64_t gpa;

	vid = internal->vid;
	nr_vring = rte_vhost_get_vring_num(vid);
	rte_vhost_get_negotiated_features(vid, &hw->req_features);

	for (i = 0; i < nr_vring; i++) {
		rte_vhost_get_vhost_vring(vid, i, &vq);
		gpa = hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.desc);
		if (gpa == 0) {
			DRV_LOG(ERR, "Fail to get GPA for descriptor ring.");
			return -1;
		}
		hw->vring[i].desc = gpa;

		gpa = hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.avail);
		if (gpa == 0) {
			DRV_LOG(ERR, "Fail to get GPA for available ring.");
			return -1;
		}
		hw->vring[i].avail = gpa;

		gpa = hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.used);
		if (gpa == 0) {
			DRV_LOG(ERR, "Fail to get GPA for used ring.");
			return -1;
		}
		hw->vring[i].used = gpa;

		hw->vring[i].size = vq.size;
		rte_vhost_get_vring_base(vid, i, &hw->vring[i].last_avail_idx,
				&hw->vring[i].last_used_idx);
	}
	hw->nr_vring = i;

	return ifcvf_start_hw(&internal->hw);
}
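
/*
 * Stop the hardware datapath and push the final ring indexes back to
 * vhost; if dirty-page logging was on, disable it and flush the used
 * rings to the log.
 */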
static void
vdpa_ifcvf_stop(struct ifcvf_internal *internal)
{
	struct ifcvf_hw *hw = &internal->hw;
	uint32_t i;
	int vid;
	uint64_t features = 0;
	uint64_t log_base = 0, log_size = 0;
	uint64_t len;

	vid = internal->vid;
	ifcvf_stop_hw(hw);

	for (i = 0; i < hw->nr_vring; i++)
		rte_vhost_set_vring_base(vid, i, hw->vring[i].last_avail_idx,
				hw->vring[i].last_used_idx);

	if (internal->sw_lm)
		return;

	rte_vhost_get_negotiated_features(vid, &features);
	if (RTE_VHOST_NEED_LOG(features)) {
		ifcvf_disable_logging(hw);
		rte_vhost_get_log_base(internal->vid, &log_base, &log_size);
		rte_vfio_container_dma_unmap(internal->vfio_container_fd,
				log_base, IFCVF_LOG_BASE, log_size);
		/*
		 * IFCVF marks dirty pages only for packet buffers; mark
		 * the used rings dirty in software after the device stops.
		 */
		for (i = 0; i < hw->nr_vring; i++) {
			len = IFCVF_USED_RING_LEN(hw->vring[i].size);
			rte_vhost_log_used_vring(vid, i, 0, len);
		}
	}
}
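
/*
 * The VFIO IRQ-set buffer carries one eventfd for the config/misc
 * vector plus one per vring, appended after struct vfio_irq_set.
 */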
#define MSIX_IRQ_SET_BUF_LEN (sizeof(struct vfio_irq_set) + \
		sizeof(int) * (IFCVF_MAX_QUEUES * 2 + 1))

static int
vdpa_enable_vfio_intr(struct ifcvf_internal *internal, bool m_rx)
{
	int ret;
	uint32_t i, nr_vring;
	char irq_set_buf[MSIX_IRQ_SET_BUF_LEN];
	struct vfio_irq_set *irq_set;
	int *fd_ptr;
	struct rte_vhost_vring vring;
	int fd;

	vring.callfd = -1;

	nr_vring = rte_vhost_get_vring_num(internal->vid);

	irq_set = (struct vfio_irq_set *)irq_set_buf;
	irq_set->argsz = sizeof(irq_set_buf);
	irq_set->count = nr_vring + 1;
	irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
			 VFIO_IRQ_SET_ACTION_TRIGGER;
	irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
	irq_set->start = 0;
	fd_ptr = (int *)&irq_set->data;
	fd_ptr[RTE_INTR_VEC_ZERO_OFFSET] = internal->pdev->intr_handle.fd;

	for (i = 0; i < nr_vring; i++)
		internal->intr_fd[i] = -1;

	for (i = 0; i < nr_vring; i++) {
		rte_vhost_get_vhost_vring(internal->vid, i, &vring);
		fd_ptr[RTE_INTR_VEC_RXTX_OFFSET + i] = vring.callfd;
		if ((i & 1) == 0 && m_rx == true) {
			/* For RX queues, relay interrupts via our own fd */
			fd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
			if (fd < 0) {
				DRV_LOG(ERR, "can't setup eventfd: %s",
					strerror(errno));
				return -1;
			}
			internal->intr_fd[i] = fd;
			fd_ptr[RTE_INTR_VEC_RXTX_OFFSET + i] = fd;
		}
	}

	ret = ioctl(internal->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
	if (ret) {
		DRV_LOG(ERR, "Error enabling MSI-X interrupts: %s",
				strerror(errno));
		return -1;
	}

	return 0;
}

static int
vdpa_disable_vfio_intr(struct ifcvf_internal *internal)
{
	int ret;
	uint32_t i, nr_vring;
	char irq_set_buf[MSIX_IRQ_SET_BUF_LEN];
	struct vfio_irq_set *irq_set;

	irq_set = (struct vfio_irq_set *)irq_set_buf;
	irq_set->argsz = sizeof(irq_set_buf);
	irq_set->count = 0;
	irq_set->flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER;
	irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
	irq_set->start = 0;

	nr_vring = rte_vhost_get_vring_num(internal->vid);
	for (i = 0; i < nr_vring; i++) {
		if (internal->intr_fd[i] >= 0)
			close(internal->intr_fd[i]);
		internal->intr_fd[i] = -1;
	}

	ret = ioctl(internal->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
	if (ret) {
		DRV_LOG(ERR, "Error disabling MSI-X interrupts: %s",
				strerror(errno));
		return -1;
	}

	return 0;
}
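
/*
 * Notify relay thread: polls the vrings' kick eventfds with epoll and
 * forwards each guest kick to the VF's notify register.
 */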
static void *
notify_relay(void *arg)
{
	int i, kickfd, epfd, nfds = 0;
	uint32_t qid, q_num;
	struct epoll_event events[IFCVF_MAX_QUEUES * 2];
	struct epoll_event ev;
	uint64_t buf;
	int nbytes;
	struct rte_vhost_vring vring;
	struct ifcvf_internal *internal = (struct ifcvf_internal *)arg;
	struct ifcvf_hw *hw = &internal->hw;

	q_num = rte_vhost_get_vring_num(internal->vid);

	epfd = epoll_create(IFCVF_MAX_QUEUES * 2);
	if (epfd < 0) {
		DRV_LOG(ERR, "failed to create epoll instance.");
		return NULL;
	}
	internal->epfd = epfd;

	vring.kickfd = -1;
	for (qid = 0; qid < q_num; qid++) {
		ev.events = EPOLLIN | EPOLLPRI;
		rte_vhost_get_vhost_vring(internal->vid, qid, &vring);
		ev.data.u64 = qid | (uint64_t)vring.kickfd << 32;
		if (epoll_ctl(epfd, EPOLL_CTL_ADD, vring.kickfd, &ev) < 0) {
			DRV_LOG(ERR, "epoll add error: %s", strerror(errno));
			return NULL;
		}
	}

	for (;;) {
		nfds = epoll_wait(epfd, events, q_num, -1);
		if (nfds < 0) {
			if (errno == EINTR)
				continue;
			DRV_LOG(ERR, "epoll_wait failed: %s", strerror(errno));
			return NULL;
		}

		for (i = 0; i < nfds; i++) {
			qid = events[i].data.u32;
			kickfd = (uint32_t)(events[i].data.u64 >> 32);
			do {
				nbytes = read(kickfd, &buf, 8);
				if (nbytes < 0) {
					if (errno == EINTR ||
					    errno == EWOULDBLOCK ||
					    errno == EAGAIN)
						continue;
					DRV_LOG(INFO, "Error reading kickfd: %s",
						strerror(errno));
				}
				break;
			} while (1);

			ifcvf_notify_queue(hw, qid);
		}
	}

	return NULL;
}

static int
setup_notify_relay(struct ifcvf_internal *internal)
{
	int ret;

	ret = pthread_create(&internal->tid, NULL, notify_relay,
			(void *)internal);
	if (ret) {
		DRV_LOG(ERR, "failed to create notify relay pthread.");
		return -1;
	}

	return 0;
}

static int
unset_notify_relay(struct ifcvf_internal *internal)
{
	void *status;

	if (internal->tid) {
		pthread_cancel(internal->tid);
		pthread_join(internal->tid, &status);
	}
	internal->tid = 0;

	if (internal->epfd >= 0)
		close(internal->epfd);
	internal->epfd = -1;

	return 0;
}
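
/*
 * Reconcile the datapath with the started/dev_attached flags: bring it
 * up (DMA map, interrupts, HW start, notify relay) when both are set,
 * tear it down in reverse order when either is cleared.
 */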
static int
update_datapath(struct ifcvf_internal *internal)
{
	int ret;

	rte_spinlock_lock(&internal->lock);

	if (!rte_atomic32_read(&internal->running) &&
	    (rte_atomic32_read(&internal->started) &&
	     rte_atomic32_read(&internal->dev_attached))) {
		ret = ifcvf_dma_map(internal, 1);
		if (ret)
			goto err;

		ret = vdpa_enable_vfio_intr(internal, 0);
		if (ret)
			goto err;

		ret = vdpa_ifcvf_start(internal);
		if (ret)
			goto err;

		ret = setup_notify_relay(internal);
		if (ret)
			goto err;

		rte_atomic32_set(&internal->running, 1);
	} else if (rte_atomic32_read(&internal->running) &&
		   (!rte_atomic32_read(&internal->started) ||
		    !rte_atomic32_read(&internal->dev_attached))) {
		ret = unset_notify_relay(internal);
		if (ret)
			goto err;

		vdpa_ifcvf_stop(internal);

		ret = vdpa_disable_vfio_intr(internal);
		if (ret)
			goto err;

		ret = ifcvf_dma_map(internal, 0);
		if (ret)
			goto err;

		rte_atomic32_set(&internal->running, 0);
	}

	rte_spinlock_unlock(&internal->lock);
	return 0;
err:
	rte_spinlock_unlock(&internal->lock);
	return ret;
}
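
/*
 * Start the VF for software-assisted live migration: allocate mediated
 * (shadow) rings that stand in for the guest's RX used rings, so the
 * relay thread can log dirty pages on the device's behalf.
 */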
static int
m_ifcvf_start(struct ifcvf_internal *internal)
{
	struct ifcvf_hw *hw = &internal->hw;
	uint32_t i, nr_vring;
	int vid, ret;
	struct rte_vhost_vring vq;
	void *vring_buf;
	uint64_t m_vring_iova = IFCVF_MEDIATED_VRING;
	uint64_t size;
	uint64_t gpa;

	memset(&vq, 0, sizeof(vq));
	vid = internal->vid;
	nr_vring = rte_vhost_get_vring_num(vid);
	rte_vhost_get_negotiated_features(vid, &hw->req_features);

	for (i = 0; i < nr_vring; i++) {
		rte_vhost_get_vhost_vring(vid, i, &vq);

		size = RTE_ALIGN_CEIL(vring_size(vq.size, PAGE_SIZE),
				PAGE_SIZE);
		vring_buf = rte_zmalloc("ifcvf", size, PAGE_SIZE);
		vring_init(&internal->m_vring[i], vq.size, vring_buf,
				PAGE_SIZE);

		ret = rte_vfio_container_dma_map(internal->vfio_container_fd,
			(uint64_t)(uintptr_t)vring_buf, m_vring_iova, size);
		if (ret < 0) {
			DRV_LOG(ERR, "mediated vring DMA map failed.");
			goto error;
		}

		gpa = hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.desc);
		if (gpa == 0) {
			DRV_LOG(ERR, "Fail to get GPA for descriptor ring.");
			return -1;
		}
		hw->vring[i].desc = gpa;

		gpa = hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.avail);
		if (gpa == 0) {
			DRV_LOG(ERR, "Fail to get GPA for available ring.");
			return -1;
		}
		hw->vring[i].avail = gpa;

		/* Direct I/O for Tx queue, relay for Rx queue */
		if (i & 1) {
			gpa = hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.used);
			if (gpa == 0) {
				DRV_LOG(ERR, "Fail to get GPA for used ring.");
				return -1;
			}
			hw->vring[i].used = gpa;
		} else {
			hw->vring[i].used = m_vring_iova +
				(char *)internal->m_vring[i].used -
				(char *)internal->m_vring[i].desc;
		}

		hw->vring[i].size = vq.size;

		rte_vhost_get_vring_base(vid, i,
				&internal->m_vring[i].avail->idx,
				&internal->m_vring[i].used->idx);

		rte_vhost_get_vring_base(vid, i, &hw->vring[i].last_avail_idx,
				&hw->vring[i].last_used_idx);

		m_vring_iova += size;
	}
	hw->nr_vring = nr_vring;

	return ifcvf_start_hw(&internal->hw);

error:
	for (i = 0; i < nr_vring; i++)
		if (internal->m_vring[i].desc)
			rte_free(internal->m_vring[i].desc);

	return -1;
}

static int
m_ifcvf_stop(struct ifcvf_internal *internal)
{
	int vid;
	uint32_t i;
	struct rte_vhost_vring vq;
	struct ifcvf_hw *hw = &internal->hw;
	uint64_t m_vring_iova = IFCVF_MEDIATED_VRING;
	uint64_t size, len;

	vid = internal->vid;
	ifcvf_stop_hw(hw);

	for (i = 0; i < hw->nr_vring; i++) {
		/* synchronize remaining new used entries if any */
		if ((i & 1) == 0)
			update_used_ring(internal, i);

		rte_vhost_get_vhost_vring(vid, i, &vq);
		len = IFCVF_USED_RING_LEN(vq.size);
		rte_vhost_log_used_vring(vid, i, 0, len);

		size = RTE_ALIGN_CEIL(vring_size(vq.size, PAGE_SIZE),
				PAGE_SIZE);
		rte_vfio_container_dma_unmap(internal->vfio_container_fd,
			(uint64_t)(uintptr_t)internal->m_vring[i].desc,
			m_vring_iova, size);

		rte_vhost_set_vring_base(vid, i, hw->vring[i].last_avail_idx,
				hw->vring[i].last_used_idx);
		rte_free(internal->m_vring[i].desc);
		m_vring_iova += size;
	}

	return 0;
}
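
/*
 * Copy new used entries from the mediated ring to the guest's used
 * ring and interrupt the guest.
 */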
static void
update_used_ring(struct ifcvf_internal *internal, uint16_t qid)
{
	rte_vdpa_relay_vring_used(internal->vid, qid, &internal->m_vring[qid]);
	rte_vhost_vring_call(internal->vid, qid);
}
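
/*
 * Vring relay thread for software live migration. Each epoll event
 * packs the fd in the high 32 bits and "qid << 1" in the low bits,
 * with bit 0 set for used-ring interrupt eventfds and clear for
 * guest kick eventfds.
 */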
static void *
vring_relay(void *arg)
{
	int i, vid, epfd, fd, nfds;
	struct ifcvf_internal *internal = (struct ifcvf_internal *)arg;
	struct rte_vhost_vring vring;
	uint16_t qid, q_num;
	struct epoll_event events[IFCVF_MAX_QUEUES * 4];
	struct epoll_event ev;
	int nbytes;
	uint64_t buf;

	vid = internal->vid;
	q_num = rte_vhost_get_vring_num(vid);

	/* add notify fd and interrupt fd to epoll */
	epfd = epoll_create(IFCVF_MAX_QUEUES * 2);
	if (epfd < 0) {
		DRV_LOG(ERR, "failed to create epoll instance.");
		return NULL;
	}
	internal->epfd = epfd;

	vring.kickfd = -1;
	for (qid = 0; qid < q_num; qid++) {
		ev.events = EPOLLIN | EPOLLPRI;
		rte_vhost_get_vhost_vring(vid, qid, &vring);
		ev.data.u64 = qid << 1 | (uint64_t)vring.kickfd << 32;
		if (epoll_ctl(epfd, EPOLL_CTL_ADD, vring.kickfd, &ev) < 0) {
			DRV_LOG(ERR, "epoll add error: %s", strerror(errno));
			return NULL;
		}
	}

	for (qid = 0; qid < q_num; qid += 2) {
		ev.events = EPOLLIN | EPOLLPRI;
		/* leave a flag to mark it's for interrupt */
		ev.data.u64 = 1 | qid << 1 |
			(uint64_t)internal->intr_fd[qid] << 32;
		if (epoll_ctl(epfd, EPOLL_CTL_ADD, internal->intr_fd[qid], &ev)
				< 0) {
			DRV_LOG(ERR, "epoll add error: %s", strerror(errno));
			return NULL;
		}
		update_used_ring(internal, qid);
	}

	/* start relay with a first kick */
	for (qid = 0; qid < q_num; qid++)
		ifcvf_notify_queue(&internal->hw, qid);

	/* listen to the events and react accordingly */
	for (;;) {
		nfds = epoll_wait(epfd, events, q_num * 2, -1);
		if (nfds < 0) {
			if (errno == EINTR)
				continue;
			DRV_LOG(ERR, "epoll_wait failed: %s", strerror(errno));
			return NULL;
		}

		for (i = 0; i < nfds; i++) {
			fd = (uint32_t)(events[i].data.u64 >> 32);
			do {
				nbytes = read(fd, &buf, 8);
				if (nbytes < 0) {
					if (errno == EINTR ||
					    errno == EWOULDBLOCK ||
					    errno == EAGAIN)
						continue;
					DRV_LOG(INFO, "Error reading kickfd: %s",
						strerror(errno));
				}
				break;
			} while (1);

			qid = events[i].data.u32 >> 1;

			if (events[i].data.u32 & 1)
				update_used_ring(internal, qid);
			else
				ifcvf_notify_queue(&internal->hw, qid);
		}
	}

	return NULL;
}

static int
setup_vring_relay(struct ifcvf_internal *internal)
{
	int ret;

	ret = pthread_create(&internal->tid, NULL, vring_relay,
			(void *)internal);
	if (ret) {
		DRV_LOG(ERR, "failed to create ring relay pthread.");
		return -1;
	}

	return 0;
}

static int
unset_vring_relay(struct ifcvf_internal *internal)
{
	void *status;

	if (internal->tid) {
		pthread_cancel(internal->tid);
		pthread_join(internal->tid, &status);
	}
	internal->tid = 0;

	if (internal->epfd >= 0)
		close(internal->epfd);
	internal->epfd = -1;

	return 0;
}
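
/*
 * Switch from the direct hardware datapath to the software relay
 * datapath once live migration (dirty page logging) is requested.
 */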
static int
ifcvf_sw_fallback_switchover(struct ifcvf_internal *internal)
{
	int ret;
	int vid = internal->vid;

	/* stop the direct IO data path */
	unset_notify_relay(internal);
	vdpa_ifcvf_stop(internal);
	vdpa_disable_vfio_intr(internal);

	ret = rte_vhost_host_notifier_ctrl(vid, false);
	if (ret && ret != -ENOTSUP)
		goto error;

	/* set up interrupt for interrupt relay */
	ret = vdpa_enable_vfio_intr(internal, 1);
	if (ret)
		goto unmap;

	/* config the VF */
	ret = m_ifcvf_start(internal);
	if (ret)
		goto unset_intr;

	/* set up vring relay thread */
	ret = setup_vring_relay(internal);
	if (ret)
		goto stop_vf;

	rte_vhost_host_notifier_ctrl(vid, true);

	internal->sw_fallback_running = true;

	return 0;

stop_vf:
	m_ifcvf_stop(internal);
unset_intr:
	vdpa_disable_vfio_intr(internal);
unmap:
	ifcvf_dma_map(internal, 0);
error:
	return -1;
}

static int
ifcvf_dev_config(int vid)
{
	int did;
	struct internal_list *list;
	struct ifcvf_internal *internal;

	did = rte_vhost_get_vdpa_device_id(vid);
	list = find_internal_resource_by_did(did);
	if (list == NULL) {
		DRV_LOG(ERR, "Invalid device id: %d", did);
		return -1;
	}

	internal = list->internal;
	internal->vid = vid;
	rte_atomic32_set(&internal->dev_attached, 1);
	update_datapath(internal);

	if (rte_vhost_host_notifier_ctrl(vid, true) != 0)
		DRV_LOG(NOTICE, "vDPA (%d): software relay is used.", did);

	return 0;
}

static int
ifcvf_dev_close(int vid)
{
	int did;
	struct internal_list *list;
	struct ifcvf_internal *internal;

	did = rte_vhost_get_vdpa_device_id(vid);
	list = find_internal_resource_by_did(did);
	if (list == NULL) {
		DRV_LOG(ERR, "Invalid device id: %d", did);
		return -1;
	}

	internal = list->internal;

	if (internal->sw_fallback_running) {
		/* unset ring relay */
		unset_vring_relay(internal);

		/* stop the software relay data path */
		m_ifcvf_stop(internal);

		/* remove interrupt setting */
		vdpa_disable_vfio_intr(internal);

		/* unset DMA map for guest memory */
		ifcvf_dma_map(internal, 0);

		internal->sw_fallback_running = false;
	} else {
		rte_atomic32_set(&internal->dev_attached, 0);
		update_datapath(internal);
	}

	return 0;
}

static int
ifcvf_set_features(int vid)
{
	uint64_t features = 0;
	int did;
	struct internal_list *list;
	struct ifcvf_internal *internal;
	uint64_t log_base = 0, log_size = 0;

	did = rte_vhost_get_vdpa_device_id(vid);
	list = find_internal_resource_by_did(did);
	if (list == NULL) {
		DRV_LOG(ERR, "Invalid device id: %d", did);
		return -1;
	}

	internal = list->internal;
	rte_vhost_get_negotiated_features(vid, &features);

	if (!RTE_VHOST_NEED_LOG(features))
		return 0;

	if (internal->sw_lm) {
		ifcvf_sw_fallback_switchover(internal);
	} else {
		rte_vhost_get_log_base(vid, &log_base, &log_size);
		rte_vfio_container_dma_map(internal->vfio_container_fd,
				log_base, IFCVF_LOG_BASE, log_size);
		ifcvf_enable_logging(&internal->hw, IFCVF_LOG_BASE, log_size);
	}

	return 0;
}

static int
ifcvf_get_vfio_group_fd(int vid)
{
	int did;
	struct internal_list *list;

	did = rte_vhost_get_vdpa_device_id(vid);
	list = find_internal_resource_by_did(did);
	if (list == NULL) {
		DRV_LOG(ERR, "Invalid device id: %d", did);
		return -1;
	}

	return list->internal->vfio_group_fd;
}

static int
ifcvf_get_vfio_device_fd(int vid)
{
	int did;
	struct internal_list *list;

	did = rte_vhost_get_vdpa_device_id(vid);
	list = find_internal_resource_by_did(did);
	if (list == NULL) {
		DRV_LOG(ERR, "Invalid device id: %d", did);
		return -1;
	}

	return list->internal->vfio_dev_fd;
}

static int
ifcvf_get_notify_area(int vid, int qid, uint64_t *offset, uint64_t *size)
{
	int did;
	struct internal_list *list;
	struct ifcvf_internal *internal;
	struct vfio_region_info reg = { .argsz = sizeof(reg) };
	int ret;

	did = rte_vhost_get_vdpa_device_id(vid);
	list = find_internal_resource_by_did(did);
	if (list == NULL) {
		DRV_LOG(ERR, "Invalid device id: %d", did);
		return -1;
	}

	internal = list->internal;

	reg.index = ifcvf_get_notify_region(&internal->hw);
	ret = ioctl(internal->vfio_dev_fd, VFIO_DEVICE_GET_REGION_INFO, &reg);
	if (ret) {
		DRV_LOG(ERR, "Cannot get device region info: %s",
				strerror(errno));
		return -1;
	}

	*offset = ifcvf_get_queue_notify_off(&internal->hw, qid) + reg.offset;
	*size = 0x1000;

	return 0;
}

static int
ifcvf_get_queue_num(int did, uint32_t *queue_num)
{
	struct internal_list *list;

	list = find_internal_resource_by_did(did);
	if (list == NULL) {
		DRV_LOG(ERR, "Invalid device id: %d", did);
		return -1;
	}

	*queue_num = list->internal->max_queues;

	return 0;
}

static int
ifcvf_get_vdpa_features(int did, uint64_t *features)
{
	struct internal_list *list;

	list = find_internal_resource_by_did(did);
	if (list == NULL) {
		DRV_LOG(ERR, "Invalid device id: %d", did);
		return -1;
	}

	*features = list->internal->features;

	return 0;
}

#define VDPA_SUPPORTED_PROTOCOL_FEATURES \
		(1ULL << VHOST_USER_PROTOCOL_F_REPLY_ACK | \
		 1ULL << VHOST_USER_PROTOCOL_F_SLAVE_REQ | \
		 1ULL << VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD | \
		 1ULL << VHOST_USER_PROTOCOL_F_HOST_NOTIFIER | \
		 1ULL << VHOST_USER_PROTOCOL_F_LOG_SHMFD)
static int
ifcvf_get_protocol_features(int did __rte_unused, uint64_t *features)
{
	*features = VDPA_SUPPORTED_PROTOCOL_FEATURES;
	return 0;
}

static struct rte_vdpa_dev_ops ifcvf_ops = {
	.get_queue_num = ifcvf_get_queue_num,
	.get_features = ifcvf_get_vdpa_features,
	.get_protocol_features = ifcvf_get_protocol_features,
	.dev_conf = ifcvf_dev_config,
	.dev_close = ifcvf_dev_close,
	.set_vring_state = NULL,
	.set_features = ifcvf_set_features,
	.migration_done = NULL,
	.get_vfio_group_fd = ifcvf_get_vfio_group_fd,
	.get_vfio_device_fd = ifcvf_get_vfio_device_fd,
	.get_notify_area = ifcvf_get_notify_area,
};

static inline int
open_int(const char *key __rte_unused, const char *value, void *extra_args)
{
	uint16_t *n = extra_args;

	if (value == NULL || extra_args == NULL)
		return -EINVAL;

	*n = (uint16_t)strtoul(value, NULL, 0);
	if (*n == USHRT_MAX && errno == ERANGE)
		return -1;

	return 0;
}
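
/*
 * The driver binds only when vDPA mode is requested in the devargs,
 * e.g. (illustrative devargs): -w 06:00.3,vdpa=1 or
 * -w 06:00.3,vdpa=1,sw-live-migration=1.
 */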
static int
ifcvf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
		struct rte_pci_device *pci_dev)
{
	uint64_t features;
	struct ifcvf_internal *internal = NULL;
	struct internal_list *list = NULL;
	int vdpa_mode = 0;
	int sw_fallback_lm = 0;
	struct rte_kvargs *kvlist = NULL;
	int ret = 0;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	if (!pci_dev->device.devargs)
		return 1;

	kvlist = rte_kvargs_parse(pci_dev->device.devargs->args,
			ifcvf_valid_arguments);
	if (kvlist == NULL)
		return 1;

	/* probe only when vdpa mode is specified */
	if (rte_kvargs_count(kvlist, IFCVF_VDPA_MODE) == 0) {
		rte_kvargs_free(kvlist);
		return 1;
	}

	ret = rte_kvargs_process(kvlist, IFCVF_VDPA_MODE, &open_int,
			&vdpa_mode);
	if (ret < 0 || vdpa_mode == 0) {
		rte_kvargs_free(kvlist);
		return 1;
	}

	list = rte_zmalloc("ifcvf", sizeof(*list), 0);
	if (list == NULL)
		goto error;

	internal = rte_zmalloc("ifcvf", sizeof(*internal), 0);
	if (internal == NULL)
		goto error;

	internal->pdev = pci_dev;
	rte_spinlock_init(&internal->lock);

	if (ifcvf_vfio_setup(internal) < 0) {
		DRV_LOG(ERR, "failed to setup device %s", pci_dev->name);
		goto error;
	}

	if (ifcvf_init_hw(&internal->hw, internal->pdev) < 0) {
		DRV_LOG(ERR, "failed to init device %s", pci_dev->name);
		goto error;
	}

	internal->max_queues = IFCVF_MAX_QUEUES;
	features = ifcvf_get_features(&internal->hw);
	internal->features = (features &
		~(1ULL << VIRTIO_F_IOMMU_PLATFORM)) |
		(1ULL << VIRTIO_NET_F_GUEST_ANNOUNCE) |
		(1ULL << VIRTIO_NET_F_CTRL_VQ) |
		(1ULL << VIRTIO_NET_F_STATUS) |
		(1ULL << VHOST_USER_F_PROTOCOL_FEATURES) |
		(1ULL << VHOST_F_LOG_ALL);

	internal->dev_addr.pci_addr = pci_dev->addr;
	internal->dev_addr.type = PCI_ADDR;
	list->internal = internal;

	if (rte_kvargs_count(kvlist, IFCVF_SW_FALLBACK_LM)) {
		ret = rte_kvargs_process(kvlist, IFCVF_SW_FALLBACK_LM,
				&open_int, &sw_fallback_lm);
		if (ret < 0)
			goto error;
	}
	internal->sw_lm = sw_fallback_lm;

	internal->did = rte_vdpa_register_device(&internal->dev_addr,
				&ifcvf_ops);
	if (internal->did < 0) {
		DRV_LOG(ERR, "failed to register device %s", pci_dev->name);
		goto error;
	}

	pthread_mutex_lock(&internal_list_lock);
	TAILQ_INSERT_TAIL(&internal_list, list, next);
	pthread_mutex_unlock(&internal_list_lock);

	rte_atomic32_set(&internal->started, 1);
	update_datapath(internal);

	rte_kvargs_free(kvlist);
	return 0;

error:
	rte_kvargs_free(kvlist);
	rte_free(list);
	rte_free(internal);
	return -1;
}

static int
ifcvf_pci_remove(struct rte_pci_device *pci_dev)
{
	struct ifcvf_internal *internal;
	struct internal_list *list;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	list = find_internal_resource_by_dev(pci_dev);
	if (list == NULL) {
		DRV_LOG(ERR, "Invalid device: %s", pci_dev->name);
		return -1;
	}

	internal = list->internal;
	rte_atomic32_set(&internal->started, 0);
	update_datapath(internal);

	rte_pci_unmap_device(internal->pdev);
	rte_vfio_container_destroy(internal->vfio_container_fd);
	rte_vdpa_unregister_device(internal->did);

	pthread_mutex_lock(&internal_list_lock);
	TAILQ_REMOVE(&internal_list, list, next);
	pthread_mutex_unlock(&internal_list_lock);

	rte_free(list);
	rte_free(internal);

	return 0;
}

/*
 * IFCVF has the same vendor ID and device ID as a virtio net PCI
 * device, but is distinguished by its specific subsystem vendor ID
 * and device ID.
 */
static const struct rte_pci_id pci_id_ifcvf_map[] = {
	{ .class_id = RTE_CLASS_ANY_ID,
	  .vendor_id = IFCVF_VENDOR_ID,
	  .device_id = IFCVF_DEVICE_ID,
	  .subsystem_vendor_id = IFCVF_SUBSYS_VENDOR_ID,
	  .subsystem_device_id = IFCVF_SUBSYS_DEVICE_ID,
	},

	{ .vendor_id = 0, /* sentinel */
	},
};

static struct rte_pci_driver rte_ifcvf_vdpa = {
	.id_table = pci_id_ifcvf_map,
	.probe = ifcvf_pci_probe,
	.remove = ifcvf_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_ifcvf, rte_ifcvf_vdpa);
RTE_PMD_REGISTER_PCI_TABLE(net_ifcvf, pci_id_ifcvf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_ifcvf, "* vfio-pci");

RTE_INIT(ifcvf_vdpa_init_log)
{
	ifcvf_vdpa_logtype = rte_log_register("pmd.net.ifcvf_vdpa");
	if (ifcvf_vdpa_logtype >= 0)
		rte_log_set_level(ifcvf_vdpa_logtype, RTE_LOG_NOTICE);
}