/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation
 */

#include <unistd.h>
#include <pthread.h>
#include <fcntl.h>
#include <string.h>
#include <errno.h>
#include <limits.h>
#include <sys/ioctl.h>
#include <sys/epoll.h>
#include <linux/virtio_net.h>
#include <stdbool.h>

#include <rte_malloc.h>
#include <rte_memory.h>
#include <rte_bus_pci.h>
#include <rte_vhost.h>
#include <rte_vdpa.h>
#include <rte_vfio.h>
#include <rte_spinlock.h>
#include <rte_log.h>
#include <rte_kvargs.h>
#include <rte_devargs.h>

#include "base/ifcvf.h"

#define DRV_LOG(level, fmt, args...) \
	rte_log(RTE_LOG_ ## level, ifcvf_vdpa_logtype, \
		"IFCVF %s(): " fmt "\n", __func__, ##args)

#ifndef PAGE_SIZE
#define PAGE_SIZE 4096
#endif

#define IFCVF_USED_RING_LEN(size) \
	((size) * sizeof(struct vring_used_elem) + sizeof(uint16_t) * 3)
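
/*
 * The used ring holds three uint16_t fields (flags, idx and the trailing
 * avail_event) plus one struct vring_used_elem per descriptor, which is
 * what IFCVF_USED_RING_LEN() above accounts for.
 */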

#define IFCVF_VDPA_MODE		"vdpa"
#define IFCVF_SW_FALLBACK_LM	"sw-live-migration"

static const char * const ifcvf_valid_arguments[] = {
	IFCVF_VDPA_MODE,
	IFCVF_SW_FALLBACK_LM,
	NULL
};

static int ifcvf_vdpa_logtype;

struct ifcvf_internal {
	struct rte_vdpa_dev_addr dev_addr;
	struct rte_pci_device *pdev;
	struct ifcvf_hw hw;
	int vfio_container_fd;
	int vfio_group_fd;
	int vfio_dev_fd;
	pthread_t tid;	/* thread for notify relay */
	int epfd;
	int vid;
	int did;
	uint16_t max_queues;
	uint64_t features;
	rte_atomic32_t started;
	rte_atomic32_t dev_attached;
	rte_atomic32_t running;
	rte_spinlock_t lock;
	bool sw_lm;
	bool sw_fallback_running;
	/* mediated vring for sw fallback */
	struct vring m_vring[IFCVF_MAX_QUEUES * 2];
};

struct internal_list {
	TAILQ_ENTRY(internal_list) next;
	struct ifcvf_internal *internal;
};

TAILQ_HEAD(internal_list_head, internal_list);
static struct internal_list_head internal_list =
	TAILQ_HEAD_INITIALIZER(internal_list);

static pthread_mutex_t internal_list_lock = PTHREAD_MUTEX_INITIALIZER;
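
/*
 * Global registry of probed IFCVF devices, searchable either by vDPA
 * device id or by PCI device, so vhost callbacks (which only carry a
 * vid/did) can locate the driver state they belong to.
 */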

static struct internal_list *
find_internal_resource_by_did(int did)
{
	int found = 0;
	struct internal_list *list;

	pthread_mutex_lock(&internal_list_lock);

	TAILQ_FOREACH(list, &internal_list, next) {
		if (did == list->internal->did) {
			found = 1;
			break;
		}
	}

	pthread_mutex_unlock(&internal_list_lock);

	if (!found)
		return NULL;

	return list;
}

static struct internal_list *
find_internal_resource_by_dev(struct rte_pci_device *pdev)
{
	int found = 0;
	struct internal_list *list;

	pthread_mutex_lock(&internal_list_lock);

	TAILQ_FOREACH(list, &internal_list, next) {
		if (pdev == list->internal->pdev) {
			found = 1;
			break;
		}
	}

	pthread_mutex_unlock(&internal_list_lock);

	if (!found)
		return NULL;

	return list;
}

static int
ifcvf_vfio_setup(struct ifcvf_internal *internal)
{
	struct rte_pci_device *dev = internal->pdev;
	char devname[RTE_DEV_NAME_MAX_LEN] = {0};
	int iommu_group_num;
	int i;

	internal->vfio_dev_fd = -1;
	internal->vfio_group_fd = -1;
	internal->vfio_container_fd = -1;

	rte_pci_device_name(&dev->addr, devname, RTE_DEV_NAME_MAX_LEN);
	rte_vfio_get_group_num(rte_pci_get_sysfs_path(), devname,
			&iommu_group_num);

	internal->vfio_container_fd = rte_vfio_container_create();
	if (internal->vfio_container_fd < 0)
		return -1;

	internal->vfio_group_fd = rte_vfio_container_group_bind(
			internal->vfio_container_fd, iommu_group_num);
	if (internal->vfio_group_fd < 0)
		goto err;

	if (rte_pci_map_device(dev))
		goto err;

	internal->vfio_dev_fd = dev->intr_handle.vfio_dev_fd;

	for (i = 0; i < RTE_MIN(PCI_MAX_RESOURCE, IFCVF_PCI_MAX_RESOURCE);
			i++) {
		internal->hw.mem_resource[i].addr =
			internal->pdev->mem_resource[i].addr;
		internal->hw.mem_resource[i].phys_addr =
			internal->pdev->mem_resource[i].phys_addr;
		internal->hw.mem_resource[i].len =
			internal->pdev->mem_resource[i].len;
	}

	return 0;

err:
	rte_vfio_container_destroy(internal->vfio_container_fd);
	return -1;
}
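
/*
 * Map (do_map == 1) or unmap (do_map == 0) every region of the guest
 * memory layout in the device's VFIO container, so the VF can DMA
 * directly to and from guest buffers using guest physical addresses.
 */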
static int
ifcvf_dma_map(struct ifcvf_internal *internal, int do_map)
{
	uint32_t i;
	int ret;
	struct rte_vhost_memory *mem = NULL;
	int vfio_container_fd;

	ret = rte_vhost_get_mem_table(internal->vid, &mem);
	if (ret < 0) {
		DRV_LOG(ERR, "failed to get VM memory layout.");
		goto exit;
	}

	vfio_container_fd = internal->vfio_container_fd;

	for (i = 0; i < mem->nregions; i++) {
		struct rte_vhost_mem_region *reg;

		reg = &mem->regions[i];
		DRV_LOG(INFO, "%s, region %u: HVA 0x%" PRIx64 ", "
			"GPA 0x%" PRIx64 ", size 0x%" PRIx64 ".",
			do_map ? "DMA map" : "DMA unmap", i,
			reg->host_user_addr, reg->guest_phys_addr, reg->size);

		if (do_map) {
			ret = rte_vfio_container_dma_map(vfio_container_fd,
				reg->host_user_addr, reg->guest_phys_addr,
				reg->size);
			if (ret < 0) {
				DRV_LOG(ERR, "DMA map failed.");
				goto exit;
			}
		} else {
			ret = rte_vfio_container_dma_unmap(vfio_container_fd,
				reg->host_user_addr, reg->guest_phys_addr,
				reg->size);
			if (ret < 0) {
				DRV_LOG(ERR, "DMA unmap failed.");
				goto exit;
			}
		}
	}

exit:
	if (mem)
		free(mem);
	return ret;
}
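
/*
 * Translate a host virtual address (HVA) from the vhost memory table
 * into the guest physical address (GPA) the device must use for DMA.
 * Returns 0 when the address falls in no region.
 */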
static uint64_t
hva_to_gpa(int vid, uint64_t hva)
{
	struct rte_vhost_memory *mem = NULL;
	struct rte_vhost_mem_region *reg;
	uint32_t i;
	uint64_t gpa = 0;

	if (rte_vhost_get_mem_table(vid, &mem) < 0)
		goto exit;

	for (i = 0; i < mem->nregions; i++) {
		reg = &mem->regions[i];

		if (hva >= reg->host_user_addr &&
				hva < reg->host_user_addr + reg->size) {
			gpa = hva - reg->host_user_addr + reg->guest_phys_addr;
			break;
		}
	}

exit:
	if (mem)
		free(mem);
	return gpa;
}
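
/*
 * Program the VF with the guest's vring addresses and the negotiated
 * features, restore the last ring indexes, then start the hardware
 * datapath.
 */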
static int
vdpa_ifcvf_start(struct ifcvf_internal *internal)
{
	struct ifcvf_hw *hw = &internal->hw;
	int i, nr_vring;
	int vid;
	struct rte_vhost_vring vq;
	uint64_t gpa;

	vid = internal->vid;
	nr_vring = rte_vhost_get_vring_num(vid);
	rte_vhost_get_negotiated_features(vid, &hw->req_features);

	for (i = 0; i < nr_vring; i++) {
		rte_vhost_get_vhost_vring(vid, i, &vq);
		gpa = hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.desc);
		if (gpa == 0) {
			DRV_LOG(ERR, "Failed to get GPA for descriptor ring.");
			return -1;
		}
		hw->vring[i].desc = gpa;

		gpa = hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.avail);
		if (gpa == 0) {
			DRV_LOG(ERR, "Failed to get GPA for available ring.");
			return -1;
		}
		hw->vring[i].avail = gpa;

		gpa = hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.used);
		if (gpa == 0) {
			DRV_LOG(ERR, "Failed to get GPA for used ring.");
			return -1;
		}
		hw->vring[i].used = gpa;

		hw->vring[i].size = vq.size;
		rte_vhost_get_vring_base(vid, i, &hw->vring[i].last_avail_idx,
				&hw->vring[i].last_used_idx);
	}
	hw->nr_vring = i;

	return ifcvf_start_hw(&internal->hw);
}

static void
vdpa_ifcvf_stop(struct ifcvf_internal *internal)
{
	struct ifcvf_hw *hw = &internal->hw;
	uint32_t i;
	int vid;
	uint64_t features = 0;
	uint64_t log_base = 0, log_size = 0;
	uint64_t len;

	vid = internal->vid;
	ifcvf_stop_hw(hw);

	for (i = 0; i < hw->nr_vring; i++)
		rte_vhost_set_vring_base(vid, i, hw->vring[i].last_avail_idx,
				hw->vring[i].last_used_idx);

	rte_vhost_get_negotiated_features(vid, &features);
	if (RTE_VHOST_NEED_LOG(features)) {
		ifcvf_disable_logging(hw);
		rte_vhost_get_log_base(internal->vid, &log_base, &log_size);
		rte_vfio_container_dma_unmap(internal->vfio_container_fd,
				log_base, IFCVF_LOG_BASE, log_size);
		/*
		 * IFCVF marks dirty memory pages only for packet buffers;
		 * SW marks the used ring as dirty after the device stops.
		 */
		for (i = 0; i < hw->nr_vring; i++) {
			len = IFCVF_USED_RING_LEN(hw->vring[i].size);
			rte_vhost_log_used_vring(vid, i, 0, len);
		}
	}
}
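
/*
 * Buffer large enough for a VFIO MSI-X irq_set carrying one eventfd for
 * the config interrupt plus one eventfd per queue.
 */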
#define MSIX_IRQ_SET_BUF_LEN (sizeof(struct vfio_irq_set) + \
		sizeof(int) * (IFCVF_MAX_QUEUES * 2 + 1))

static int
vdpa_enable_vfio_intr(struct ifcvf_internal *internal)
{
	int ret;
	uint32_t i, nr_vring;
	char irq_set_buf[MSIX_IRQ_SET_BUF_LEN];
	struct vfio_irq_set *irq_set;
	int *fd_ptr;
	struct rte_vhost_vring vring;

	nr_vring = rte_vhost_get_vring_num(internal->vid);

	irq_set = (struct vfio_irq_set *)irq_set_buf;
	irq_set->argsz = sizeof(irq_set_buf);
	irq_set->count = nr_vring + 1;
	irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
			 VFIO_IRQ_SET_ACTION_TRIGGER;
	irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
	irq_set->start = 0;
	fd_ptr = (int *)&irq_set->data;
	fd_ptr[RTE_INTR_VEC_ZERO_OFFSET] = internal->pdev->intr_handle.fd;

	for (i = 0; i < nr_vring; i++) {
		rte_vhost_get_vhost_vring(internal->vid, i, &vring);
		fd_ptr[RTE_INTR_VEC_RXTX_OFFSET + i] = vring.callfd;
	}

	ret = ioctl(internal->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
	if (ret) {
		DRV_LOG(ERR, "Error enabling MSI-X interrupts: %s",
				strerror(errno));
		return -1;
	}

	return 0;
}

static int
vdpa_disable_vfio_intr(struct ifcvf_internal *internal)
{
	int ret;
	char irq_set_buf[MSIX_IRQ_SET_BUF_LEN];
	struct vfio_irq_set *irq_set;

	irq_set = (struct vfio_irq_set *)irq_set_buf;
	irq_set->argsz = sizeof(irq_set_buf);
	irq_set->count = 0;
	irq_set->flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER;
	irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
	irq_set->start = 0;

	ret = ioctl(internal->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
	if (ret) {
		DRV_LOG(ERR, "Error disabling MSI-X interrupts: %s",
				strerror(errno));
		return -1;
	}

	return 0;
}
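
/*
 * Relay thread for the direct IO datapath: it waits on every vring's
 * kickfd and forwards each guest kick to the VF's queue notify register.
 * The qid is packed in the low 32 bits of the epoll data and the kickfd
 * in the high 32 bits.
 */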
static void *
notify_relay(void *arg)
{
	int i, kickfd, epfd, nfds = 0;
	uint32_t qid, q_num;
	struct epoll_event events[IFCVF_MAX_QUEUES * 2];
	struct epoll_event ev;
	uint64_t buf;
	int nbytes;
	struct rte_vhost_vring vring;
	struct ifcvf_internal *internal = (struct ifcvf_internal *)arg;
	struct ifcvf_hw *hw = &internal->hw;

	q_num = rte_vhost_get_vring_num(internal->vid);

	epfd = epoll_create(IFCVF_MAX_QUEUES * 2);
	if (epfd < 0) {
		DRV_LOG(ERR, "failed to create epoll instance.");
		return NULL;
	}
	internal->epfd = epfd;

	for (qid = 0; qid < q_num; qid++) {
		ev.events = EPOLLIN | EPOLLPRI;
		rte_vhost_get_vhost_vring(internal->vid, qid, &vring);
		ev.data.u64 = qid | (uint64_t)vring.kickfd << 32;
		if (epoll_ctl(epfd, EPOLL_CTL_ADD, vring.kickfd, &ev) < 0) {
			DRV_LOG(ERR, "epoll add error: %s", strerror(errno));
			return NULL;
		}
	}

	for (;;) {
		nfds = epoll_wait(epfd, events, q_num, -1);
		if (nfds < 0) {
			if (errno == EINTR)
				continue;
			DRV_LOG(ERR, "epoll_wait failed.");
			return NULL;
		}

		for (i = 0; i < nfds; i++) {
			qid = events[i].data.u32;
			kickfd = (uint32_t)(events[i].data.u64 >> 32);
			do {
				nbytes = read(kickfd, &buf, 8);
				if (nbytes < 0) {
					if (errno == EINTR ||
					    errno == EWOULDBLOCK ||
					    errno == EAGAIN)
						continue;
					DRV_LOG(INFO, "Error reading "
						"kickfd: %s",
						strerror(errno));
				}
				break;
			} while (1);

			ifcvf_notify_queue(hw, qid);
		}
	}

	return NULL;
}

static int
setup_notify_relay(struct ifcvf_internal *internal)
{
	int ret;

	ret = pthread_create(&internal->tid, NULL, notify_relay,
			(void *)internal);
	if (ret) {
		DRV_LOG(ERR, "failed to create notify relay pthread.");
		return -1;
	}
	return 0;
}

static int
unset_notify_relay(struct ifcvf_internal *internal)
{
	void *status;

	if (internal->tid) {
		pthread_cancel(internal->tid);
		pthread_join(internal->tid, &status);
	}
	internal->tid = 0;

	if (internal->epfd >= 0)
		close(internal->epfd);
	internal->epfd = -1;

	return 0;
}

static int
update_datapath(struct ifcvf_internal *internal)
{
	int ret;

	rte_spinlock_lock(&internal->lock);

	if (!rte_atomic32_read(&internal->running) &&
	    (rte_atomic32_read(&internal->started) &&
	     rte_atomic32_read(&internal->dev_attached))) {
		ret = ifcvf_dma_map(internal, 1);
		if (ret)
			goto err;

		ret = vdpa_enable_vfio_intr(internal);
		if (ret)
			goto err;

		ret = vdpa_ifcvf_start(internal);
		if (ret)
			goto err;

		ret = setup_notify_relay(internal);
		if (ret)
			goto err;

		rte_atomic32_set(&internal->running, 1);
	} else if (rte_atomic32_read(&internal->running) &&
		   (!rte_atomic32_read(&internal->started) ||
		    !rte_atomic32_read(&internal->dev_attached))) {
		ret = unset_notify_relay(internal);
		if (ret)
			goto err;

		vdpa_ifcvf_stop(internal);

		ret = vdpa_disable_vfio_intr(internal);
		if (ret)
			goto err;

		ret = ifcvf_dma_map(internal, 0);
		if (ret)
			goto err;

		rte_atomic32_set(&internal->running, 0);
	}

	rte_spinlock_unlock(&internal->lock);
	return 0;
err:
	rte_spinlock_unlock(&internal->lock);
	return ret;
}
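
/*
 * SW-assisted live migration path: allocate host-mediated vrings, map
 * them into the VF's IOVA space at IFCVF_MEDIATED_VRING and point the
 * device's avail/used rings at them, while descriptors still come from
 * the guest's ring.
 */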
static int
m_ifcvf_start(struct ifcvf_internal *internal)
{
	struct ifcvf_hw *hw = &internal->hw;
	uint32_t i, nr_vring;
	int vid, ret;
	struct rte_vhost_vring vq;
	void *vring_buf;
	uint64_t m_vring_iova = IFCVF_MEDIATED_VRING;
	uint64_t size;
	uint64_t gpa;

	vid = internal->vid;
	nr_vring = rte_vhost_get_vring_num(vid);
	rte_vhost_get_negotiated_features(vid, &hw->req_features);

	for (i = 0; i < nr_vring; i++) {
		rte_vhost_get_vhost_vring(vid, i, &vq);

		size = RTE_ALIGN_CEIL(vring_size(vq.size, PAGE_SIZE),
				PAGE_SIZE);
		vring_buf = rte_zmalloc("ifcvf", size, PAGE_SIZE);
		vring_init(&internal->m_vring[i], vq.size, vring_buf,
				PAGE_SIZE);

		ret = rte_vfio_container_dma_map(internal->vfio_container_fd,
			(uint64_t)(uintptr_t)vring_buf, m_vring_iova, size);
		if (ret < 0) {
			DRV_LOG(ERR, "mediated vring DMA map failed.");
			goto error;
		}

		gpa = hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.desc);
		if (gpa == 0) {
			DRV_LOG(ERR, "Failed to get GPA for descriptor ring.");
			goto error;
		}
		hw->vring[i].desc = gpa;

		hw->vring[i].avail = m_vring_iova +
			(char *)internal->m_vring[i].avail -
			(char *)internal->m_vring[i].desc;

		hw->vring[i].used = m_vring_iova +
			(char *)internal->m_vring[i].used -
			(char *)internal->m_vring[i].desc;

		hw->vring[i].size = vq.size;

		rte_vhost_get_vring_base(vid, i, &hw->vring[i].last_avail_idx,
				&hw->vring[i].last_used_idx);

		m_vring_iova += size;
	}
	hw->nr_vring = nr_vring;

	return ifcvf_start_hw(&internal->hw);

error:
	for (i = 0; i < nr_vring; i++)
		if (internal->m_vring[i].desc)
			rte_free(internal->m_vring[i].desc);

	return -1;
}

static int
m_ifcvf_stop(struct ifcvf_internal *internal)
{
	int vid;
	uint32_t i;
	struct rte_vhost_vring vq;
	struct ifcvf_hw *hw = &internal->hw;
	uint64_t m_vring_iova = IFCVF_MEDIATED_VRING;
	uint64_t size, len;

	vid = internal->vid;
	ifcvf_stop_hw(hw);

	for (i = 0; i < hw->nr_vring; i++) {
		rte_vhost_get_vhost_vring(vid, i, &vq);
		len = IFCVF_USED_RING_LEN(vq.size);
		rte_vhost_log_used_vring(vid, i, 0, len);

		size = RTE_ALIGN_CEIL(vring_size(vq.size, PAGE_SIZE),
				PAGE_SIZE);
		rte_vfio_container_dma_unmap(internal->vfio_container_fd,
			(uint64_t)(uintptr_t)internal->m_vring[i].desc,
			m_vring_iova, size);

		rte_vhost_set_vring_base(vid, i, hw->vring[i].last_avail_idx,
				hw->vring[i].last_used_idx);
		rte_free(internal->m_vring[i].desc);
		m_vring_iova += size;
	}

	return 0;
}

static int
m_enable_vfio_intr(struct ifcvf_internal *internal)
{
	uint32_t nr_vring;
	struct rte_intr_handle *intr_handle = &internal->pdev->intr_handle;
	int ret;

	nr_vring = rte_vhost_get_vring_num(internal->vid);

	ret = rte_intr_efd_enable(intr_handle, nr_vring);
	if (ret)
		return -1;

	ret = rte_intr_enable(intr_handle);
	if (ret) {
		rte_intr_efd_disable(intr_handle);
		return -1;
	}

	return 0;
}

static void
m_disable_vfio_intr(struct ifcvf_internal *internal)
{
	struct rte_intr_handle *intr_handle = &internal->pdev->intr_handle;

	rte_intr_efd_disable(intr_handle);
	rte_intr_disable(intr_handle);
}
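
/*
 * Ring relay helpers: copy newly available descriptors from the guest
 * ring into the mediated ring and kick the device, or copy used entries
 * back to the guest ring and interrupt the guest.
 */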

static void
update_avail_ring(struct ifcvf_internal *internal, uint16_t qid)
{
	rte_vdpa_relay_vring_avail(internal->vid, qid, &internal->m_vring[qid]);
	ifcvf_notify_queue(&internal->hw, qid);
}

static void
update_used_ring(struct ifcvf_internal *internal, uint16_t qid)
{
	rte_vdpa_relay_vring_used(internal->vid, qid, &internal->m_vring[qid]);
	rte_vhost_vring_call(internal->vid, qid);
}
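
/*
 * Relay thread for the SW fallback datapath. Each epoll event encodes
 * the eventfd in the high 32 bits and, in the low bits, the qid shifted
 * left by one, with bit 0 telling used-ring work (device interrupt)
 * apart from avail-ring work (guest kick).
 */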
static void *
vring_relay(void *arg)
{
	int i, vid, epfd, fd, nfds;
	struct ifcvf_internal *internal = (struct ifcvf_internal *)arg;
	struct rte_vhost_vring vring;
	struct rte_intr_handle *intr_handle;
	uint16_t qid, q_num;
	struct epoll_event events[IFCVF_MAX_QUEUES * 4];
	struct epoll_event ev;
	uint64_t buf;
	int nbytes;

	vid = internal->vid;
	q_num = rte_vhost_get_vring_num(vid);

	/* prepare the mediated vring */
	for (qid = 0; qid < q_num; qid++) {
		rte_vhost_get_vring_base(vid, qid,
				&internal->m_vring[qid].avail->idx,
				&internal->m_vring[qid].used->idx);
		rte_vdpa_relay_vring_avail(vid, qid, &internal->m_vring[qid]);
	}

	/* add notify fd and interrupt fd to epoll */
	epfd = epoll_create(IFCVF_MAX_QUEUES * 2);
	if (epfd < 0) {
		DRV_LOG(ERR, "failed to create epoll instance.");
		return NULL;
	}
	internal->epfd = epfd;

	for (qid = 0; qid < q_num; qid++) {
		ev.events = EPOLLIN | EPOLLPRI;
		rte_vhost_get_vhost_vring(vid, qid, &vring);
		ev.data.u64 = qid << 1 | (uint64_t)vring.kickfd << 32;
		if (epoll_ctl(epfd, EPOLL_CTL_ADD, vring.kickfd, &ev) < 0) {
			DRV_LOG(ERR, "epoll add error: %s", strerror(errno));
			return NULL;
		}
	}

	intr_handle = &internal->pdev->intr_handle;
	for (qid = 0; qid < q_num; qid++) {
		ev.events = EPOLLIN | EPOLLPRI;
		ev.data.u64 = 1 | qid << 1 |
			(uint64_t)intr_handle->efds[qid] << 32;
		if (epoll_ctl(epfd, EPOLL_CTL_ADD, intr_handle->efds[qid], &ev)
				< 0) {
			DRV_LOG(ERR, "epoll add error: %s", strerror(errno));
			return NULL;
		}
	}

	/* start relay with a first kick */
	for (qid = 0; qid < q_num; qid++)
		ifcvf_notify_queue(&internal->hw, qid);

	/* listen to the events and react accordingly */
	for (;;) {
		nfds = epoll_wait(epfd, events, q_num * 2, -1);
		if (nfds < 0) {
			if (errno == EINTR)
				continue;
			DRV_LOG(ERR, "epoll_wait failed.");
			return NULL;
		}

		for (i = 0; i < nfds; i++) {
			fd = (uint32_t)(events[i].data.u64 >> 32);
			do {
				nbytes = read(fd, &buf, 8);
				if (nbytes < 0) {
					if (errno == EINTR ||
					    errno == EWOULDBLOCK ||
					    errno == EAGAIN)
						continue;
					DRV_LOG(INFO, "Error reading "
						"eventfd: %s",
						strerror(errno));
				}
				break;
			} while (1);

			qid = events[i].data.u32 >> 1;

			if (events[i].data.u32 & 1)
				update_used_ring(internal, qid);
			else
				update_avail_ring(internal, qid);
		}
	}

	return NULL;
}

static int
setup_vring_relay(struct ifcvf_internal *internal)
{
	int ret;

	ret = pthread_create(&internal->tid, NULL, vring_relay,
			(void *)internal);
	if (ret) {
		DRV_LOG(ERR, "failed to create ring relay pthread.");
		return -1;
	}
	return 0;
}

static int
unset_vring_relay(struct ifcvf_internal *internal)
{
	void *status;

	if (internal->tid) {
		pthread_cancel(internal->tid);
		pthread_join(internal->tid, &status);
	}
	internal->tid = 0;

	if (internal->epfd >= 0)
		close(internal->epfd);
	internal->epfd = -1;

	return 0;
}
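
/*
 * Switch a live device from the direct hardware datapath to the
 * host-mediated datapath, so dirty pages can be tracked in software
 * during live migration.
 */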
static int
ifcvf_sw_fallback_switchover(struct ifcvf_internal *internal)
{
	int ret;

	/* stop the direct IO data path */
	unset_notify_relay(internal);
	vdpa_ifcvf_stop(internal);
	vdpa_disable_vfio_intr(internal);

	ret = rte_vhost_host_notifier_ctrl(internal->vid, false);
	if (ret && ret != -ENOTSUP)
		goto error;

	/* set up interrupt for interrupt relay */
	ret = m_enable_vfio_intr(internal);
	if (ret)
		goto unmap;

	/* config the VF */
	ret = m_ifcvf_start(internal);
	if (ret)
		goto unset_intr;

	/* set up vring relay thread */
	ret = setup_vring_relay(internal);
	if (ret)
		goto stop_vf;

	internal->sw_fallback_running = true;

	return 0;

stop_vf:
	m_ifcvf_stop(internal);
unset_intr:
	m_disable_vfio_intr(internal);
unmap:
	ifcvf_dma_map(internal, 0);
error:
	return -1;
}

static int
ifcvf_dev_config(int vid)
{
	int did;
	struct internal_list *list;
	struct ifcvf_internal *internal;

	did = rte_vhost_get_vdpa_device_id(vid);
	list = find_internal_resource_by_did(did);
	if (list == NULL) {
		DRV_LOG(ERR, "Invalid device id: %d", did);
		return -1;
	}

	internal = list->internal;
	internal->vid = vid;
	rte_atomic32_set(&internal->dev_attached, 1);
	update_datapath(internal);

	if (rte_vhost_host_notifier_ctrl(vid, true) != 0)
		DRV_LOG(NOTICE, "vDPA (%d): software relay is used.", did);

	return 0;
}

static int
ifcvf_dev_close(int vid)
{
	int did;
	struct internal_list *list;
	struct ifcvf_internal *internal;

	did = rte_vhost_get_vdpa_device_id(vid);
	list = find_internal_resource_by_did(did);
	if (list == NULL) {
		DRV_LOG(ERR, "Invalid device id: %d", did);
		return -1;
	}

	internal = list->internal;

	if (internal->sw_fallback_running) {
		/* unset ring relay */
		unset_vring_relay(internal);

		/* reset VF */
		m_ifcvf_stop(internal);

		/* remove interrupt setting */
		m_disable_vfio_intr(internal);

		/* unset DMA map for guest memory */
		ifcvf_dma_map(internal, 0);

		internal->sw_fallback_running = false;
	} else {
		rte_atomic32_set(&internal->dev_attached, 0);
		update_datapath(internal);
	}

	return 0;
}

static int
ifcvf_set_features(int vid)
{
	uint64_t features = 0;
	int did;
	struct internal_list *list;
	struct ifcvf_internal *internal;
	uint64_t log_base = 0, log_size = 0;

	did = rte_vhost_get_vdpa_device_id(vid);
	list = find_internal_resource_by_did(did);
	if (list == NULL) {
		DRV_LOG(ERR, "Invalid device id: %d", did);
		return -1;
	}

	internal = list->internal;
	rte_vhost_get_negotiated_features(vid, &features);

	if (!RTE_VHOST_NEED_LOG(features))
		return 0;

	if (internal->sw_lm) {
		ifcvf_sw_fallback_switchover(internal);
	} else {
		rte_vhost_get_log_base(vid, &log_base, &log_size);
		rte_vfio_container_dma_map(internal->vfio_container_fd,
				log_base, IFCVF_LOG_BASE, log_size);
		ifcvf_enable_logging(&internal->hw, IFCVF_LOG_BASE, log_size);
	}

	return 0;
}

static int
ifcvf_get_vfio_group_fd(int vid)
{
	int did;
	struct internal_list *list;

	did = rte_vhost_get_vdpa_device_id(vid);
	list = find_internal_resource_by_did(did);
	if (list == NULL) {
		DRV_LOG(ERR, "Invalid device id: %d", did);
		return -1;
	}

	return list->internal->vfio_group_fd;
}

static int
ifcvf_get_vfio_device_fd(int vid)
{
	int did;
	struct internal_list *list;

	did = rte_vhost_get_vdpa_device_id(vid);
	list = find_internal_resource_by_did(did);
	if (list == NULL) {
		DRV_LOG(ERR, "Invalid device id: %d", did);
		return -1;
	}

	return list->internal->vfio_dev_fd;
}

static int
ifcvf_get_notify_area(int vid, int qid, uint64_t *offset, uint64_t *size)
{
	int did;
	struct internal_list *list;
	struct ifcvf_internal *internal;
	struct vfio_region_info reg = { .argsz = sizeof(reg) };
	int ret;

	did = rte_vhost_get_vdpa_device_id(vid);
	list = find_internal_resource_by_did(did);
	if (list == NULL) {
		DRV_LOG(ERR, "Invalid device id: %d", did);
		return -1;
	}

	internal = list->internal;

	reg.index = ifcvf_get_notify_region(&internal->hw);
	ret = ioctl(internal->vfio_dev_fd, VFIO_DEVICE_GET_REGION_INFO, &reg);
	if (ret) {
		DRV_LOG(ERR, "Cannot get device region info: %s",
				strerror(errno));
		return -1;
	}

	*offset = ifcvf_get_queue_notify_off(&internal->hw, qid) + reg.offset;
	*size = 0x1000;

	return 0;
}

static int
ifcvf_get_queue_num(int did, uint32_t *queue_num)
{
	struct internal_list *list;

	list = find_internal_resource_by_did(did);
	if (list == NULL) {
		DRV_LOG(ERR, "Invalid device id: %d", did);
		return -1;
	}

	*queue_num = list->internal->max_queues;

	return 0;
}

static int
ifcvf_get_vdpa_features(int did, uint64_t *features)
{
	struct internal_list *list;

	list = find_internal_resource_by_did(did);
	if (list == NULL) {
		DRV_LOG(ERR, "Invalid device id: %d", did);
		return -1;
	}

	*features = list->internal->features;

	return 0;
}

#define VDPA_SUPPORTED_PROTOCOL_FEATURES \
		(1ULL << VHOST_USER_PROTOCOL_F_REPLY_ACK | \
		 1ULL << VHOST_USER_PROTOCOL_F_SLAVE_REQ | \
		 1ULL << VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD | \
		 1ULL << VHOST_USER_PROTOCOL_F_HOST_NOTIFIER | \
		 1ULL << VHOST_USER_PROTOCOL_F_LOG_SHMFD)
static int
ifcvf_get_protocol_features(int did __rte_unused, uint64_t *features)
{
	*features = VDPA_SUPPORTED_PROTOCOL_FEATURES;
	return 0;
}

static struct rte_vdpa_dev_ops ifcvf_ops = {
	.get_queue_num = ifcvf_get_queue_num,
	.get_features = ifcvf_get_vdpa_features,
	.get_protocol_features = ifcvf_get_protocol_features,
	.dev_conf = ifcvf_dev_config,
	.dev_close = ifcvf_dev_close,
	.set_vring_state = NULL,
	.set_features = ifcvf_set_features,
	.migration_done = NULL,
	.get_vfio_group_fd = ifcvf_get_vfio_group_fd,
	.get_vfio_device_fd = ifcvf_get_vfio_device_fd,
	.get_notify_area = ifcvf_get_notify_area,
};

static inline int
open_int(const char *key __rte_unused, const char *value, void *extra_args)
{
	uint16_t *n = extra_args;

	if (value == NULL || extra_args == NULL)
		return -EINVAL;

	*n = (uint16_t)strtoul(value, NULL, 0);
	if (*n == USHRT_MAX && errno == ERANGE)
		return -1;

	return 0;
}
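
/*
 * A device is only taken over by this driver when the "vdpa=1" devarg is
 * given, e.g. with an EAL option such as (the PCI address below is just
 * an illustration):
 *   -w 06:00.3,vdpa=1,sw-live-migration=1
 */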
static int
ifcvf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
		struct rte_pci_device *pci_dev)
{
	uint64_t features;
	struct ifcvf_internal *internal = NULL;
	struct internal_list *list = NULL;
	int vdpa_mode = 0;
	int sw_fallback_lm = 0;
	struct rte_kvargs *kvlist = NULL;
	int ret = 0;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	kvlist = rte_kvargs_parse(pci_dev->device.devargs->args,
			ifcvf_valid_arguments);
	if (kvlist == NULL)
		return 1;

	/* probe only when vdpa mode is specified */
	if (rte_kvargs_count(kvlist, IFCVF_VDPA_MODE) == 0) {
		rte_kvargs_free(kvlist);
		return 1;
	}

	ret = rte_kvargs_process(kvlist, IFCVF_VDPA_MODE, &open_int,
			&vdpa_mode);
	if (ret < 0 || vdpa_mode == 0) {
		rte_kvargs_free(kvlist);
		return 1;
	}

	list = rte_zmalloc("ifcvf", sizeof(*list), 0);
	if (list == NULL)
		goto error;

	internal = rte_zmalloc("ifcvf", sizeof(*internal), 0);
	if (internal == NULL)
		goto error;

	internal->pdev = pci_dev;
	rte_spinlock_init(&internal->lock);

	if (ifcvf_vfio_setup(internal) < 0) {
		DRV_LOG(ERR, "failed to setup device %s", pci_dev->name);
		goto error;
	}

	if (ifcvf_init_hw(&internal->hw, internal->pdev) < 0) {
		DRV_LOG(ERR, "failed to init device %s", pci_dev->name);
		goto error;
	}

	internal->max_queues = IFCVF_MAX_QUEUES;
	features = ifcvf_get_features(&internal->hw);
	internal->features = (features &
		~(1ULL << VIRTIO_F_IOMMU_PLATFORM)) |
		(1ULL << VIRTIO_NET_F_GUEST_ANNOUNCE) |
		(1ULL << VIRTIO_NET_F_CTRL_VQ) |
		(1ULL << VIRTIO_NET_F_STATUS) |
		(1ULL << VHOST_USER_F_PROTOCOL_FEATURES) |
		(1ULL << VHOST_F_LOG_ALL);

	internal->dev_addr.pci_addr = pci_dev->addr;
	internal->dev_addr.type = PCI_ADDR;
	list->internal = internal;

	if (rte_kvargs_count(kvlist, IFCVF_SW_FALLBACK_LM)) {
		ret = rte_kvargs_process(kvlist, IFCVF_SW_FALLBACK_LM,
				&open_int, &sw_fallback_lm);
		if (ret < 0)
			goto error;
	}
	internal->sw_lm = sw_fallback_lm;

	internal->did = rte_vdpa_register_device(&internal->dev_addr,
				&ifcvf_ops);
	if (internal->did < 0) {
		DRV_LOG(ERR, "failed to register device %s", pci_dev->name);
		goto error;
	}

	pthread_mutex_lock(&internal_list_lock);
	TAILQ_INSERT_TAIL(&internal_list, list, next);
	pthread_mutex_unlock(&internal_list_lock);

	rte_atomic32_set(&internal->started, 1);
	update_datapath(internal);

	rte_kvargs_free(kvlist);
	return 0;

error:
	rte_kvargs_free(kvlist);
	rte_free(list);
	rte_free(internal);
	return -1;
}

static int
ifcvf_pci_remove(struct rte_pci_device *pci_dev)
{
	struct ifcvf_internal *internal;
	struct internal_list *list;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	list = find_internal_resource_by_dev(pci_dev);
	if (list == NULL) {
		DRV_LOG(ERR, "Invalid device: %s", pci_dev->name);
		return -1;
	}

	internal = list->internal;
	rte_atomic32_set(&internal->started, 0);
	update_datapath(internal);

	rte_pci_unmap_device(internal->pdev);
	rte_vfio_container_destroy(internal->vfio_container_fd);
	rte_vdpa_unregister_device(internal->did);

	pthread_mutex_lock(&internal_list_lock);
	TAILQ_REMOVE(&internal_list, list, next);
	pthread_mutex_unlock(&internal_list_lock);

	rte_free(list);
	rte_free(internal);

	return 0;
}

/*
 * IFCVF has the same vendor ID and device ID as the virtio net PCI
 * device, with its specific subsystem vendor ID and device ID.
 */
static const struct rte_pci_id pci_id_ifcvf_map[] = {
	{ .class_id = RTE_CLASS_ANY_ID,
	  .vendor_id = IFCVF_VENDOR_ID,
	  .device_id = IFCVF_DEVICE_ID,
	  .subsystem_vendor_id = IFCVF_SUBSYS_VENDOR_ID,
	  .subsystem_device_id = IFCVF_SUBSYS_DEVICE_ID,
	},

	{ .vendor_id = 0, /* sentinel */
	},
};

static struct rte_pci_driver rte_ifcvf_vdpa = {
	.id_table = pci_id_ifcvf_map,
	.drv_flags = 0,
	.probe = ifcvf_pci_probe,
	.remove = ifcvf_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_ifcvf, rte_ifcvf_vdpa);
RTE_PMD_REGISTER_PCI_TABLE(net_ifcvf, pci_id_ifcvf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_ifcvf, "* vfio-pci");

RTE_INIT(ifcvf_vdpa_init_log)
{
	ifcvf_vdpa_logtype = rte_log_register("pmd.net.ifcvf_vdpa");
	if (ifcvf_vdpa_logtype >= 0)
		rte_log_set_level(ifcvf_vdpa_logtype, RTE_LOG_NOTICE);
}