X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fifc%2Fifcvf_vdpa.c;h=8de9ef1999fba707e9ba2c7d02623f440937c6fe;hb=b06a398ba9ae7b0fb6f225591c86a7fd05b480d8;hp=88d8140377e47fc6eb936ee93f9f72081a339baf;hpb=f8e9989606e7a9548a25ac10e2daae4c8af230cb;p=dpdk.git diff --git a/drivers/net/ifc/ifcvf_vdpa.c b/drivers/net/ifc/ifcvf_vdpa.c index 88d8140377..8de9ef1999 100644 --- a/drivers/net/ifc/ifcvf_vdpa.c +++ b/drivers/net/ifc/ifcvf_vdpa.c @@ -7,6 +7,8 @@ #include #include #include +#include +#include #include #include @@ -16,17 +18,31 @@ #include #include #include +#include +#include #include "base/ifcvf.h" #define DRV_LOG(level, fmt, args...) \ rte_log(RTE_LOG_ ## level, ifcvf_vdpa_logtype, \ - "%s(): " fmt "\n", __func__, ##args) + "IFCVF %s(): " fmt "\n", __func__, ##args) #ifndef PAGE_SIZE #define PAGE_SIZE 4096 #endif +#define IFCVF_USED_RING_LEN(size) \ + ((size) * sizeof(struct vring_used_elem) + sizeof(uint16_t) * 3) + +#define IFCVF_VDPA_MODE "vdpa" +#define IFCVF_SW_FALLBACK_LM "sw-live-migration" + +static const char * const ifcvf_valid_arguments[] = { + IFCVF_VDPA_MODE, + IFCVF_SW_FALLBACK_LM, + NULL +}; + static int ifcvf_vdpa_logtype; struct ifcvf_internal { @@ -46,6 +62,12 @@ struct ifcvf_internal { rte_atomic32_t dev_attached; rte_atomic32_t running; rte_spinlock_t lock; + bool sw_lm; + bool sw_fallback_running; + /* mediated vring for sw fallback */ + struct vring m_vring[IFCVF_MAX_QUEUES * 2]; + /* eventfd for used ring interrupt */ + int intr_fd[IFCVF_MAX_QUEUES * 2]; }; struct internal_list { @@ -59,6 +81,8 @@ static struct internal_list_head internal_list = static pthread_mutex_t internal_list_lock = PTHREAD_MUTEX_INITIALIZER; +static void update_used_ring(struct ifcvf_internal *internal, uint16_t qid); + static struct internal_list * find_internal_resource_by_did(int did) { @@ -111,7 +135,6 @@ ifcvf_vfio_setup(struct ifcvf_internal *internal) struct rte_pci_device *dev = internal->pdev; char devname[RTE_DEV_NAME_MAX_LEN] = {0}; int iommu_group_num; - int ret = 0; int i; internal->vfio_dev_fd = -1; @@ -145,9 +168,8 @@ ifcvf_vfio_setup(struct ifcvf_internal *internal) internal->hw.mem_resource[i].len = internal->pdev->mem_resource[i].len; } - ret = ifcvf_init_hw(&internal->hw, internal->pdev); - return ret; + return 0; err: rte_vfio_container_destroy(internal->vfio_container_fd); @@ -205,7 +227,7 @@ exit: } static uint64_t -qva_to_gpa(int vid, uint64_t qva) +hva_to_gpa(int vid, uint64_t hva) { struct rte_vhost_memory *mem = NULL; struct rte_vhost_mem_region *reg; @@ -218,9 +240,9 @@ qva_to_gpa(int vid, uint64_t qva) for (i = 0; i < mem->nregions; i++) { reg = &mem->regions[i]; - if (qva >= reg->host_user_addr && - qva < reg->host_user_addr + reg->size) { - gpa = qva - reg->host_user_addr + reg->guest_phys_addr; + if (hva >= reg->host_user_addr && + hva < reg->host_user_addr + reg->size) { + gpa = hva - reg->host_user_addr + reg->guest_phys_addr; break; } } @@ -246,21 +268,21 @@ vdpa_ifcvf_start(struct ifcvf_internal *internal) for (i = 0; i < nr_vring; i++) { rte_vhost_get_vhost_vring(vid, i, &vq); - gpa = qva_to_gpa(vid, (uint64_t)(uintptr_t)vq.desc); + gpa = hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.desc); if (gpa == 0) { DRV_LOG(ERR, "Fail to get GPA for descriptor ring."); return -1; } hw->vring[i].desc = gpa; - gpa = qva_to_gpa(vid, (uint64_t)(uintptr_t)vq.avail); + gpa = hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.avail); if (gpa == 0) { DRV_LOG(ERR, "Fail to get GPA for available ring."); return -1; } hw->vring[i].avail = gpa; - gpa = qva_to_gpa(vid, 
(uint64_t)(uintptr_t)vq.used); + gpa = hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.used); if (gpa == 0) { DRV_LOG(ERR, "Fail to get GPA for used ring."); return -1; @@ -282,6 +304,9 @@ vdpa_ifcvf_stop(struct ifcvf_internal *internal) struct ifcvf_hw *hw = &internal->hw; uint32_t i; int vid; + uint64_t features; + uint64_t log_base, log_size; + uint64_t len; vid = internal->vid; ifcvf_stop_hw(hw); @@ -289,12 +314,31 @@ vdpa_ifcvf_stop(struct ifcvf_internal *internal) for (i = 0; i < hw->nr_vring; i++) rte_vhost_set_vring_base(vid, i, hw->vring[i].last_avail_idx, hw->vring[i].last_used_idx); + + if (internal->sw_lm) + return; + + rte_vhost_get_negotiated_features(vid, &features); + if (RTE_VHOST_NEED_LOG(features)) { + ifcvf_disable_logging(hw); + rte_vhost_get_log_base(internal->vid, &log_base, &log_size); + rte_vfio_container_dma_unmap(internal->vfio_container_fd, + log_base, IFCVF_LOG_BASE, log_size); + /* + * IFCVF marks dirty memory pages for only packet buffer, + * SW helps to mark the used ring as dirty after device stops. + */ + for (i = 0; i < hw->nr_vring; i++) { + len = IFCVF_USED_RING_LEN(hw->vring[i].size); + rte_vhost_log_used_vring(vid, i, 0, len); + } + } } #define MSIX_IRQ_SET_BUF_LEN (sizeof(struct vfio_irq_set) + \ sizeof(int) * (IFCVF_MAX_QUEUES * 2 + 1)) static int -vdpa_enable_vfio_intr(struct ifcvf_internal *internal) +vdpa_enable_vfio_intr(struct ifcvf_internal *internal, bool m_rx) { int ret; uint32_t i, nr_vring; @@ -302,6 +346,7 @@ vdpa_enable_vfio_intr(struct ifcvf_internal *internal) struct vfio_irq_set *irq_set; int *fd_ptr; struct rte_vhost_vring vring; + int fd; nr_vring = rte_vhost_get_vring_num(internal->vid); @@ -315,9 +360,22 @@ vdpa_enable_vfio_intr(struct ifcvf_internal *internal) fd_ptr = (int *)&irq_set->data; fd_ptr[RTE_INTR_VEC_ZERO_OFFSET] = internal->pdev->intr_handle.fd; + for (i = 0; i < nr_vring; i++) + internal->intr_fd[i] = -1; + for (i = 0; i < nr_vring; i++) { rte_vhost_get_vhost_vring(internal->vid, i, &vring); fd_ptr[RTE_INTR_VEC_RXTX_OFFSET + i] = vring.callfd; + if ((i & 1) == 0 && m_rx == true) { + fd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC); + if (fd < 0) { + DRV_LOG(ERR, "can't setup eventfd: %s", + strerror(errno)); + return -1; + } + internal->intr_fd[i] = fd; + fd_ptr[RTE_INTR_VEC_RXTX_OFFSET + i] = fd; + } } ret = ioctl(internal->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set); @@ -334,6 +392,7 @@ static int vdpa_disable_vfio_intr(struct ifcvf_internal *internal) { int ret; + uint32_t i, nr_vring; char irq_set_buf[MSIX_IRQ_SET_BUF_LEN]; struct vfio_irq_set *irq_set; @@ -344,6 +403,13 @@ vdpa_disable_vfio_intr(struct ifcvf_internal *internal) irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX; irq_set->start = 0; + nr_vring = rte_vhost_get_vring_num(internal->vid); + for (i = 0; i < nr_vring; i++) { + if (internal->intr_fd[i] >= 0) + close(internal->intr_fd[i]); + internal->intr_fd[i] = -1; + } + ret = ioctl(internal->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set); if (ret) { DRV_LOG(ERR, "Error disabling MSI-X interrupts: %s", @@ -465,15 +531,15 @@ update_datapath(struct ifcvf_internal *internal) if (ret) goto err; - ret = vdpa_enable_vfio_intr(internal); + ret = vdpa_enable_vfio_intr(internal, 0); if (ret) goto err; - ret = setup_notify_relay(internal); + ret = vdpa_ifcvf_start(internal); if (ret) goto err; - ret = vdpa_ifcvf_start(internal); + ret = setup_notify_relay(internal); if (ret) goto err; @@ -481,12 +547,12 @@ update_datapath(struct ifcvf_internal *internal) } else if (rte_atomic32_read(&internal->running) && 
(!rte_atomic32_read(&internal->started) || !rte_atomic32_read(&internal->dev_attached))) { - vdpa_ifcvf_stop(internal); - ret = unset_notify_relay(internal); if (ret) goto err; + vdpa_ifcvf_stop(internal); + ret = vdpa_disable_vfio_intr(internal); if (ret) goto err; @@ -505,6 +571,299 @@ err: return ret; } +static int +m_ifcvf_start(struct ifcvf_internal *internal) +{ + struct ifcvf_hw *hw = &internal->hw; + uint32_t i, nr_vring; + int vid, ret; + struct rte_vhost_vring vq; + void *vring_buf; + uint64_t m_vring_iova = IFCVF_MEDIATED_VRING; + uint64_t size; + uint64_t gpa; + + vid = internal->vid; + nr_vring = rte_vhost_get_vring_num(vid); + rte_vhost_get_negotiated_features(vid, &hw->req_features); + + for (i = 0; i < nr_vring; i++) { + rte_vhost_get_vhost_vring(vid, i, &vq); + + size = RTE_ALIGN_CEIL(vring_size(vq.size, PAGE_SIZE), + PAGE_SIZE); + vring_buf = rte_zmalloc("ifcvf", size, PAGE_SIZE); + vring_init(&internal->m_vring[i], vq.size, vring_buf, + PAGE_SIZE); + + ret = rte_vfio_container_dma_map(internal->vfio_container_fd, + (uint64_t)(uintptr_t)vring_buf, m_vring_iova, size); + if (ret < 0) { + DRV_LOG(ERR, "mediated vring DMA map failed."); + goto error; + } + + gpa = hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.desc); + if (gpa == 0) { + DRV_LOG(ERR, "Fail to get GPA for descriptor ring."); + return -1; + } + hw->vring[i].desc = gpa; + + gpa = hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.avail); + if (gpa == 0) { + DRV_LOG(ERR, "Fail to get GPA for available ring."); + return -1; + } + hw->vring[i].avail = gpa; + + /* Direct I/O for Tx queue, relay for Rx queue */ + if (i & 1) { + gpa = hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.used); + if (gpa == 0) { + DRV_LOG(ERR, "Fail to get GPA for used ring."); + return -1; + } + hw->vring[i].used = gpa; + } else { + hw->vring[i].used = m_vring_iova + + (char *)internal->m_vring[i].used - + (char *)internal->m_vring[i].desc; + } + + hw->vring[i].size = vq.size; + + rte_vhost_get_vring_base(vid, i, + &internal->m_vring[i].avail->idx, + &internal->m_vring[i].used->idx); + + rte_vhost_get_vring_base(vid, i, &hw->vring[i].last_avail_idx, + &hw->vring[i].last_used_idx); + + m_vring_iova += size; + } + hw->nr_vring = nr_vring; + + return ifcvf_start_hw(&internal->hw); + +error: + for (i = 0; i < nr_vring; i++) + if (internal->m_vring[i].desc) + rte_free(internal->m_vring[i].desc); + + return -1; +} + +static int +m_ifcvf_stop(struct ifcvf_internal *internal) +{ + int vid; + uint32_t i; + struct rte_vhost_vring vq; + struct ifcvf_hw *hw = &internal->hw; + uint64_t m_vring_iova = IFCVF_MEDIATED_VRING; + uint64_t size, len; + + vid = internal->vid; + ifcvf_stop_hw(hw); + + for (i = 0; i < hw->nr_vring; i++) { + /* synchronize remaining new used entries if any */ + if ((i & 1) == 0) + update_used_ring(internal, i); + + rte_vhost_get_vhost_vring(vid, i, &vq); + len = IFCVF_USED_RING_LEN(vq.size); + rte_vhost_log_used_vring(vid, i, 0, len); + + size = RTE_ALIGN_CEIL(vring_size(vq.size, PAGE_SIZE), + PAGE_SIZE); + rte_vfio_container_dma_unmap(internal->vfio_container_fd, + (uint64_t)(uintptr_t)internal->m_vring[i].desc, + m_vring_iova, size); + + rte_vhost_set_vring_base(vid, i, hw->vring[i].last_avail_idx, + hw->vring[i].last_used_idx); + rte_free(internal->m_vring[i].desc); + m_vring_iova += size; + } + + return 0; +} + +static void +update_used_ring(struct ifcvf_internal *internal, uint16_t qid) +{ + rte_vdpa_relay_vring_used(internal->vid, qid, &internal->m_vring[qid]); + rte_vhost_vring_call(internal->vid, qid); +} + +static void * +vring_relay(void *arg) 
+{ + int i, vid, epfd, fd, nfds; + struct ifcvf_internal *internal = (struct ifcvf_internal *)arg; + struct rte_vhost_vring vring; + uint16_t qid, q_num; + struct epoll_event events[IFCVF_MAX_QUEUES * 4]; + struct epoll_event ev; + int nbytes; + uint64_t buf; + + vid = internal->vid; + q_num = rte_vhost_get_vring_num(vid); + + /* add notify fd and interrupt fd to epoll */ + epfd = epoll_create(IFCVF_MAX_QUEUES * 2); + if (epfd < 0) { + DRV_LOG(ERR, "failed to create epoll instance."); + return NULL; + } + internal->epfd = epfd; + + for (qid = 0; qid < q_num; qid++) { + ev.events = EPOLLIN | EPOLLPRI; + rte_vhost_get_vhost_vring(vid, qid, &vring); + ev.data.u64 = qid << 1 | (uint64_t)vring.kickfd << 32; + if (epoll_ctl(epfd, EPOLL_CTL_ADD, vring.kickfd, &ev) < 0) { + DRV_LOG(ERR, "epoll add error: %s", strerror(errno)); + return NULL; + } + } + + for (qid = 0; qid < q_num; qid += 2) { + ev.events = EPOLLIN | EPOLLPRI; + /* leave a flag to mark it's for interrupt */ + ev.data.u64 = 1 | qid << 1 | + (uint64_t)internal->intr_fd[qid] << 32; + if (epoll_ctl(epfd, EPOLL_CTL_ADD, internal->intr_fd[qid], &ev) + < 0) { + DRV_LOG(ERR, "epoll add error: %s", strerror(errno)); + return NULL; + } + update_used_ring(internal, qid); + } + + /* start relay with a first kick */ + for (qid = 0; qid < q_num; qid++) + ifcvf_notify_queue(&internal->hw, qid); + + /* listen to the events and react accordingly */ + for (;;) { + nfds = epoll_wait(epfd, events, q_num * 2, -1); + if (nfds < 0) { + if (errno == EINTR) + continue; + DRV_LOG(ERR, "epoll_wait return fail\n"); + return NULL; + } + + for (i = 0; i < nfds; i++) { + fd = (uint32_t)(events[i].data.u64 >> 32); + do { + nbytes = read(fd, &buf, 8); + if (nbytes < 0) { + if (errno == EINTR || + errno == EWOULDBLOCK || + errno == EAGAIN) + continue; + DRV_LOG(INFO, "Error reading " + "kickfd: %s", + strerror(errno)); + } + break; + } while (1); + + qid = events[i].data.u32 >> 1; + + if (events[i].data.u32 & 1) + update_used_ring(internal, qid); + else + ifcvf_notify_queue(&internal->hw, qid); + } + } + + return NULL; +} + +static int +setup_vring_relay(struct ifcvf_internal *internal) +{ + int ret; + + ret = pthread_create(&internal->tid, NULL, vring_relay, + (void *)internal); + if (ret) { + DRV_LOG(ERR, "failed to create ring relay pthread."); + return -1; + } + return 0; +} + +static int +unset_vring_relay(struct ifcvf_internal *internal) +{ + void *status; + + if (internal->tid) { + pthread_cancel(internal->tid); + pthread_join(internal->tid, &status); + } + internal->tid = 0; + + if (internal->epfd >= 0) + close(internal->epfd); + internal->epfd = -1; + + return 0; +} + +static int +ifcvf_sw_fallback_switchover(struct ifcvf_internal *internal) +{ + int ret; + int vid = internal->vid; + + /* stop the direct IO data path */ + unset_notify_relay(internal); + vdpa_ifcvf_stop(internal); + vdpa_disable_vfio_intr(internal); + + ret = rte_vhost_host_notifier_ctrl(vid, false); + if (ret && ret != -ENOTSUP) + goto error; + + /* set up interrupt for interrupt relay */ + ret = vdpa_enable_vfio_intr(internal, 1); + if (ret) + goto unmap; + + /* config the VF */ + ret = m_ifcvf_start(internal); + if (ret) + goto unset_intr; + + /* set up vring relay thread */ + ret = setup_vring_relay(internal); + if (ret) + goto stop_vf; + + rte_vhost_host_notifier_ctrl(vid, true); + + internal->sw_fallback_running = true; + + return 0; + +stop_vf: + m_ifcvf_stop(internal); +unset_intr: + vdpa_disable_vfio_intr(internal); +unmap: + ifcvf_dma_map(internal, 0); +error: + return -1; +} + 
static int ifcvf_dev_config(int vid) { @@ -524,6 +883,9 @@ ifcvf_dev_config(int vid) rte_atomic32_set(&internal->dev_attached, 1); update_datapath(internal); + if (rte_vhost_host_notifier_ctrl(vid, true) != 0) + DRV_LOG(NOTICE, "vDPA (%d): software relay is used.", did); + return 0; } @@ -542,8 +904,59 @@ ifcvf_dev_close(int vid) } internal = list->internal; - rte_atomic32_set(&internal->dev_attached, 0); - update_datapath(internal); + + if (internal->sw_fallback_running) { + /* unset ring relay */ + unset_vring_relay(internal); + + /* reset VF */ + m_ifcvf_stop(internal); + + /* remove interrupt setting */ + vdpa_disable_vfio_intr(internal); + + /* unset DMA map for guest memory */ + ifcvf_dma_map(internal, 0); + + internal->sw_fallback_running = false; + } else { + rte_atomic32_set(&internal->dev_attached, 0); + update_datapath(internal); + } + + return 0; +} + +static int +ifcvf_set_features(int vid) +{ + uint64_t features; + int did; + struct internal_list *list; + struct ifcvf_internal *internal; + uint64_t log_base, log_size; + + did = rte_vhost_get_vdpa_device_id(vid); + list = find_internal_resource_by_did(did); + if (list == NULL) { + DRV_LOG(ERR, "Invalid device id: %d", did); + return -1; + } + + internal = list->internal; + rte_vhost_get_negotiated_features(vid, &features); + + if (!RTE_VHOST_NEED_LOG(features)) + return 0; + + if (internal->sw_lm) { + ifcvf_sw_fallback_switchover(internal); + } else { + rte_vhost_get_log_base(vid, &log_base, &log_size); + rte_vfio_container_dma_map(internal->vfio_container_fd, + log_base, IFCVF_LOG_BASE, log_size); + ifcvf_enable_logging(&internal->hw, IFCVF_LOG_BASE, log_size); + } return 0; } @@ -657,20 +1070,35 @@ ifcvf_get_protocol_features(int did __rte_unused, uint64_t *features) return 0; } -struct rte_vdpa_dev_ops ifcvf_ops = { +static struct rte_vdpa_dev_ops ifcvf_ops = { .get_queue_num = ifcvf_get_queue_num, .get_features = ifcvf_get_vdpa_features, .get_protocol_features = ifcvf_get_protocol_features, .dev_conf = ifcvf_dev_config, .dev_close = ifcvf_dev_close, .set_vring_state = NULL, - .set_features = NULL, + .set_features = ifcvf_set_features, .migration_done = NULL, .get_vfio_group_fd = ifcvf_get_vfio_group_fd, .get_vfio_device_fd = ifcvf_get_vfio_device_fd, .get_notify_area = ifcvf_get_notify_area, }; +static inline int +open_int(const char *key __rte_unused, const char *value, void *extra_args) +{ + uint16_t *n = extra_args; + + if (value == NULL || extra_args == NULL) + return -EINVAL; + + *n = (uint16_t)strtoul(value, NULL, 0); + if (*n == USHRT_MAX && errno == ERANGE) + return -1; + + return 0; +} + static int ifcvf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, struct rte_pci_device *pci_dev) @@ -678,10 +1106,35 @@ ifcvf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, uint64_t features; struct ifcvf_internal *internal = NULL; struct internal_list *list = NULL; + int vdpa_mode = 0; + int sw_fallback_lm = 0; + struct rte_kvargs *kvlist = NULL; + int ret = 0; if (rte_eal_process_type() != RTE_PROC_PRIMARY) return 0; + if (!pci_dev->device.devargs) + return 1; + + kvlist = rte_kvargs_parse(pci_dev->device.devargs->args, + ifcvf_valid_arguments); + if (kvlist == NULL) + return 1; + + /* probe only when vdpa mode is specified */ + if (rte_kvargs_count(kvlist, IFCVF_VDPA_MODE) == 0) { + rte_kvargs_free(kvlist); + return 1; + } + + ret = rte_kvargs_process(kvlist, IFCVF_VDPA_MODE, &open_int, + &vdpa_mode); + if (ret < 0 || vdpa_mode == 0) { + rte_kvargs_free(kvlist); + return 1; + } + list = rte_zmalloc("ifcvf", 
sizeof(*list), 0); if (list == NULL) goto error; @@ -692,34 +1145,58 @@ ifcvf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, internal->pdev = pci_dev; rte_spinlock_init(&internal->lock); - if (ifcvf_vfio_setup(internal) < 0) - return -1; + + if (ifcvf_vfio_setup(internal) < 0) { + DRV_LOG(ERR, "failed to setup device %s", pci_dev->name); + goto error; + } + + if (ifcvf_init_hw(&internal->hw, internal->pdev) < 0) { + DRV_LOG(ERR, "failed to init device %s", pci_dev->name); + goto error; + } internal->max_queues = IFCVF_MAX_QUEUES; features = ifcvf_get_features(&internal->hw); internal->features = (features & ~(1ULL << VIRTIO_F_IOMMU_PLATFORM)) | - (1ULL << VHOST_USER_F_PROTOCOL_FEATURES); + (1ULL << VIRTIO_NET_F_GUEST_ANNOUNCE) | + (1ULL << VIRTIO_NET_F_CTRL_VQ) | + (1ULL << VIRTIO_NET_F_STATUS) | + (1ULL << VHOST_USER_F_PROTOCOL_FEATURES) | + (1ULL << VHOST_F_LOG_ALL); internal->dev_addr.pci_addr = pci_dev->addr; internal->dev_addr.type = PCI_ADDR; list->internal = internal; - pthread_mutex_lock(&internal_list_lock); - TAILQ_INSERT_TAIL(&internal_list, list, next); - pthread_mutex_unlock(&internal_list_lock); + if (rte_kvargs_count(kvlist, IFCVF_SW_FALLBACK_LM)) { + ret = rte_kvargs_process(kvlist, IFCVF_SW_FALLBACK_LM, + &open_int, &sw_fallback_lm); + if (ret < 0) + goto error; + } + internal->sw_lm = sw_fallback_lm; internal->did = rte_vdpa_register_device(&internal->dev_addr, &ifcvf_ops); - if (internal->did < 0) + if (internal->did < 0) { + DRV_LOG(ERR, "failed to register device %s", pci_dev->name); goto error; + } + + pthread_mutex_lock(&internal_list_lock); + TAILQ_INSERT_TAIL(&internal_list, list, next); + pthread_mutex_unlock(&internal_list_lock); rte_atomic32_set(&internal->started, 1); update_datapath(internal); + rte_kvargs_free(kvlist); return 0; error: + rte_kvargs_free(kvlist); rte_free(list); rte_free(internal); return -1;
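
Notes on the devargs introduced above: after this change the driver probes a VF only when "vdpa=1" appears in its devargs, and "sw-live-migration=1" selects the mediated-ring software relay once the guest negotiates VHOST_F_LOG_ALL; without it, dirty-page tracking stays in hardware (the vhost log buffer is DMA-mapped at IFCVF_LOG_BASE and device logging is enabled in ifcvf_set_features). A minimal invocation sketch, assuming the examples/vdpa sample application and a placeholder PCI address; the EAL options shown here are an assumption for illustration, not part of this patch:

    ./vdpa -w 0000:06:00.3,vdpa=1,sw-live-migration=1

In the software-relay mode, TX queues keep direct I/O while each RX used ring is mirrored through the mediated vring (m_vring) and synchronized to the guest by the vring_relay thread via rte_vdpa_relay_vring_used().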