diff --git a/lib/librte_vhost/vhost.c b/lib/librte_vhost/vhost.c
index a8ef55d627..52ab93d1ec 100644
--- a/lib/librte_vhost/vhost.c
+++ b/lib/librte_vhost/vhost.c
@@ -26,6 +26,7 @@
 #include "vhost_user.h"
 
 struct virtio_net *vhost_devices[MAX_VHOST_DEVICE];
+pthread_mutex_t vhost_dev_lock = PTHREAD_MUTEX_INITIALIZER;
 
 /* Called with iotlb_lock read-locked */
 uint64_t
@@ -57,7 +58,7 @@ __vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
 
 		vhost_user_iotlb_pending_insert(vq, iova, perm);
 		if (vhost_user_iotlb_miss(dev, iova, perm)) {
-			RTE_LOG(ERR, VHOST_CONFIG,
+			VHOST_LOG_CONFIG(ERR,
 				"IOTLB miss req failed for IOVA 0x%" PRIx64 "\n",
 				iova);
 			vhost_user_iotlb_pending_remove(vq, iova, 1, perm);
@@ -106,7 +107,7 @@ __vhost_log_write(struct virtio_net *dev, uint64_t addr, uint64_t len)
 		return;
 
 	/* To make sure guest memory updates are committed before logging */
-	rte_smp_wmb();
+	rte_atomic_thread_fence(__ATOMIC_RELEASE);
 
 	page = addr / VHOST_LOG_PAGE;
 	while (page * VHOST_LOG_PAGE < addr + len) {
@@ -124,7 +125,7 @@ __vhost_log_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq,
 
 	hva = __vhost_iova_to_vva(dev, vq, iova, &map_len, VHOST_ACCESS_RW);
 	if (map_len != len) {
-		RTE_LOG(ERR, VHOST_CONFIG,
+		VHOST_LOG_DATA(ERR,
 			"Failed to write log for IOVA 0x%" PRIx64 ". No IOTLB entry found\n",
 			iova);
 		return;
@@ -144,7 +145,7 @@ __vhost_log_cache_sync(struct virtio_net *dev, struct vhost_virtqueue *vq)
 	if (unlikely(!dev->log_base))
 		return;
 
-	rte_smp_wmb();
+	rte_atomic_thread_fence(__ATOMIC_RELEASE);
 
 	log_base = (unsigned long *)(uintptr_t)dev->log_base;
 
@@ -163,7 +164,7 @@ __vhost_log_cache_sync(struct virtio_net *dev, struct vhost_virtqueue *vq)
 #endif
 	}
 
-	rte_smp_wmb();
+	rte_atomic_thread_fence(__ATOMIC_RELEASE);
 
 	vq->log_cache_nb_elem = 0;
 }
@@ -190,7 +191,7 @@ vhost_log_cache_page(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	 * No more room for a new log cache entry,
 	 * so write the dirty log map directly.
 	 */
-	rte_smp_wmb();
+	rte_atomic_thread_fence(__ATOMIC_RELEASE);
 	vhost_log_page((uint8_t *)(uintptr_t)dev->log_base, page);
 
 	return;
@@ -229,7 +230,7 @@ __vhost_log_cache_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq,
 
 	hva = __vhost_iova_to_vva(dev, vq, iova, &map_len, VHOST_ACCESS_RW);
 	if (map_len != len) {
-		RTE_LOG(ERR, VHOST_CONFIG,
+		VHOST_LOG_DATA(ERR,
 			"Failed to write log for IOVA 0x%" PRIx64 ". No IOTLB entry found\n",
 			iova);
 		return;
@@ -324,13 +325,33 @@ cleanup_device(struct virtio_net *dev, int destroy)
 	}
 }
 
+static void
+vhost_free_async_mem(struct vhost_virtqueue *vq)
+{
+	if (vq->async_pkts_info)
+		rte_free(vq->async_pkts_info);
+	if (vq->async_descs_split)
+		rte_free(vq->async_descs_split);
+	if (vq->it_pool)
+		rte_free(vq->it_pool);
+	if (vq->vec_pool)
+		rte_free(vq->vec_pool);
+
+	vq->async_pkts_info = NULL;
+	vq->async_descs_split = NULL;
+	vq->it_pool = NULL;
+	vq->vec_pool = NULL;
+}
+
 void
 free_vq(struct virtio_net *dev, struct vhost_virtqueue *vq)
 {
 	if (vq_is_packed(dev))
 		rte_free(vq->shadow_used_packed);
-	else
+	else {
 		rte_free(vq->shadow_used_split);
+		vhost_free_async_mem(vq);
+	}
 	rte_free(vq->batch_copy_elems);
 	rte_mempool_free(vq->iotlb_pool);
 	rte_free(vq);
@@ -350,6 +371,57 @@ free_device(struct virtio_net *dev)
 
 	rte_free(dev);
 }
 
+static __rte_always_inline int
+log_translate(struct virtio_net *dev, struct vhost_virtqueue *vq)
+{
+	if (likely(!(vq->ring_addrs.flags & (1 << VHOST_VRING_F_LOG))))
+		return 0;
+
+	vq->log_guest_addr = translate_log_addr(dev, vq,
+			vq->ring_addrs.log_guest_addr);
+	if (vq->log_guest_addr == 0)
+		return -1;
+
+	return 0;
+}
+
+/*
+ * Converts vring log address to GPA
+ * If IOMMU is enabled, the log address is IOVA
+ * If IOMMU not enabled, the log address is already GPA
+ *
+ * Caller should have iotlb_lock read-locked
+ */
+uint64_t
+translate_log_addr(struct virtio_net *dev, struct vhost_virtqueue *vq,
+		uint64_t log_addr)
+{
+	if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)) {
+		const uint64_t exp_size = sizeof(uint64_t);
+		uint64_t hva, gpa;
+		uint64_t size = exp_size;
+
+		hva = vhost_iova_to_vva(dev, vq, log_addr,
+					&size, VHOST_ACCESS_RW);
+
+		if (size != exp_size)
+			return 0;
+
+		gpa = hva_to_gpa(dev, hva, exp_size);
+		if (!gpa) {
+			VHOST_LOG_CONFIG(ERR,
+				"VQ: Failed to find GPA for log_addr: 0x%"
+				PRIx64 " hva: 0x%" PRIx64 "\n",
+				log_addr, hva);
+			return 0;
+		}
+		return gpa;
+
+	} else
+		return log_addr;
+}
+
+/* Caller should have iotlb_lock read-locked */
 static int
 vring_translate_split(struct virtio_net *dev, struct vhost_virtqueue *vq)
 {
@@ -388,6 +460,7 @@ vring_translate_split(struct virtio_net *dev, struct vhost_virtqueue *vq)
 	return 0;
 }
 
+/* Caller should have iotlb_lock read-locked */
 static int
 vring_translate_packed(struct virtio_net *dev, struct vhost_virtqueue *vq)
 {
@@ -434,6 +507,10 @@ vring_translate(struct virtio_net *dev, struct vhost_virtqueue *vq)
 		if (vring_translate_split(dev, vq) < 0)
 			return -1;
 	}
+
+	if (log_translate(dev, vq) < 0)
+		return -1;
+
 	vq->access_ok = 1;
 
 	return 0;
@@ -461,24 +538,28 @@ init_vring_queue(struct virtio_net *dev, uint32_t vring_idx)
 	struct vhost_virtqueue *vq;
 
 	if (vring_idx >= VHOST_MAX_VRING) {
-		RTE_LOG(ERR, VHOST_CONFIG,
+		VHOST_LOG_CONFIG(ERR,
 				"Failed not init vring, out of bound (%d)\n",
 				vring_idx);
 		return;
 	}
 
 	vq = dev->virtqueue[vring_idx];
+	if (!vq) {
+		VHOST_LOG_CONFIG(ERR, "Virtqueue not allocated (%d)\n",
+				vring_idx);
+		return;
+	}
 
 	memset(vq, 0, sizeof(struct vhost_virtqueue));
 
 	vq->kickfd = VIRTIO_UNINITIALIZED_EVENTFD;
 	vq->callfd = VIRTIO_UNINITIALIZED_EVENTFD;
+	vq->notif_enable = VIRTIO_UNINITIALIZED_NOTIF;
 
 	vhost_user_iotlb_init(dev, vring_idx);
 	/* Backends are set to -1 indicating an inactive device. */
 	vq->backend = -1;
-
-	TAILQ_INIT(&vq->zmbuf_list);
 }
 
 static void
@@ -488,13 +569,19 @@ reset_vring_queue(struct virtio_net *dev, uint32_t vring_idx)
 	int callfd;
 
 	if (vring_idx >= VHOST_MAX_VRING) {
-		RTE_LOG(ERR, VHOST_CONFIG,
+		VHOST_LOG_CONFIG(ERR,
 				"Failed not init vring, out of bound (%d)\n",
 				vring_idx);
 		return;
 	}
 
 	vq = dev->virtqueue[vring_idx];
+	if (!vq) {
+		VHOST_LOG_CONFIG(ERR, "Virtqueue not allocated (%d)\n",
+				vring_idx);
+		return;
+	}
+
 	callfd = vq->callfd;
 	init_vring_queue(dev, vring_idx);
 	vq->callfd = callfd;
@@ -504,22 +591,29 @@ int
 alloc_vring_queue(struct virtio_net *dev, uint32_t vring_idx)
 {
 	struct vhost_virtqueue *vq;
+	uint32_t i;
 
-	vq = rte_malloc(NULL, sizeof(struct vhost_virtqueue), 0);
-	if (vq == NULL) {
-		RTE_LOG(ERR, VHOST_CONFIG,
-			"Failed to allocate memory for vring:%u.\n", vring_idx);
-		return -1;
-	}
+	/* Also allocate holes, if any, up to requested vring index. */
+	for (i = 0; i <= vring_idx; i++) {
+		if (dev->virtqueue[i])
+			continue;
 
-	dev->virtqueue[vring_idx] = vq;
-	init_vring_queue(dev, vring_idx);
-	rte_spinlock_init(&vq->access_lock);
-	vq->avail_wrap_counter = 1;
-	vq->used_wrap_counter = 1;
-	vq->signalled_used_valid = false;
+		vq = rte_malloc(NULL, sizeof(struct vhost_virtqueue), 0);
+		if (vq == NULL) {
+			VHOST_LOG_CONFIG(ERR,
+				"Failed to allocate memory for vring:%u.\n", i);
+			return -1;
+		}
+
+		dev->virtqueue[i] = vq;
+		init_vring_queue(dev, i);
+		rte_spinlock_init(&vq->access_lock);
+		vq->avail_wrap_counter = 1;
+		vq->used_wrap_counter = 1;
+		vq->signalled_used_valid = false;
+	}
 
-	dev->nr_vring += 1;
+	dev->nr_vring = RTE_MAX(dev->nr_vring, vring_idx + 1);
 
 	return 0;
 }
@@ -552,29 +646,33 @@ vhost_new_device(void)
 	struct virtio_net *dev;
 	int i;
 
+	pthread_mutex_lock(&vhost_dev_lock);
 	for (i = 0; i < MAX_VHOST_DEVICE; i++) {
 		if (vhost_devices[i] == NULL)
 			break;
 	}
 
 	if (i == MAX_VHOST_DEVICE) {
-		RTE_LOG(ERR, VHOST_CONFIG,
+		VHOST_LOG_CONFIG(ERR,
 			"Failed to find a free slot for new device.\n");
+		pthread_mutex_unlock(&vhost_dev_lock);
 		return -1;
 	}
 
 	dev = rte_zmalloc(NULL, sizeof(struct virtio_net), 0);
 	if (dev == NULL) {
-		RTE_LOG(ERR, VHOST_CONFIG,
+		VHOST_LOG_CONFIG(ERR,
 			"Failed to allocate memory for new dev.\n");
+		pthread_mutex_unlock(&vhost_dev_lock);
 		return -1;
 	}
 
 	vhost_devices[i] = dev;
+	pthread_mutex_unlock(&vhost_dev_lock);
+
 	dev->vid = i;
 	dev->flags = VIRTIO_DEV_BUILTIN_VIRTIO_NET;
 	dev->slave_req_fd = -1;
-	dev->vdpa_dev_id = -1;
 	dev->postcopy_ufd = -1;
 	rte_spinlock_init(&dev->slave_req_lock);
 
@@ -585,12 +683,10 @@ void
 vhost_destroy_device_notify(struct virtio_net *dev)
 {
 	struct rte_vdpa_device *vdpa_dev;
-	int did;
 
 	if (dev->flags & VIRTIO_DEV_RUNNING) {
-		did = dev->vdpa_dev_id;
-		vdpa_dev = rte_vdpa_get_device(did);
-		if (vdpa_dev && vdpa_dev->ops->dev_close)
+		vdpa_dev = dev->vdpa_dev;
+		if (vdpa_dev)
 			vdpa_dev->ops->dev_close(dev->vid);
 		dev->flags &= ~VIRTIO_DEV_RUNNING;
 		dev->notify_ops->destroy_device(dev->vid);
@@ -618,17 +714,14 @@ vhost_destroy_device(int vid)
 }
 
 void
-vhost_attach_vdpa_device(int vid, int did)
+vhost_attach_vdpa_device(int vid, struct rte_vdpa_device *vdpa_dev)
 {
 	struct virtio_net *dev = get_device(vid);
 
 	if (dev == NULL)
 		return;
 
-	if (rte_vdpa_get_device(did) == NULL)
-		return;
-
-	dev->vdpa_dev_id = did;
+	dev->vdpa_dev = vdpa_dev;
 }
 
 void
@@ -649,28 +742,39 @@ vhost_set_ifname(int vid, const char *if_name, unsigned int if_len)
 }
 
 void
-vhost_enable_dequeue_zero_copy(int vid)
+vhost_set_builtin_virtio_net(int vid, bool enable)
 {
 	struct virtio_net *dev = get_device(vid);
 
 	if (dev == NULL)
 		return;
 
-	dev->dequeue_zero_copy = 1;
+	if (enable)
+		dev->flags |= VIRTIO_DEV_BUILTIN_VIRTIO_NET;
+	else
+		dev->flags &= ~VIRTIO_DEV_BUILTIN_VIRTIO_NET;
 }
 
 void
-vhost_set_builtin_virtio_net(int vid, bool enable)
+vhost_enable_extbuf(int vid)
 {
 	struct virtio_net *dev = get_device(vid);
 
 	if (dev == NULL)
 		return;
 
-	if (enable)
-		dev->flags |= VIRTIO_DEV_BUILTIN_VIRTIO_NET;
-	else
-		dev->flags &= ~VIRTIO_DEV_BUILTIN_VIRTIO_NET;
+	dev->extbuf = 1;
+}
+
+void
+vhost_enable_linearbuf(int vid)
+{
+	struct virtio_net *dev = get_device(vid);
+
+	if (dev == NULL)
+		return;
+
+	dev->linearbuf = 1;
 }
 
 int
@@ -706,7 +810,7 @@ rte_vhost_get_numa_node(int vid)
 	ret = get_mempolicy(&numa_node, NULL, 0, dev,
 			MPOL_F_NODE | MPOL_F_ADDR);
 	if (ret < 0) {
-		RTE_LOG(ERR, VHOST_CONFIG,
+		VHOST_LOG_CONFIG(ERR,
 			"(%d) failed to query numa node: %s\n",
 			vid, rte_strerror(errno));
 		return -1;
@@ -811,9 +915,15 @@ rte_vhost_get_vhost_vring(int vid, uint16_t vring_idx,
 	if (!vq)
 		return -1;
 
-	vring->desc = vq->desc;
-	vring->avail = vq->avail;
-	vring->used = vq->used;
+	if (vq_is_packed(dev)) {
+		vring->desc_packed = vq->desc_packed;
+		vring->driver_event = vq->driver_event;
+		vring->device_event = vq->device_event;
+	} else {
+		vring->desc = vq->desc;
+		vring->avail = vq->avail;
+		vring->used = vq->used;
+	}
 	vring->log_guest_addr = vq->log_guest_addr;
 
 	vring->callfd = vq->callfd;
@@ -993,11 +1103,11 @@ rte_vhost_clr_inflight_desc_split(int vid, uint16_t vring_idx,
 	if (unlikely(idx >= vq->size))
 		return -1;
 
-	rte_smp_mb();
+	rte_atomic_thread_fence(__ATOMIC_SEQ_CST);
 
 	vq->inflight_split->desc[idx].inflight = 0;
 
-	rte_smp_mb();
+	rte_atomic_thread_fence(__ATOMIC_SEQ_CST);
 
 	vq->inflight_split->used_idx = last_used_idx;
 	return 0;
@@ -1036,11 +1146,11 @@ rte_vhost_clr_inflight_desc_packed(int vid, uint16_t vring_idx,
 	if (unlikely(head >= vq->size))
 		return -1;
 
-	rte_smp_mb();
+	rte_atomic_thread_fence(__ATOMIC_SEQ_CST);
 
 	inflight_info->desc[head].inflight = 0;
 
-	rte_smp_mb();
+	rte_atomic_thread_fence(__ATOMIC_SEQ_CST);
 
 	inflight_info->old_free_head = inflight_info->free_head;
 	inflight_info->old_used_idx = inflight_info->used_idx;
@@ -1167,7 +1277,12 @@ rte_vhost_avail_entries(int vid, uint16_t queue_id)
 	if (!dev)
 		return 0;
 
+	if (queue_id >= VHOST_MAX_VRING)
+		return 0;
+
 	vq = dev->virtqueue[queue_id];
+	if (!vq)
+		return 0;
 
 	rte_spinlock_lock(&vq->access_lock);
 
@@ -1221,12 +1336,29 @@ vhost_enable_notify_packed(struct virtio_net *dev,
 			vq->avail_wrap_counter << 15;
 	}
 
-	rte_smp_wmb();
+	rte_atomic_thread_fence(__ATOMIC_RELEASE);
 
 	vq->device_event->flags = flags;
 	return 0;
 }
 
+int
+vhost_enable_guest_notification(struct virtio_net *dev,
+		struct vhost_virtqueue *vq, int enable)
+{
+	/*
+	 * If the virtqueue is not ready yet, it will be applied
+	 * when it will become ready.
+	 */
+	if (!vq->ready)
+		return 0;
+
+	if (vq_is_packed(dev))
+		return vhost_enable_notify_packed(dev, vq, enable);
+	else
+		return vhost_enable_notify_split(dev, vq, enable);
+}
+
 int
 rte_vhost_enable_guest_notification(int vid, uint16_t queue_id, int enable)
 {
@@ -1237,14 +1369,17 @@ rte_vhost_enable_guest_notification(int vid, uint16_t queue_id, int enable)
 	if (!dev)
 		return -1;
 
+	if (queue_id >= VHOST_MAX_VRING)
+		return -1;
+
 	vq = dev->virtqueue[queue_id];
+	if (!vq)
+		return -1;
 
 	rte_spinlock_lock(&vq->access_lock);
 
-	if (vq_is_packed(dev))
-		ret = vhost_enable_notify_packed(dev, vq, enable);
-	else
-		ret = vhost_enable_notify_split(dev, vq, enable);
+	vq->notif_enable = enable;
+	ret = vhost_enable_guest_notification(dev, vq, enable);
 
 	rte_spinlock_unlock(&vq->access_lock);
 
@@ -1294,7 +1429,7 @@ rte_vhost_rx_queue_count(int vid, uint16_t qid)
 		return 0;
 
 	if (unlikely(qid >= dev->nr_vring || (qid & 1) == 0)) {
-		RTE_LOG(ERR, VHOST_DATA, "(%d) %s: invalid virtqueue idx %d.\n",
+		VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue idx %d.\n",
 			dev->vid, __func__, qid);
 		return 0;
 	}
@@ -1315,14 +1450,15 @@ out:
 	return ret;
 }
 
-int rte_vhost_get_vdpa_device_id(int vid)
+struct rte_vdpa_device *
+rte_vhost_get_vdpa_device(int vid)
 {
 	struct virtio_net *dev = get_device(vid);
 
 	if (dev == NULL)
-		return -1;
+		return NULL;
 
-	return dev->vdpa_dev_id;
+	return dev->vdpa_dev;
 }
 
 int rte_vhost_get_log_base(int vid, uint64_t *log_base,
@@ -1342,13 +1478,57 @@ int rte_vhost_get_log_base(int vid, uint64_t *log_base,
 int rte_vhost_get_vring_base(int vid, uint16_t queue_id,
 		uint16_t *last_avail_idx, uint16_t *last_used_idx)
 {
+	struct vhost_virtqueue *vq;
 	struct virtio_net *dev = get_device(vid);
 
 	if (dev == NULL || last_avail_idx == NULL || last_used_idx == NULL)
 		return -1;
 
-	*last_avail_idx = dev->virtqueue[queue_id]->last_avail_idx;
-	*last_used_idx = dev->virtqueue[queue_id]->last_used_idx;
+	if (queue_id >= VHOST_MAX_VRING)
+		return -1;
+
+	vq = dev->virtqueue[queue_id];
+	if (!vq)
+		return -1;
+
+	if (vq_is_packed(dev)) {
+		*last_avail_idx = (vq->avail_wrap_counter << 15) |
+				  vq->last_avail_idx;
+		*last_used_idx = (vq->used_wrap_counter << 15) |
+				 vq->last_used_idx;
+	} else {
+		*last_avail_idx = vq->last_avail_idx;
+		*last_used_idx = vq->last_used_idx;
+	}
+
+	return 0;
+}
+
+int rte_vhost_set_vring_base(int vid, uint16_t queue_id,
+		uint16_t last_avail_idx, uint16_t last_used_idx)
+{
+	struct vhost_virtqueue *vq;
+	struct virtio_net *dev = get_device(vid);
+
+	if (!dev)
+		return -1;
+
+	if (queue_id >= VHOST_MAX_VRING)
+		return -1;
+
+	vq = dev->virtqueue[queue_id];
+	if (!vq)
+		return -1;
+
+	if (vq_is_packed(dev)) {
+		vq->last_avail_idx = last_avail_idx & 0x7fff;
+		vq->avail_wrap_counter = !!(last_avail_idx & (1 << 15));
+		vq->last_used_idx = last_used_idx & 0x7fff;
+		vq->used_wrap_counter = !!(last_used_idx & (1 << 15));
+	} else {
+		vq->last_avail_idx = last_avail_idx;
+		vq->last_used_idx = last_used_idx;
+	}
 
 	return 0;
 }
@@ -1360,15 +1540,23 @@ rte_vhost_get_vring_base_from_inflight(int vid,
 	uint16_t *last_used_idx)
 {
 	struct rte_vhost_inflight_info_packed *inflight_info;
+	struct vhost_virtqueue *vq;
 	struct virtio_net *dev = get_device(vid);
 
 	if (dev == NULL || last_avail_idx == NULL || last_used_idx == NULL)
 		return -1;
 
+	if (queue_id >= VHOST_MAX_VRING)
+		return -1;
+
+	vq = dev->virtqueue[queue_id];
+	if (!vq)
+		return -1;
+
 	if (!vq_is_packed(dev))
 		return -1;
 
-	inflight_info = dev->virtqueue[queue_id]->inflight_packed;
+	inflight_info = vq->inflight_packed;
 	if (!inflight_info)
 		return -1;
 
@@ -1379,29 +1567,154 @@ rte_vhost_get_vring_base_from_inflight(int vid,
 	return 0;
 }
 
-int rte_vhost_set_vring_base(int vid, uint16_t queue_id,
-		uint16_t last_avail_idx, uint16_t last_used_idx)
+int rte_vhost_extern_callback_register(int vid,
+		struct rte_vhost_user_extern_ops const * const ops, void *ctx)
 {
 	struct virtio_net *dev = get_device(vid);
 
-	if (!dev)
+	if (dev == NULL || ops == NULL)
 		return -1;
 
-	dev->virtqueue[queue_id]->last_avail_idx = last_avail_idx;
-	dev->virtqueue[queue_id]->last_used_idx = last_used_idx;
-
+	dev->extern_ops = *ops;
+	dev->extern_data = ctx;
 	return 0;
 }
 
-int rte_vhost_extern_callback_register(int vid,
-		struct rte_vhost_user_extern_ops const * const ops, void *ctx)
+int rte_vhost_async_channel_register(int vid, uint16_t queue_id,
+					uint32_t features,
+					struct rte_vhost_async_channel_ops *ops)
 {
+	struct vhost_virtqueue *vq;
 	struct virtio_net *dev = get_device(vid);
+	struct rte_vhost_async_features f;
+	int node;
 
 	if (dev == NULL || ops == NULL)
 		return -1;
 
-	dev->extern_ops = *ops;
-	dev->extern_data = ctx;
+	f.intval = features;
+
+	if (queue_id >= VHOST_MAX_VRING)
+		return -1;
+
+	vq = dev->virtqueue[queue_id];
+
+	if (unlikely(vq == NULL || !dev->async_copy))
+		return -1;
+
+	/* packed queue is not supported */
+	if (unlikely(vq_is_packed(dev) || !f.async_inorder)) {
+		VHOST_LOG_CONFIG(ERR,
+			"async copy is not supported on packed queue or non-inorder mode "
+			"(vid %d, qid: %d)\n", vid, queue_id);
+		return -1;
+	}
+
+	if (unlikely(ops->check_completed_copies == NULL ||
+		ops->transfer_data == NULL))
+		return -1;
+
+	rte_spinlock_lock(&vq->access_lock);
+
+	if (unlikely(vq->async_registered)) {
+		VHOST_LOG_CONFIG(ERR,
+			"async register failed: channel already registered "
+			"(vid %d, qid: %d)\n", vid, queue_id);
+		goto reg_out;
+	}
+
+#ifdef RTE_LIBRTE_VHOST_NUMA
+	if (get_mempolicy(&node, NULL, 0, vq, MPOL_F_NODE | MPOL_F_ADDR)) {
+		VHOST_LOG_CONFIG(ERR,
+			"unable to get numa information in async register. "
" + "allocating async buffer memory on the caller thread node\n"); + node = SOCKET_ID_ANY; + } +#else + node = SOCKET_ID_ANY; +#endif + + vq->async_pkts_info = rte_malloc_socket(NULL, + vq->size * sizeof(struct async_inflight_info), + RTE_CACHE_LINE_SIZE, node); + vq->it_pool = rte_malloc_socket(NULL, + VHOST_MAX_ASYNC_IT * sizeof(struct rte_vhost_iov_iter), + RTE_CACHE_LINE_SIZE, node); + vq->vec_pool = rte_malloc_socket(NULL, + VHOST_MAX_ASYNC_VEC * sizeof(struct iovec), + RTE_CACHE_LINE_SIZE, node); + vq->async_descs_split = rte_malloc_socket(NULL, + vq->size * sizeof(struct vring_used_elem), + RTE_CACHE_LINE_SIZE, node); + if (!vq->async_descs_split || !vq->async_pkts_info || + !vq->it_pool || !vq->vec_pool) { + vhost_free_async_mem(vq); + VHOST_LOG_CONFIG(ERR, + "async register failed: cannot allocate memory for vq data " + "(vid %d, qid: %d)\n", vid, queue_id); + goto reg_out; + } + + vq->async_ops.check_completed_copies = ops->check_completed_copies; + vq->async_ops.transfer_data = ops->transfer_data; + + vq->async_inorder = f.async_inorder; + vq->async_threshold = f.async_threshold; + + vq->async_registered = true; + +reg_out: + rte_spinlock_unlock(&vq->access_lock); + return 0; } + +int rte_vhost_async_channel_unregister(int vid, uint16_t queue_id) +{ + struct vhost_virtqueue *vq; + struct virtio_net *dev = get_device(vid); + int ret = -1; + + if (dev == NULL) + return ret; + + if (queue_id >= VHOST_MAX_VRING) + return ret; + + vq = dev->virtqueue[queue_id]; + + if (vq == NULL) + return ret; + + ret = 0; + + if (!vq->async_registered) + return ret; + + if (!rte_spinlock_trylock(&vq->access_lock)) { + VHOST_LOG_CONFIG(ERR, "Failed to unregister async channel. " + "virt queue busy.\n"); + return -1; + } + + if (vq->async_pkts_inflight_n) { + VHOST_LOG_CONFIG(ERR, "Failed to unregister async channel. " + "async inflight packets must be completed before unregistration.\n"); + ret = -1; + goto out; + } + + vhost_free_async_mem(vq); + + vq->async_ops.transfer_data = NULL; + vq->async_ops.check_completed_copies = NULL; + vq->async_registered = false; + +out: + rte_spinlock_unlock(&vq->access_lock); + + return ret; +} + +RTE_LOG_REGISTER(vhost_config_log_level, lib.vhost.config, INFO); +RTE_LOG_REGISTER(vhost_data_log_level, lib.vhost.data, WARNING);