X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=lib%2Fvhost%2Fvhost.c;h=bc88148347a946ded5c87f82e4ad4a40fe911db2;hb=a2dfcd1ff609f5a4fd3b65774618a35c5c9f73c6;hp=c96f6335c8576c89ebba88dab268212de38754be;hpb=f31ce483bcdced91685b5d3c7291d79793f7499c;p=dpdk.git

diff --git a/lib/vhost/vhost.c b/lib/vhost/vhost.c
index c96f6335c8..bc88148347 100644
--- a/lib/vhost/vhost.c
+++ b/lib/vhost/vhost.c
@@ -4,7 +4,6 @@

 #include <linux/vhost.h>
 #include <linux/virtio_net.h>
-#include <stddef.h>
 #include <stdint.h>
 #include <stdlib.h>
 #ifdef RTE_LIBRTE_VHOST_NUMA
@@ -13,19 +12,16 @@
 #endif

 #include <rte_errno.h>
-#include <rte_ethdev.h>
 #include <rte_log.h>
-#include <rte_string_fns.h>
 #include <rte_memory.h>
 #include <rte_malloc.h>
 #include <rte_vhost.h>
-#include <rte_rwlock.h>

 #include "iotlb.h"
 #include "vhost.h"
 #include "vhost_user.h"

-struct virtio_net *vhost_devices[MAX_VHOST_DEVICE];
+struct virtio_net *vhost_devices[RTE_MAX_VHOST_DEVICE];
 pthread_mutex_t vhost_dev_lock = PTHREAD_MUTEX_INITIALIZER;

 /* Called with iotlb_lock read-locked */
@@ -56,11 +52,10 @@ __vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
	 */
	vhost_user_iotlb_rd_unlock(vq);

-	vhost_user_iotlb_pending_insert(vq, iova, perm);
+	vhost_user_iotlb_pending_insert(dev, vq, iova, perm);
	if (vhost_user_iotlb_miss(dev, iova, perm)) {
-		VHOST_LOG_CONFIG(ERR,
-			"IOTLB miss req failed for IOVA 0x%" PRIx64 "\n",
-			iova);
+		VHOST_LOG_DATA(ERR, "(%s) IOTLB miss req failed for IOVA 0x%" PRIx64 "\n",
+			dev->ifname, iova);
		vhost_user_iotlb_pending_remove(vq, iova, 1, perm);
	}

@@ -126,8 +121,8 @@ __vhost_log_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq,
	hva = __vhost_iova_to_vva(dev, vq, iova, &map_len, VHOST_ACCESS_RW);
	if (map_len != len) {
		VHOST_LOG_DATA(ERR,
-			"Failed to write log for IOVA 0x%" PRIx64 ". No IOTLB entry found\n",
-			iova);
+			"(%s) failed to write log for IOVA 0x%" PRIx64 ". No IOTLB entry found\n",
+			dev->ifname, iova);
		return;
	}

@@ -243,8 +238,8 @@ __vhost_log_cache_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq,
	hva = __vhost_iova_to_vva(dev, vq, iova, &map_len, VHOST_ACCESS_RW);
	if (map_len != len) {
		VHOST_LOG_DATA(ERR,
-			"Failed to write log for IOVA 0x%" PRIx64 ". No IOTLB entry found\n",
-			iova);
+			"(%s) failed to write log for IOVA 0x%" PRIx64 ". No IOTLB entry found\n",
+			dev->ifname, iova);
		return;
	}

@@ -261,7 +256,7 @@ vhost_alloc_copy_ind_table(struct virtio_net *dev, struct vhost_virtqueue *vq,
	uint64_t src, dst;
	uint64_t len, remain = desc_len;

-	idesc = rte_malloc(__func__, desc_len, 0);
+	idesc = rte_malloc_socket(__func__, desc_len, 0, vq->numa_node);
	if (unlikely(!idesc))
		return NULL;

@@ -312,10 +307,10 @@ cleanup_vq_inflight(struct virtio_net *dev, struct vhost_virtqueue *vq)

	if (vq->resubmit_inflight) {
		if (vq->resubmit_inflight->resubmit_list) {
-			free(vq->resubmit_inflight->resubmit_list);
+			rte_free(vq->resubmit_inflight->resubmit_list);
			vq->resubmit_inflight->resubmit_list = NULL;
		}
-		free(vq->resubmit_inflight);
+		rte_free(vq->resubmit_inflight);
		vq->resubmit_inflight = NULL;
	}
 }
@@ -340,19 +335,19 @@ cleanup_device(struct virtio_net *dev, int destroy)
 static void
 vhost_free_async_mem(struct vhost_virtqueue *vq)
 {
-	rte_free(vq->async_pkts_info);
+	if (!vq->async)
+		return;

-	rte_free(vq->async_buffers_packed);
-	vq->async_buffers_packed = NULL;
-	rte_free(vq->async_descs_split);
-	vq->async_descs_split = NULL;
+	rte_free(vq->async->pkts_info);
+	rte_free(vq->async->pkts_cmpl_flag);

-	rte_free(vq->it_pool);
-	rte_free(vq->vec_pool);
+	rte_free(vq->async->buffers_packed);
+	vq->async->buffers_packed = NULL;
+	rte_free(vq->async->descs_split);
+	vq->async->descs_split = NULL;

-	vq->async_pkts_info = NULL;
-	vq->it_pool = NULL;
-	vq->vec_pool = NULL;
+	rte_free(vq->async);
+	vq->async = NULL;
 }

 void
@@ -422,10 +417,10 @@ translate_log_addr(struct virtio_net *dev, struct vhost_virtqueue *vq,

		gpa = hva_to_gpa(dev, hva, exp_size);
		if (!gpa) {
-			VHOST_LOG_CONFIG(ERR,
-				"VQ: Failed to find GPA for log_addr: 0x%"
+			VHOST_LOG_DATA(ERR,
+				"(%s) failed to find GPA for log_addr: 0x%"
				PRIx64 " hva: 0x%" PRIx64 "\n",
-				log_addr, hva);
+				dev->ifname, log_addr, hva);
			return 0;
		}
		return gpa;
@@ -549,18 +544,18 @@ static void
 init_vring_queue(struct virtio_net *dev, uint32_t vring_idx)
 {
	struct vhost_virtqueue *vq;
+	int numa_node = SOCKET_ID_ANY;

	if (vring_idx >= VHOST_MAX_VRING) {
-		VHOST_LOG_CONFIG(ERR,
-			"Failed not init vring, out of bound (%d)\n",
-			vring_idx);
+		VHOST_LOG_CONFIG(ERR, "(%s) failed to init vring, out of bound (%d)\n",
+			dev->ifname, vring_idx);
		return;
	}

	vq = dev->virtqueue[vring_idx];
	if (!vq) {
-		VHOST_LOG_CONFIG(ERR, "Virtqueue not allocated (%d)\n",
-			vring_idx);
+		VHOST_LOG_CONFIG(ERR, "(%s) virtqueue not allocated (%d)\n",
+			dev->ifname, vring_idx);
		return;
	}

@@ -570,6 +565,15 @@ init_vring_queue(struct virtio_net *dev, uint32_t vring_idx)
	vq->callfd = VIRTIO_UNINITIALIZED_EVENTFD;
	vq->notif_enable = VIRTIO_UNINITIALIZED_NOTIF;

+#ifdef RTE_LIBRTE_VHOST_NUMA
+	if (get_mempolicy(&numa_node, NULL, 0, vq, MPOL_F_NODE | MPOL_F_ADDR)) {
+		VHOST_LOG_CONFIG(ERR, "(%s) failed to query numa node: %s\n",
+			dev->ifname, rte_strerror(errno));
+		numa_node = SOCKET_ID_ANY;
+	}
+#endif
+	vq->numa_node = numa_node;
+
	vhost_user_iotlb_init(dev, vring_idx);
 }

@@ -581,15 +585,15 @@ reset_vring_queue(struct virtio_net *dev, uint32_t vring_idx)

	if (vring_idx >= VHOST_MAX_VRING) {
		VHOST_LOG_CONFIG(ERR,
-			"Failed not init vring, out of bound (%d)\n",
-			vring_idx);
+			"(%s) failed to reset vring, out of bound (%d)\n",
+			dev->ifname, vring_idx);
		return;
	}

	vq = dev->virtqueue[vring_idx];
	if (!vq) {
-		VHOST_LOG_CONFIG(ERR, "Virtqueue not allocated (%d)\n",
-			vring_idx);
+		VHOST_LOG_CONFIG(ERR, "(%s) failed to reset vring, virtqueue not allocated (%d)\n",
+			dev->ifname, vring_idx);
		return;
	}

@@ -611,8 +615,8 @@ alloc_vring_queue(struct virtio_net *dev, uint32_t vring_idx)

		vq = rte_zmalloc(NULL, sizeof(struct vhost_virtqueue), 0);
		if (vq == NULL) {
-			VHOST_LOG_CONFIG(ERR,
-				"Failed to allocate memory for vring:%u.\n", i);
+			VHOST_LOG_CONFIG(ERR, "(%s) failed to allocate memory for vring %u.\n",
+				dev->ifname, i);
			return -1;
		}

@@ -658,22 +662,20 @@ vhost_new_device(void)
	int i;

	pthread_mutex_lock(&vhost_dev_lock);
-	for (i = 0; i < MAX_VHOST_DEVICE; i++) {
+	for (i = 0; i < RTE_MAX_VHOST_DEVICE; i++) {
		if (vhost_devices[i] == NULL)
			break;
	}

-	if (i == MAX_VHOST_DEVICE) {
-		VHOST_LOG_CONFIG(ERR,
-			"Failed to find a free slot for new device.\n");
+	if (i == RTE_MAX_VHOST_DEVICE) {
+		VHOST_LOG_CONFIG(ERR, "failed to find a free slot for new device.\n");
		pthread_mutex_unlock(&vhost_dev_lock);
		return -1;
	}

	dev = rte_zmalloc(NULL, sizeof(struct virtio_net), 0);
	if (dev == NULL) {
-		VHOST_LOG_CONFIG(ERR,
-			"Failed to allocate memory for new dev.\n");
+		VHOST_LOG_CONFIG(ERR, "failed to allocate memory for new device.\n");
		pthread_mutex_unlock(&vhost_dev_lock);
		return -1;
	}
@@ -825,9 +827,8 @@ rte_vhost_get_numa_node(int vid)
	ret = get_mempolicy(&numa_node, NULL, 0, dev,
			    MPOL_F_NODE | MPOL_F_ADDR);
	if (ret < 0) {
-		VHOST_LOG_CONFIG(ERR,
-			"(%d) failed to query numa node: %s\n",
-			vid, rte_strerror(errno));
+		VHOST_LOG_CONFIG(ERR, "(%s) failed to query numa node: %s\n",
+			dev->ifname, rte_strerror(errno));
		return -1;
	}

@@ -1216,6 +1217,9 @@ rte_vhost_set_last_inflight_io_split(int vid, uint16_t vring_idx,
	if (unlikely(!vq->inflight_split))
		return -1;

+	if (unlikely(idx >= vq->size))
+		return -1;
+
	vq->inflight_split->last_inflight_io = idx;
	return 0;
 }
@@ -1458,8 +1462,8 @@ rte_vhost_rx_queue_count(int vid, uint16_t qid)
		return 0;

	if (unlikely(qid >= dev->nr_vring || (qid & 1) == 0)) {
-		VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue idx %d.\n",
-			dev->vid, __func__, qid);
+		VHOST_LOG_DATA(ERR, "(%s) %s: invalid virtqueue idx %d.\n",
+			dev->ifname, __func__, qid);
		return 0;
	}

@@ -1490,7 +1494,8 @@ rte_vhost_get_vdpa_device(int vid)
	return dev->vdpa_dev;
 }

-int rte_vhost_get_log_base(int vid, uint64_t *log_base,
+int
+rte_vhost_get_log_base(int vid, uint64_t *log_base,
		uint64_t *log_size)
 {
	struct virtio_net *dev = get_device(vid);
@@ -1504,7 +1509,8 @@ int rte_vhost_get_log_base(int vid, uint64_t *log_base,
	return 0;
 }

-int rte_vhost_get_vring_base(int vid, uint16_t queue_id,
+int
+rte_vhost_get_vring_base(int vid, uint16_t queue_id,
		uint16_t *last_avail_idx, uint16_t *last_used_idx)
 {
	struct vhost_virtqueue *vq;
@@ -1533,7 +1539,8 @@ int rte_vhost_get_vring_base(int vid, uint16_t queue_id,
	return 0;
 }

-int rte_vhost_set_vring_base(int vid, uint16_t queue_id,
+int
+rte_vhost_set_vring_base(int vid, uint16_t queue_id,
		uint16_t last_avail_idx, uint16_t last_used_idx)
 {
	struct vhost_virtqueue *vq;
@@ -1596,7 +1603,8 @@ rte_vhost_get_vring_base_from_inflight(int vid,
	return 0;
 }

-int rte_vhost_extern_callback_register(int vid,
+int
+rte_vhost_extern_callback_register(int vid,
		struct rte_vhost_user_extern_ops const * const ops, void *ctx)
 {
	struct virtio_net *dev = get_device(vid);
@@ -1609,19 +1617,84 @@ int rte_vhost_extern_callback_register(int vid,
	return 0;
 }

-int rte_vhost_async_channel_register(int vid, uint16_t queue_id,
-		uint32_t features,
-		struct rte_vhost_async_channel_ops *ops)
+static __rte_always_inline int
+async_channel_register(int vid, uint16_t queue_id)
 {
-	struct vhost_virtqueue *vq;
	struct virtio_net *dev = get_device(vid);
-	struct rte_vhost_async_features f;
-	int node;
+	struct vhost_virtqueue *vq = dev->virtqueue[queue_id];
+	struct vhost_async *async;
+	int node = vq->numa_node;

-	if (dev == NULL || ops == NULL)
+	if (unlikely(vq->async)) {
+		VHOST_LOG_CONFIG(ERR,
+			"(%s) async register failed: already registered (qid: %d)\n",
+			dev->ifname, queue_id);
+		return -1;
+	}
+
+	async = rte_zmalloc_socket(NULL, sizeof(struct vhost_async), 0, node);
+	if (!async) {
+		VHOST_LOG_CONFIG(ERR, "(%s) failed to allocate async metadata (qid: %d)\n",
+			dev->ifname, queue_id);
		return -1;
+	}
+
+	async->pkts_info = rte_malloc_socket(NULL, vq->size * sizeof(struct async_inflight_info),
+			RTE_CACHE_LINE_SIZE, node);
+	if (!async->pkts_info) {
+		VHOST_LOG_CONFIG(ERR, "(%s) failed to allocate async_pkts_info (qid: %d)\n",
+			dev->ifname, queue_id);
+		goto out_free_async;
+	}
+
+	async->pkts_cmpl_flag = rte_zmalloc_socket(NULL, vq->size * sizeof(bool),
+			RTE_CACHE_LINE_SIZE, node);
+	if (!async->pkts_cmpl_flag) {
+		VHOST_LOG_CONFIG(ERR, "(%s) failed to allocate async pkts_cmpl_flag (qid: %d)\n",
+			dev->ifname, queue_id);
+		goto out_free_async;
+	}
+
+	if (vq_is_packed(dev)) {
+		async->buffers_packed = rte_malloc_socket(NULL,
+				vq->size * sizeof(struct vring_used_elem_packed),
+				RTE_CACHE_LINE_SIZE, node);
+		if (!async->buffers_packed) {
+			VHOST_LOG_CONFIG(ERR, "(%s) failed to allocate async buffers (qid: %d)\n",
+				dev->ifname, queue_id);
+			goto out_free_inflight;
+		}
+	} else {
+		async->descs_split = rte_malloc_socket(NULL,
+				vq->size * sizeof(struct vring_used_elem),
+				RTE_CACHE_LINE_SIZE, node);
+		if (!async->descs_split) {
+			VHOST_LOG_CONFIG(ERR, "(%s) failed to allocate async descs (qid: %d)\n",
+				dev->ifname, queue_id);
+			goto out_free_inflight;
+		}
+	}
+
+	vq->async = async;
+
+	return 0;
+out_free_inflight:
+	rte_free(async->pkts_info);
+out_free_async:
+	rte_free(async);
+
+	return -1;
+}
+
+int
+rte_vhost_async_channel_register(int vid, uint16_t queue_id)
+{
+	struct vhost_virtqueue *vq;
+	struct virtio_net *dev = get_device(vid);
+	int ret;

-	f.intval = features;
+	if (dev == NULL)
+		return -1;

	if (queue_id >= VHOST_MAX_VRING)
		return -1;
@@ -1631,109 +1704,173 @@ int rte_vhost_async_channel_register(int vid, uint16_t queue_id,
	if (unlikely(vq == NULL || !dev->async_copy))
		return -1;

-	if (unlikely(!f.async_inorder)) {
-		VHOST_LOG_CONFIG(ERR,
-			"async copy is not supported on non-inorder mode "
-			"(vid %d, qid: %d)\n", vid, queue_id);
+	rte_spinlock_lock(&vq->access_lock);
+	ret = async_channel_register(vid, queue_id);
+	rte_spinlock_unlock(&vq->access_lock);
+
+	return ret;
+}
+
+int
+rte_vhost_async_channel_register_thread_unsafe(int vid, uint16_t queue_id)
+{
+	struct vhost_virtqueue *vq;
+	struct virtio_net *dev = get_device(vid);
+
+	if (dev == NULL)
		return -1;
-	}

-	if (unlikely(ops->check_completed_copies == NULL ||
-				ops->transfer_data == NULL))
+	if (queue_id >= VHOST_MAX_VRING)
		return -1;

-	rte_spinlock_lock(&vq->access_lock);
+	vq = dev->virtqueue[queue_id];

-	if (unlikely(vq->async_registered)) {
-		VHOST_LOG_CONFIG(ERR,
-			"async register failed: channel already registered "
-			"(vid %d, qid: %d)\n", vid, queue_id);
-		goto reg_out;
+	if (unlikely(vq == NULL || !dev->async_copy))
+		return -1;
+
+	return async_channel_register(vid, queue_id);
+}
+
+int
+rte_vhost_async_channel_unregister(int vid, uint16_t queue_id)
+{
+	struct vhost_virtqueue *vq;
+	struct virtio_net *dev = get_device(vid);
+	int ret = -1;
+
+	if (dev == NULL)
+		return ret;
+
+	if (queue_id >= VHOST_MAX_VRING)
+		return ret;
+
+	vq = dev->virtqueue[queue_id];
+
+	if (vq == NULL)
+		return ret;
+
+	ret = 0;
+
+	if (!vq->async)
+		return ret;
+
+	if (!rte_spinlock_trylock(&vq->access_lock)) {
+		VHOST_LOG_CONFIG(ERR, "(%s) failed to unregister async channel, virtqueue busy.\n",
+			dev->ifname);
+		return -1;
	}

-#ifdef RTE_LIBRTE_VHOST_NUMA
-	if (get_mempolicy(&node, NULL, 0, vq, MPOL_F_NODE | MPOL_F_ADDR)) {
-		VHOST_LOG_CONFIG(ERR,
-			"unable to get numa information in async register. "
-			"allocating async buffer memory on the caller thread node\n");
-		node = SOCKET_ID_ANY;
+	if (vq->async->pkts_inflight_n) {
+		VHOST_LOG_CONFIG(ERR, "(%s) failed to unregister async channel.\n", dev->ifname);
+		VHOST_LOG_CONFIG(ERR, "(%s) inflight packets must be completed before unregistration.\n",
+			dev->ifname);
+		ret = -1;
+		goto out;
	}
-#else
-	node = SOCKET_ID_ANY;
-#endif

-	vq->async_pkts_info = rte_malloc_socket(NULL,
-		vq->size * sizeof(struct async_inflight_info),
-		RTE_CACHE_LINE_SIZE, node);
-	if (!vq->async_pkts_info) {
-		vhost_free_async_mem(vq);
-		VHOST_LOG_CONFIG(ERR,
-			"async register failed: cannot allocate memory for async_pkts_info "
-			"(vid %d, qid: %d)\n", vid, queue_id);
-		goto reg_out;
+	vhost_free_async_mem(vq);
+out:
+	rte_spinlock_unlock(&vq->access_lock);
+
+	return ret;
+}
+
+int
+rte_vhost_async_channel_unregister_thread_unsafe(int vid, uint16_t queue_id)
+{
+	struct vhost_virtqueue *vq;
+	struct virtio_net *dev = get_device(vid);
+
+	if (dev == NULL)
+		return -1;
+
+	if (queue_id >= VHOST_MAX_VRING)
+		return -1;
+
+	vq = dev->virtqueue[queue_id];
+
+	if (vq == NULL)
+		return -1;
+
+	if (!vq->async)
+		return 0;
+
+	if (vq->async->pkts_inflight_n) {
+		VHOST_LOG_CONFIG(ERR, "(%s) failed to unregister async channel.\n", dev->ifname);
+		VHOST_LOG_CONFIG(ERR, "(%s) inflight packets must be completed before unregistration.\n",
+			dev->ifname);
+		return -1;
	}

-	vq->it_pool = rte_malloc_socket(NULL,
-		VHOST_MAX_ASYNC_IT * sizeof(struct rte_vhost_iov_iter),
-		RTE_CACHE_LINE_SIZE, node);
-	if (!vq->it_pool) {
-		vhost_free_async_mem(vq);
-		VHOST_LOG_CONFIG(ERR,
-			"async register failed: cannot allocate memory for it_pool "
-			"(vid %d, qid: %d)\n", vid, queue_id);
-		goto reg_out;
+	vhost_free_async_mem(vq);
+
+	return 0;
+}
+
+int
+rte_vhost_async_dma_configure(int16_t dma_id, uint16_t vchan_id)
+{
+	struct rte_dma_info info;
+	void *pkts_cmpl_flag_addr;
+	uint16_t max_desc;
+
+	if (!rte_dma_is_valid(dma_id)) {
+		VHOST_LOG_CONFIG(ERR, "DMA %d is not found.\n", dma_id);
+		return -1;
	}

-	vq->vec_pool = rte_malloc_socket(NULL,
-		VHOST_MAX_ASYNC_VEC * sizeof(struct iovec),
-		RTE_CACHE_LINE_SIZE, node);
-	if (!vq->vec_pool) {
-		vhost_free_async_mem(vq);
-		VHOST_LOG_CONFIG(ERR,
-			"async register failed: cannot allocate memory for vec_pool "
-			"(vid %d, qid: %d)\n", vid, queue_id);
-		goto reg_out;
+	rte_dma_info_get(dma_id, &info);
+	if (vchan_id >= info.max_vchans) {
+		VHOST_LOG_CONFIG(ERR, "Invalid DMA %d vChannel %u.\n", dma_id, vchan_id);
+		return -1;
	}

-	if (vq_is_packed(dev)) {
-		vq->async_buffers_packed = rte_malloc_socket(NULL,
-			vq->size * sizeof(struct vring_used_elem_packed),
-			RTE_CACHE_LINE_SIZE, node);
-		if (!vq->async_buffers_packed) {
-			vhost_free_async_mem(vq);
-			VHOST_LOG_CONFIG(ERR,
-				"async register failed: cannot allocate memory for async buffers "
-				"(vid %d, qid: %d)\n", vid, queue_id);
-			goto reg_out;
-		}
-	} else {
-		vq->async_descs_split = rte_malloc_socket(NULL,
-			vq->size * sizeof(struct vring_used_elem),
-			RTE_CACHE_LINE_SIZE, node);
-		if (!vq->async_descs_split) {
-			vhost_free_async_mem(vq);
-			VHOST_LOG_CONFIG(ERR,
-				"async register failed: cannot allocate memory for async descs "
-				"(vid %d, qid: %d)\n", vid, queue_id);
-			goto reg_out;
+	if (!dma_copy_track[dma_id].vchans) {
+		struct async_dma_vchan_info *vchans;
+
+		vchans = rte_zmalloc(NULL, sizeof(struct async_dma_vchan_info) * info.max_vchans,
+				RTE_CACHE_LINE_SIZE);
+		if (vchans == NULL) {
+			VHOST_LOG_CONFIG(ERR, "Failed to allocate vchans for DMA %d vChannel %u.\n",
+					dma_id, vchan_id);
+			return -1;
		}
+
+		dma_copy_track[dma_id].vchans = vchans;
	}

-	vq->async_ops.check_completed_copies = ops->check_completed_copies;
-	vq->async_ops.transfer_data = ops->transfer_data;
+	if (dma_copy_track[dma_id].vchans[vchan_id].pkts_cmpl_flag_addr) {
+		VHOST_LOG_CONFIG(INFO, "DMA %d vChannel %u already registered.\n", dma_id,
+				vchan_id);
+		return 0;
+	}

-	vq->async_inorder = f.async_inorder;
-	vq->async_threshold = f.async_threshold;
+	max_desc = info.max_desc;
+	if (!rte_is_power_of_2(max_desc))
+		max_desc = rte_align32pow2(max_desc);

-	vq->async_registered = true;
+	pkts_cmpl_flag_addr = rte_zmalloc(NULL, sizeof(bool *) * max_desc, RTE_CACHE_LINE_SIZE);
+	if (!pkts_cmpl_flag_addr) {
+		VHOST_LOG_CONFIG(ERR, "Failed to allocate pkts_cmpl_flag_addr for DMA %d "
+				"vChannel %u.\n", dma_id, vchan_id);

-reg_out:
-	rte_spinlock_unlock(&vq->access_lock);
+		if (dma_copy_track[dma_id].nr_vchans == 0) {
+			rte_free(dma_copy_track[dma_id].vchans);
+			dma_copy_track[dma_id].vchans = NULL;
+		}
+		return -1;
+	}
+
+	dma_copy_track[dma_id].vchans[vchan_id].pkts_cmpl_flag_addr = pkts_cmpl_flag_addr;
+	dma_copy_track[dma_id].vchans[vchan_id].ring_size = max_desc;
+	dma_copy_track[dma_id].vchans[vchan_id].ring_mask = max_desc - 1;
+	dma_copy_track[dma_id].nr_vchans++;

	return 0;
 }

-int rte_vhost_async_channel_unregister(int vid, uint16_t queue_id)
+int
+rte_vhost_async_get_inflight(int vid, uint16_t queue_id)
 {
	struct vhost_virtqueue *vq;
	struct virtio_net *dev = get_device(vid);
@@ -1750,34 +1887,58 @@ int rte_vhost_async_channel_unregister(int vid, uint16_t queue_id)
	if (vq == NULL)
		return ret;

-	ret = 0;
-
-	if (!vq->async_registered)
+	if (!vq->async)
		return ret;

	if (!rte_spinlock_trylock(&vq->access_lock)) {
-		VHOST_LOG_CONFIG(ERR, "Failed to unregister async channel. "
-			"virt queue busy.\n");
-		return -1;
+		VHOST_LOG_CONFIG(DEBUG,
+			"(%s) failed to check in-flight packets. virtqueue busy.\n",
+			dev->ifname);
+		return ret;
	}

-	if (vq->async_pkts_inflight_n) {
-		VHOST_LOG_CONFIG(ERR, "Failed to unregister async channel. "
" - "async inflight packets must be completed before unregistration.\n"); - ret = -1; - goto out; - } + ret = vq->async->pkts_inflight_n; + rte_spinlock_unlock(&vq->access_lock); - vhost_free_async_mem(vq); + return ret; +} - vq->async_ops.transfer_data = NULL; - vq->async_ops.check_completed_copies = NULL; - vq->async_registered = false; +int +rte_vhost_get_monitor_addr(int vid, uint16_t queue_id, + struct rte_vhost_power_monitor_cond *pmc) +{ + struct virtio_net *dev = get_device(vid); + struct vhost_virtqueue *vq; -out: - rte_spinlock_unlock(&vq->access_lock); + if (dev == NULL) + return -1; + if (queue_id >= VHOST_MAX_VRING) + return -1; - return ret; + vq = dev->virtqueue[queue_id]; + if (vq == NULL) + return -1; + + if (vq_is_packed(dev)) { + struct vring_packed_desc *desc; + desc = vq->desc_packed; + pmc->addr = &desc[vq->last_avail_idx].flags; + if (vq->avail_wrap_counter) + pmc->val = VRING_DESC_F_AVAIL; + else + pmc->val = VRING_DESC_F_USED; + pmc->mask = VRING_DESC_F_AVAIL | VRING_DESC_F_USED; + pmc->size = sizeof(desc[vq->last_avail_idx].flags); + pmc->match = 1; + } else { + pmc->addr = &vq->avail->idx; + pmc->val = vq->last_avail_idx & (vq->size - 1); + pmc->mask = vq->size - 1; + pmc->size = sizeof(vq->avail->idx); + pmc->match = 0; + } + + return 0; } RTE_LOG_REGISTER_SUFFIX(vhost_config_log_level, config, INFO);