diff --git a/lib/vhost/vhost.c b/lib/vhost/vhost.c
index 3f82d3fa17..f59ca6c157 100644
--- a/lib/vhost/vhost.c
+++ b/lib/vhost/vhost.c
@@ -56,11 +56,10 @@ __vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	 */
 	vhost_user_iotlb_rd_unlock(vq);
 
-	vhost_user_iotlb_pending_insert(vq, iova, perm);
+	vhost_user_iotlb_pending_insert(dev, vq, iova, perm);
 	if (vhost_user_iotlb_miss(dev, iova, perm)) {
-		VHOST_LOG_CONFIG(ERR,
-			"IOTLB miss req failed for IOVA 0x%" PRIx64 "\n",
-			iova);
+		VHOST_LOG_DATA(ERR, "(%s) IOTLB miss req failed for IOVA 0x%" PRIx64 "\n",
+			dev->ifname, iova);
 		vhost_user_iotlb_pending_remove(vq, iova, 1, perm);
 	}
 
@@ -126,8 +125,8 @@ __vhost_log_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	hva = __vhost_iova_to_vva(dev, vq, iova, &map_len, VHOST_ACCESS_RW);
 	if (map_len != len) {
 		VHOST_LOG_DATA(ERR,
-			"Failed to write log for IOVA 0x%" PRIx64 ". No IOTLB entry found\n",
-			iova);
+			"(%s) failed to write log for IOVA 0x%" PRIx64 ". No IOTLB entry found\n",
+			dev->ifname, iova);
 		return;
 	}
 
@@ -243,8 +242,8 @@ __vhost_log_cache_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	hva = __vhost_iova_to_vva(dev, vq, iova, &map_len, VHOST_ACCESS_RW);
 	if (map_len != len) {
 		VHOST_LOG_DATA(ERR,
-			"Failed to write log for IOVA 0x%" PRIx64 ". No IOTLB entry found\n",
-			iova);
+			"(%s) failed to write log for IOVA 0x%" PRIx64 ". No IOTLB entry found\n",
+			dev->ifname, iova);
 		return;
 	}
 
@@ -340,19 +339,18 @@ cleanup_device(struct virtio_net *dev, int destroy)
 static void
 vhost_free_async_mem(struct vhost_virtqueue *vq)
 {
-	rte_free(vq->async_pkts_info);
+	if (!vq->async)
+		return;
 
-	rte_free(vq->async_buffers_packed);
-	vq->async_buffers_packed = NULL;
-	rte_free(vq->async_descs_split);
-	vq->async_descs_split = NULL;
+	rte_free(vq->async->pkts_info);
 
-	rte_free(vq->it_pool);
-	rte_free(vq->vec_pool);
+	rte_free(vq->async->buffers_packed);
+	vq->async->buffers_packed = NULL;
+	rte_free(vq->async->descs_split);
+	vq->async->descs_split = NULL;
 
-	vq->async_pkts_info = NULL;
-	vq->it_pool = NULL;
-	vq->vec_pool = NULL;
+	rte_free(vq->async);
+	vq->async = NULL;
 }
 
 void
@@ -422,10 +420,10 @@ translate_log_addr(struct virtio_net *dev, struct vhost_virtqueue *vq,
 
 		gpa = hva_to_gpa(dev, hva, exp_size);
 		if (!gpa) {
-			VHOST_LOG_CONFIG(ERR,
-				"VQ: Failed to find GPA for log_addr: 0x%"
+			VHOST_LOG_DATA(ERR,
+				"(%s) failed to find GPA for log_addr: 0x%"
 				PRIx64 " hva: 0x%" PRIx64 "\n",
-				log_addr, hva);
+				dev->ifname, log_addr, hva);
 			return 0;
 		}
 		return gpa;
@@ -552,16 +550,15 @@ init_vring_queue(struct virtio_net *dev, uint32_t vring_idx)
 	int numa_node = SOCKET_ID_ANY;
 
 	if (vring_idx >= VHOST_MAX_VRING) {
-		VHOST_LOG_CONFIG(ERR,
-			"Failed not init vring, out of bound (%d)\n",
-			vring_idx);
+		VHOST_LOG_CONFIG(ERR, "(%s) failed to init vring, out of bound (%d)\n",
+				dev->ifname, vring_idx);
 		return;
 	}
 
 	vq = dev->virtqueue[vring_idx];
 	if (!vq) {
-		VHOST_LOG_CONFIG(ERR, "Virtqueue not allocated (%d)\n",
-			vring_idx);
+		VHOST_LOG_CONFIG(ERR, "(%s) virtqueue not allocated (%d)\n",
+				dev->ifname, vring_idx);
 		return;
 	}
 
@@ -573,8 +570,8 @@ init_vring_queue(struct virtio_net *dev, uint32_t vring_idx)
 #ifdef RTE_LIBRTE_VHOST_NUMA
 	if (get_mempolicy(&numa_node, NULL, 0, vq, MPOL_F_NODE | MPOL_F_ADDR)) {
-		
VHOST_LOG_CONFIG(ERR, "(%d) failed to query numa node: %s\n", - dev->vid, rte_strerror(errno)); + VHOST_LOG_CONFIG(ERR, "(%s) failed to query numa node: %s\n", + dev->ifname, rte_strerror(errno)); numa_node = SOCKET_ID_ANY; } #endif @@ -591,15 +588,15 @@ reset_vring_queue(struct virtio_net *dev, uint32_t vring_idx) if (vring_idx >= VHOST_MAX_VRING) { VHOST_LOG_CONFIG(ERR, - "Failed not init vring, out of bound (%d)\n", - vring_idx); + "(%s) failed to reset vring, out of bound (%d)\n", + dev->ifname, vring_idx); return; } vq = dev->virtqueue[vring_idx]; if (!vq) { - VHOST_LOG_CONFIG(ERR, "Virtqueue not allocated (%d)\n", - vring_idx); + VHOST_LOG_CONFIG(ERR, "(%s) failed to reset vring, virtqueue not allocated (%d)\n", + dev->ifname, vring_idx); return; } @@ -621,8 +618,8 @@ alloc_vring_queue(struct virtio_net *dev, uint32_t vring_idx) vq = rte_zmalloc(NULL, sizeof(struct vhost_virtqueue), 0); if (vq == NULL) { - VHOST_LOG_CONFIG(ERR, - "Failed to allocate memory for vring:%u.\n", i); + VHOST_LOG_CONFIG(ERR, "(%s) failed to allocate memory for vring %u.\n", + dev->ifname, i); return -1; } @@ -674,16 +671,14 @@ vhost_new_device(void) } if (i == MAX_VHOST_DEVICE) { - VHOST_LOG_CONFIG(ERR, - "Failed to find a free slot for new device.\n"); + VHOST_LOG_CONFIG(ERR, "failed to find a free slot for new device.\n"); pthread_mutex_unlock(&vhost_dev_lock); return -1; } dev = rte_zmalloc(NULL, sizeof(struct virtio_net), 0); if (dev == NULL) { - VHOST_LOG_CONFIG(ERR, - "Failed to allocate memory for new dev.\n"); + VHOST_LOG_CONFIG(ERR, "failed to allocate memory for new device.\n"); pthread_mutex_unlock(&vhost_dev_lock); return -1; } @@ -835,9 +830,8 @@ rte_vhost_get_numa_node(int vid) ret = get_mempolicy(&numa_node, NULL, 0, dev, MPOL_F_NODE | MPOL_F_ADDR); if (ret < 0) { - VHOST_LOG_CONFIG(ERR, - "(%d) failed to query numa node: %s\n", - vid, rte_strerror(errno)); + VHOST_LOG_CONFIG(ERR, "(%s) failed to query numa node: %s\n", + dev->ifname, rte_strerror(errno)); return -1; } @@ -1226,6 +1220,9 @@ rte_vhost_set_last_inflight_io_split(int vid, uint16_t vring_idx, if (unlikely(!vq->inflight_split)) return -1; + if (unlikely(idx >= vq->size)) + return -1; + vq->inflight_split->last_inflight_io = idx; return 0; } @@ -1468,8 +1465,8 @@ rte_vhost_rx_queue_count(int vid, uint16_t qid) return 0; if (unlikely(qid >= dev->nr_vring || (qid & 1) == 0)) { - VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue idx %d.\n", - dev->vid, __func__, qid); + VHOST_LOG_DATA(ERR, "(%s) %s: invalid virtqueue idx %d.\n", + dev->ifname, __func__, qid); return 0; } @@ -1500,7 +1497,8 @@ rte_vhost_get_vdpa_device(int vid) return dev->vdpa_dev; } -int rte_vhost_get_log_base(int vid, uint64_t *log_base, +int +rte_vhost_get_log_base(int vid, uint64_t *log_base, uint64_t *log_size) { struct virtio_net *dev = get_device(vid); @@ -1514,7 +1512,8 @@ int rte_vhost_get_log_base(int vid, uint64_t *log_base, return 0; } -int rte_vhost_get_vring_base(int vid, uint16_t queue_id, +int +rte_vhost_get_vring_base(int vid, uint16_t queue_id, uint16_t *last_avail_idx, uint16_t *last_used_idx) { struct vhost_virtqueue *vq; @@ -1543,7 +1542,8 @@ int rte_vhost_get_vring_base(int vid, uint16_t queue_id, return 0; } -int rte_vhost_set_vring_base(int vid, uint16_t queue_id, +int +rte_vhost_set_vring_base(int vid, uint16_t queue_id, uint16_t last_avail_idx, uint16_t last_used_idx) { struct vhost_virtqueue *vq; @@ -1606,7 +1606,8 @@ rte_vhost_get_vring_base_from_inflight(int vid, return 0; } -int rte_vhost_extern_callback_register(int vid, +int 
+rte_vhost_extern_callback_register(int vid, struct rte_vhost_user_extern_ops const * const ops, void *ctx) { struct virtio_net *dev = get_device(vid); @@ -1619,19 +1620,83 @@ int rte_vhost_extern_callback_register(int vid, return 0; } -int rte_vhost_async_channel_register(int vid, uint16_t queue_id, - uint32_t features, - struct rte_vhost_async_channel_ops *ops) +static __rte_always_inline int +async_channel_register(int vid, uint16_t queue_id, + struct rte_vhost_async_channel_ops *ops) +{ + struct virtio_net *dev = get_device(vid); + struct vhost_virtqueue *vq = dev->virtqueue[queue_id]; + struct vhost_async *async; + int node = vq->numa_node; + + if (unlikely(vq->async)) { + VHOST_LOG_CONFIG(ERR, + "(%s) async register failed: already registered (qid: %d)\n", + dev->ifname, queue_id); + return -1; + } + + async = rte_zmalloc_socket(NULL, sizeof(struct vhost_async), 0, node); + if (!async) { + VHOST_LOG_CONFIG(ERR, "(%s) failed to allocate async metadata (qid: %d)\n", + dev->ifname, queue_id); + return -1; + } + + async->pkts_info = rte_malloc_socket(NULL, vq->size * sizeof(struct async_inflight_info), + RTE_CACHE_LINE_SIZE, node); + if (!async->pkts_info) { + VHOST_LOG_CONFIG(ERR, "(%s) failed to allocate async_pkts_info (qid: %d)\n", + dev->ifname, queue_id); + goto out_free_async; + } + + if (vq_is_packed(dev)) { + async->buffers_packed = rte_malloc_socket(NULL, + vq->size * sizeof(struct vring_used_elem_packed), + RTE_CACHE_LINE_SIZE, node); + if (!async->buffers_packed) { + VHOST_LOG_CONFIG(ERR, "(%s) failed to allocate async buffers (qid: %d)\n", + dev->ifname, queue_id); + goto out_free_inflight; + } + } else { + async->descs_split = rte_malloc_socket(NULL, + vq->size * sizeof(struct vring_used_elem), + RTE_CACHE_LINE_SIZE, node); + if (!async->descs_split) { + VHOST_LOG_CONFIG(ERR, "(%s) failed to allocate async descs (qid: %d)\n", + dev->ifname, queue_id); + goto out_free_inflight; + } + } + + async->ops.check_completed_copies = ops->check_completed_copies; + async->ops.transfer_data = ops->transfer_data; + + vq->async = async; + + return 0; +out_free_inflight: + rte_free(async->pkts_info); +out_free_async: + rte_free(async); + + return -1; +} + +int +rte_vhost_async_channel_register(int vid, uint16_t queue_id, + struct rte_vhost_async_config config, + struct rte_vhost_async_channel_ops *ops) { struct vhost_virtqueue *vq; struct virtio_net *dev = get_device(vid); - struct rte_vhost_async_features f; + int ret; if (dev == NULL || ops == NULL) return -1; - f.intval = features; - if (queue_id >= VHOST_MAX_VRING) return -1; @@ -1640,10 +1705,10 @@ int rte_vhost_async_channel_register(int vid, uint16_t queue_id, if (unlikely(vq == NULL || !dev->async_copy)) return -1; - if (unlikely(!f.async_inorder)) { + if (unlikely(!(config.features & RTE_VHOST_ASYNC_INORDER))) { VHOST_LOG_CONFIG(ERR, - "async copy is not supported on non-inorder mode " - "(vid %d, qid: %d)\n", vid, queue_id); + "(%s) async copy is not supported on non-inorder mode (qid: %d)\n", + dev->ifname, queue_id); return -1; } @@ -1652,86 +1717,47 @@ int rte_vhost_async_channel_register(int vid, uint16_t queue_id, return -1; rte_spinlock_lock(&vq->access_lock); + ret = async_channel_register(vid, queue_id, ops); + rte_spinlock_unlock(&vq->access_lock); - if (unlikely(vq->async_registered)) { - VHOST_LOG_CONFIG(ERR, - "async register failed: channel already registered " - "(vid %d, qid: %d)\n", vid, queue_id); - goto reg_out; - } - - vq->async_pkts_info = rte_malloc_socket(NULL, - vq->size * sizeof(struct 
async_inflight_info), - RTE_CACHE_LINE_SIZE, vq->numa_node); - if (!vq->async_pkts_info) { - vhost_free_async_mem(vq); - VHOST_LOG_CONFIG(ERR, - "async register failed: cannot allocate memory for async_pkts_info " - "(vid %d, qid: %d)\n", vid, queue_id); - goto reg_out; - } + return ret; +} - vq->it_pool = rte_malloc_socket(NULL, - VHOST_MAX_ASYNC_IT * sizeof(struct rte_vhost_iov_iter), - RTE_CACHE_LINE_SIZE, vq->numa_node); - if (!vq->it_pool) { - vhost_free_async_mem(vq); - VHOST_LOG_CONFIG(ERR, - "async register failed: cannot allocate memory for it_pool " - "(vid %d, qid: %d)\n", vid, queue_id); - goto reg_out; - } +int +rte_vhost_async_channel_register_thread_unsafe(int vid, uint16_t queue_id, + struct rte_vhost_async_config config, + struct rte_vhost_async_channel_ops *ops) +{ + struct vhost_virtqueue *vq; + struct virtio_net *dev = get_device(vid); - vq->vec_pool = rte_malloc_socket(NULL, - VHOST_MAX_ASYNC_VEC * sizeof(struct iovec), - RTE_CACHE_LINE_SIZE, vq->numa_node); - if (!vq->vec_pool) { - vhost_free_async_mem(vq); - VHOST_LOG_CONFIG(ERR, - "async register failed: cannot allocate memory for vec_pool " - "(vid %d, qid: %d)\n", vid, queue_id); - goto reg_out; - } + if (dev == NULL || ops == NULL) + return -1; - if (vq_is_packed(dev)) { - vq->async_buffers_packed = rte_malloc_socket(NULL, - vq->size * sizeof(struct vring_used_elem_packed), - RTE_CACHE_LINE_SIZE, vq->numa_node); - if (!vq->async_buffers_packed) { - vhost_free_async_mem(vq); - VHOST_LOG_CONFIG(ERR, - "async register failed: cannot allocate memory for async buffers " - "(vid %d, qid: %d)\n", vid, queue_id); - goto reg_out; - } - } else { - vq->async_descs_split = rte_malloc_socket(NULL, - vq->size * sizeof(struct vring_used_elem), - RTE_CACHE_LINE_SIZE, vq->numa_node); - if (!vq->async_descs_split) { - vhost_free_async_mem(vq); - VHOST_LOG_CONFIG(ERR, - "async register failed: cannot allocate memory for async descs " - "(vid %d, qid: %d)\n", vid, queue_id); - goto reg_out; - } - } + if (queue_id >= VHOST_MAX_VRING) + return -1; - vq->async_ops.check_completed_copies = ops->check_completed_copies; - vq->async_ops.transfer_data = ops->transfer_data; + vq = dev->virtqueue[queue_id]; - vq->async_inorder = f.async_inorder; - vq->async_threshold = f.async_threshold; + if (unlikely(vq == NULL || !dev->async_copy)) + return -1; - vq->async_registered = true; + if (unlikely(!(config.features & RTE_VHOST_ASYNC_INORDER))) { + VHOST_LOG_CONFIG(ERR, + "(%s) async copy is not supported on non-inorder mode (qid: %d)\n", + dev->ifname, queue_id); + return -1; + } -reg_out: - rte_spinlock_unlock(&vq->access_lock); + if (unlikely(ops->check_completed_copies == NULL || + ops->transfer_data == NULL)) + return -1; - return 0; + return async_channel_register(vid, queue_id, ops); } -int rte_vhost_async_channel_unregister(int vid, uint16_t queue_id) +int +rte_vhost_async_channel_unregister(int vid, uint16_t queue_id) { struct vhost_virtqueue *vq; struct virtio_net *dev = get_device(vid); @@ -1750,35 +1776,64 @@ int rte_vhost_async_channel_unregister(int vid, uint16_t queue_id) ret = 0; - if (!vq->async_registered) + if (!vq->async) return ret; if (!rte_spinlock_trylock(&vq->access_lock)) { - VHOST_LOG_CONFIG(ERR, "Failed to unregister async channel. " - "virt queue busy.\n"); + VHOST_LOG_CONFIG(ERR, "(%s) failed to unregister async channel, virtqueue busy.\n", + dev->ifname); return -1; } - if (vq->async_pkts_inflight_n) { - VHOST_LOG_CONFIG(ERR, "Failed to unregister async channel. 
" - "async inflight packets must be completed before unregistration.\n"); + if (vq->async->pkts_inflight_n) { + VHOST_LOG_CONFIG(ERR, "(%s) failed to unregister async channel.\n", dev->ifname); + VHOST_LOG_CONFIG(ERR, "(%s) inflight packets must be completed before unregistration.\n", + dev->ifname); ret = -1; goto out; } vhost_free_async_mem(vq); - - vq->async_ops.transfer_data = NULL; - vq->async_ops.check_completed_copies = NULL; - vq->async_registered = false; - out: rte_spinlock_unlock(&vq->access_lock); return ret; } -int rte_vhost_async_get_inflight(int vid, uint16_t queue_id) +int +rte_vhost_async_channel_unregister_thread_unsafe(int vid, uint16_t queue_id) +{ + struct vhost_virtqueue *vq; + struct virtio_net *dev = get_device(vid); + + if (dev == NULL) + return -1; + + if (queue_id >= VHOST_MAX_VRING) + return -1; + + vq = dev->virtqueue[queue_id]; + + if (vq == NULL) + return -1; + + if (!vq->async) + return 0; + + if (vq->async->pkts_inflight_n) { + VHOST_LOG_CONFIG(ERR, "(%s) failed to unregister async channel.\n", dev->ifname); + VHOST_LOG_CONFIG(ERR, "(%s) inflight packets must be completed before unregistration.\n", + dev->ifname); + return -1; + } + + vhost_free_async_mem(vq); + + return 0; +} + +int +rte_vhost_async_get_inflight(int vid, uint16_t queue_id) { struct vhost_virtqueue *vq; struct virtio_net *dev = get_device(vid); @@ -1795,20 +1850,58 @@ int rte_vhost_async_get_inflight(int vid, uint16_t queue_id) if (vq == NULL) return ret; - if (!vq->async_registered) + if (!vq->async) return ret; if (!rte_spinlock_trylock(&vq->access_lock)) { - VHOST_LOG_CONFIG(DEBUG, "Failed to check in-flight packets. " - "virt queue busy.\n"); + VHOST_LOG_CONFIG(DEBUG, + "(%s) failed to check in-flight packets. virtqueue busy.\n", + dev->ifname); return ret; } - ret = vq->async_pkts_inflight_n; + ret = vq->async->pkts_inflight_n; rte_spinlock_unlock(&vq->access_lock); return ret; +} + +int +rte_vhost_get_monitor_addr(int vid, uint16_t queue_id, + struct rte_vhost_power_monitor_cond *pmc) +{ + struct virtio_net *dev = get_device(vid); + struct vhost_virtqueue *vq; + + if (dev == NULL) + return -1; + if (queue_id >= VHOST_MAX_VRING) + return -1; + + vq = dev->virtqueue[queue_id]; + if (vq == NULL) + return -1; + + if (vq_is_packed(dev)) { + struct vring_packed_desc *desc; + desc = vq->desc_packed; + pmc->addr = &desc[vq->last_avail_idx].flags; + if (vq->avail_wrap_counter) + pmc->val = VRING_DESC_F_AVAIL; + else + pmc->val = VRING_DESC_F_USED; + pmc->mask = VRING_DESC_F_AVAIL | VRING_DESC_F_USED; + pmc->size = sizeof(desc[vq->last_avail_idx].flags); + pmc->match = 1; + } else { + pmc->addr = &vq->avail->idx; + pmc->val = vq->last_avail_idx & (vq->size - 1); + pmc->mask = vq->size - 1; + pmc->size = sizeof(vq->avail->idx); + pmc->match = 0; + } + return 0; } RTE_LOG_REGISTER_SUFFIX(vhost_config_log_level, config, INFO);