diff --git a/lib/vhost/vhost.c b/lib/vhost/vhost.c
index 3b674ac320..f59ca6c157 100644
--- a/lib/vhost/vhost.c
+++ b/lib/vhost/vhost.c
@@ -56,11 +56,10 @@ __vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	 */
 	vhost_user_iotlb_rd_unlock(vq);
 
-	vhost_user_iotlb_pending_insert(vq, iova, perm);
+	vhost_user_iotlb_pending_insert(dev, vq, iova, perm);
 	if (vhost_user_iotlb_miss(dev, iova, perm)) {
-		VHOST_LOG_CONFIG(ERR,
-			"IOTLB miss req failed for IOVA 0x%" PRIx64 "\n",
-			iova);
+		VHOST_LOG_DATA(ERR, "(%s) IOTLB miss req failed for IOVA 0x%" PRIx64 "\n",
+			dev->ifname, iova);
 		vhost_user_iotlb_pending_remove(vq, iova, 1, perm);
 	}
 
@@ -126,8 +125,8 @@ __vhost_log_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	hva = __vhost_iova_to_vva(dev, vq, iova, &map_len, VHOST_ACCESS_RW);
 	if (map_len != len) {
 		VHOST_LOG_DATA(ERR,
-			"Failed to write log for IOVA 0x%" PRIx64 ". No IOTLB entry found\n",
-			iova);
+			"(%s) failed to write log for IOVA 0x%" PRIx64 ". No IOTLB entry found\n",
+			dev->ifname, iova);
 		return;
 	}
 
@@ -243,8 +242,8 @@ __vhost_log_cache_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	hva = __vhost_iova_to_vva(dev, vq, iova, &map_len, VHOST_ACCESS_RW);
 	if (map_len != len) {
 		VHOST_LOG_DATA(ERR,
-			"Failed to write log for IOVA 0x%" PRIx64 ". No IOTLB entry found\n",
-			iova);
+			"(%s) failed to write log for IOVA 0x%" PRIx64 ". No IOTLB entry found\n",
+			dev->ifname, iova);
 		return;
 	}
 
@@ -340,19 +339,18 @@ cleanup_device(struct virtio_net *dev, int destroy)
 static void
 vhost_free_async_mem(struct vhost_virtqueue *vq)
 {
-	rte_free(vq->async_pkts_info);
+	if (!vq->async)
+		return;
 
-	rte_free(vq->async_buffers_packed);
-	vq->async_buffers_packed = NULL;
-	rte_free(vq->async_descs_split);
-	vq->async_descs_split = NULL;
+	rte_free(vq->async->pkts_info);
 
-	rte_free(vq->it_pool);
-	rte_free(vq->vec_pool);
+	rte_free(vq->async->buffers_packed);
+	vq->async->buffers_packed = NULL;
+	rte_free(vq->async->descs_split);
+	vq->async->descs_split = NULL;
 
-	vq->async_pkts_info = NULL;
-	vq->it_pool = NULL;
-	vq->vec_pool = NULL;
+	rte_free(vq->async);
+	vq->async = NULL;
 }
 
 void
@@ -422,10 +420,10 @@ translate_log_addr(struct virtio_net *dev, struct vhost_virtqueue *vq,
 
 		gpa = hva_to_gpa(dev, hva, exp_size);
 		if (!gpa) {
-			VHOST_LOG_CONFIG(ERR,
-				"VQ: Failed to find GPA for log_addr: 0x%"
+			VHOST_LOG_DATA(ERR,
+				"(%s) failed to find GPA for log_addr: 0x%"
 				PRIx64 " hva: 0x%" PRIx64 "\n",
-				log_addr, hva);
+				dev->ifname, log_addr, hva);
 			return 0;
 		}
 		return gpa;
@@ -552,16 +550,15 @@ init_vring_queue(struct virtio_net *dev, uint32_t vring_idx)
 	int numa_node = SOCKET_ID_ANY;
 
 	if (vring_idx >= VHOST_MAX_VRING) {
-		VHOST_LOG_CONFIG(ERR,
-			"Failed not init vring, out of bound (%d)\n",
-			vring_idx);
+		VHOST_LOG_CONFIG(ERR, "(%s) failed to init vring, out of bound (%d)\n",
+				dev->ifname, vring_idx);
 		return;
 	}
 
 	vq = dev->virtqueue[vring_idx];
 	if (!vq) {
-		VHOST_LOG_CONFIG(ERR, "Virtqueue not allocated (%d)\n",
-			vring_idx);
+		VHOST_LOG_CONFIG(ERR, "(%s) virtqueue not allocated (%d)\n",
+				dev->ifname, vring_idx);
 		return;
 	}
 
@@ -573,8 +570,8 @@ init_vring_queue(struct virtio_net *dev, uint32_t vring_idx)
 
 #ifdef RTE_LIBRTE_VHOST_NUMA
 	if (get_mempolicy(&numa_node, NULL, 0, vq, MPOL_F_NODE | MPOL_F_ADDR)) {
-		VHOST_LOG_CONFIG(ERR, "(%d) failed to query numa node: %s\n",
-			dev->vid, rte_strerror(errno));
+		VHOST_LOG_CONFIG(ERR, "(%s) failed to query numa node: %s\n",
+				dev->ifname, rte_strerror(errno));
 		numa_node = SOCKET_ID_ANY;
 	}
 #endif
@@ -591,15 +588,15 @@ reset_vring_queue(struct virtio_net *dev, uint32_t vring_idx)
 
 	if (vring_idx >= VHOST_MAX_VRING) {
 		VHOST_LOG_CONFIG(ERR,
-			"Failed not init vring, out of bound (%d)\n",
-			vring_idx);
+				"(%s) failed to reset vring, out of bound (%d)\n",
+				dev->ifname, vring_idx);
 		return;
 	}
 
 	vq = dev->virtqueue[vring_idx];
 	if (!vq) {
-		VHOST_LOG_CONFIG(ERR, "Virtqueue not allocated (%d)\n",
-			vring_idx);
+		VHOST_LOG_CONFIG(ERR, "(%s) failed to reset vring, virtqueue not allocated (%d)\n",
+				dev->ifname, vring_idx);
 		return;
 	}
 
@@ -621,8 +618,8 @@ alloc_vring_queue(struct virtio_net *dev, uint32_t vring_idx)
 
 		vq = rte_zmalloc(NULL, sizeof(struct vhost_virtqueue), 0);
 		if (vq == NULL) {
-			VHOST_LOG_CONFIG(ERR,
-				"Failed to allocate memory for vring:%u.\n", i);
+			VHOST_LOG_CONFIG(ERR, "(%s) failed to allocate memory for vring %u.\n",
+					dev->ifname, i);
 			return -1;
 		}
 
@@ -674,16 +671,14 @@ vhost_new_device(void)
 	}
 
 	if (i == MAX_VHOST_DEVICE) {
-		VHOST_LOG_CONFIG(ERR,
-			"Failed to find a free slot for new device.\n");
+		VHOST_LOG_CONFIG(ERR, "failed to find a free slot for new device.\n");
 		pthread_mutex_unlock(&vhost_dev_lock);
 		return -1;
 	}
 
 	dev = rte_zmalloc(NULL, sizeof(struct virtio_net), 0);
 	if (dev == NULL) {
-		VHOST_LOG_CONFIG(ERR,
-			"Failed to allocate memory for new dev.\n");
+		VHOST_LOG_CONFIG(ERR, "failed to allocate memory for new device.\n");
 		pthread_mutex_unlock(&vhost_dev_lock);
 		return -1;
 	}
@@ -835,9 +830,8 @@ rte_vhost_get_numa_node(int vid)
 	ret = get_mempolicy(&numa_node, NULL, 0, dev,
 			    MPOL_F_NODE | MPOL_F_ADDR);
 	if (ret < 0) {
-		VHOST_LOG_CONFIG(ERR,
-			"(%d) failed to query numa node: %s\n",
-			vid, rte_strerror(errno));
+		VHOST_LOG_CONFIG(ERR, "(%s) failed to query numa node: %s\n",
+				dev->ifname, rte_strerror(errno));
 		return -1;
 	}
 
@@ -1471,8 +1465,8 @@ rte_vhost_rx_queue_count(int vid, uint16_t qid)
 		return 0;
 
 	if (unlikely(qid >= dev->nr_vring || (qid & 1) == 0)) {
-		VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue idx %d.\n",
-			dev->vid, __func__, qid);
+		VHOST_LOG_DATA(ERR, "(%s) %s: invalid virtqueue idx %d.\n",
+			dev->ifname, __func__, qid);
 		return 0;
 	}
 
@@ -1632,77 +1626,63 @@ async_channel_register(int vid, uint16_t queue_id,
 {
 	struct virtio_net *dev = get_device(vid);
 	struct vhost_virtqueue *vq = dev->virtqueue[queue_id];
+	struct vhost_async *async;
+	int node = vq->numa_node;
 
-	if (unlikely(vq->async_registered)) {
+	if (unlikely(vq->async)) {
 		VHOST_LOG_CONFIG(ERR,
-				"async register failed: channel already registered "
-				"(vid %d, qid: %d)\n", vid, queue_id);
+				"(%s) async register failed: already registered (qid: %d)\n",
+				dev->ifname, queue_id);
 		return -1;
 	}
 
-	vq->async_pkts_info = rte_malloc_socket(NULL,
-			vq->size * sizeof(struct async_inflight_info),
-			RTE_CACHE_LINE_SIZE, vq->numa_node);
-	if (!vq->async_pkts_info) {
-		vhost_free_async_mem(vq);
-		VHOST_LOG_CONFIG(ERR,
-				"async register failed: cannot allocate memory for async_pkts_info "
-				"(vid %d, qid: %d)\n", vid, queue_id);
+	async = rte_zmalloc_socket(NULL, sizeof(struct vhost_async), 0, node);
+	if (!async) {
+		VHOST_LOG_CONFIG(ERR, "(%s) failed to allocate async metadata (qid: %d)\n",
+				dev->ifname, queue_id);
 		return -1;
 	}
 
-	vq->it_pool = rte_malloc_socket(NULL,
-			VHOST_MAX_ASYNC_IT * sizeof(struct rte_vhost_iov_iter),
-			RTE_CACHE_LINE_SIZE, vq->numa_node);
-	if (!vq->it_pool) {
-		vhost_free_async_mem(vq);
-		VHOST_LOG_CONFIG(ERR,
-				"async register failed: cannot allocate memory for it_pool "
-				"(vid %d, qid: %d)\n", vid, queue_id);
-		return -1;
-	}
-
-	vq->vec_pool = rte_malloc_socket(NULL,
-			VHOST_MAX_ASYNC_VEC * sizeof(struct iovec),
-			RTE_CACHE_LINE_SIZE, vq->numa_node);
-	if (!vq->vec_pool) {
-		vhost_free_async_mem(vq);
-		VHOST_LOG_CONFIG(ERR,
-				"async register failed: cannot allocate memory for vec_pool "
-				"(vid %d, qid: %d)\n", vid, queue_id);
-		return -1;
+	async->pkts_info = rte_malloc_socket(NULL, vq->size * sizeof(struct async_inflight_info),
+			RTE_CACHE_LINE_SIZE, node);
+	if (!async->pkts_info) {
+		VHOST_LOG_CONFIG(ERR, "(%s) failed to allocate async_pkts_info (qid: %d)\n",
+				dev->ifname, queue_id);
+		goto out_free_async;
 	}
 
 	if (vq_is_packed(dev)) {
-		vq->async_buffers_packed = rte_malloc_socket(NULL,
-			vq->size * sizeof(struct vring_used_elem_packed),
-			RTE_CACHE_LINE_SIZE, vq->numa_node);
-		if (!vq->async_buffers_packed) {
-			vhost_free_async_mem(vq);
-			VHOST_LOG_CONFIG(ERR,
-				"async register failed: cannot allocate memory for async buffers "
-				"(vid %d, qid: %d)\n", vid, queue_id);
-			return -1;
+		async->buffers_packed = rte_malloc_socket(NULL,
+				vq->size * sizeof(struct vring_used_elem_packed),
+				RTE_CACHE_LINE_SIZE, node);
+		if (!async->buffers_packed) {
+			VHOST_LOG_CONFIG(ERR, "(%s) failed to allocate async buffers (qid: %d)\n",
+					dev->ifname, queue_id);
+			goto out_free_inflight;
 		}
 	} else {
-		vq->async_descs_split = rte_malloc_socket(NULL,
-			vq->size * sizeof(struct vring_used_elem),
-			RTE_CACHE_LINE_SIZE, vq->numa_node);
-		if (!vq->async_descs_split) {
-			vhost_free_async_mem(vq);
-			VHOST_LOG_CONFIG(ERR,
-				"async register failed: cannot allocate memory for async descs "
-				"(vid %d, qid: %d)\n", vid, queue_id);
-			return -1;
+		async->descs_split = rte_malloc_socket(NULL,
+				vq->size * sizeof(struct vring_used_elem),
+				RTE_CACHE_LINE_SIZE, node);
+		if (!async->descs_split) {
+			VHOST_LOG_CONFIG(ERR, "(%s) failed to allocate async descs (qid: %d)\n",
+					dev->ifname, queue_id);
+			goto out_free_inflight;
 		}
 	}
 
-	vq->async_ops.check_completed_copies = ops->check_completed_copies;
-	vq->async_ops.transfer_data = ops->transfer_data;
+	async->ops.check_completed_copies = ops->check_completed_copies;
+	async->ops.transfer_data = ops->transfer_data;
 
-	vq->async_registered = true;
+	vq->async = async;
 
 	return 0;
+out_free_inflight:
+	rte_free(async->pkts_info);
+out_free_async:
+	rte_free(async);
+
+	return -1;
 }
 
 int
@@ -1727,8 +1707,8 @@ rte_vhost_async_channel_register(int vid, uint16_t queue_id,
 
 	if (unlikely(!(config.features & RTE_VHOST_ASYNC_INORDER))) {
 		VHOST_LOG_CONFIG(ERR,
-			"async copy is not supported on non-inorder mode "
-			"(vid %d, qid: %d)\n", vid, queue_id);
+			"(%s) async copy is not supported on non-inorder mode (qid: %d)\n",
+			dev->ifname, queue_id);
 		return -1;
 	}
 
@@ -1764,8 +1744,8 @@ rte_vhost_async_channel_register_thread_unsafe(int vid, uint16_t queue_id,
 
 	if (unlikely(!(config.features & RTE_VHOST_ASYNC_INORDER))) {
 		VHOST_LOG_CONFIG(ERR,
-			"async copy is not supported on non-inorder mode "
-			"(vid %d, qid: %d)\n", vid, queue_id);
+			"(%s) async copy is not supported on non-inorder mode (qid: %d)\n",
+			dev->ifname, queue_id);
 		return -1;
 	}
 
@@ -1796,28 +1776,24 @@ rte_vhost_async_channel_unregister(int vid, uint16_t queue_id)
 
 	ret = 0;
 
-	if (!vq->async_registered)
+	if (!vq->async)
 		return ret;
 
 	if (!rte_spinlock_trylock(&vq->access_lock)) {
-		VHOST_LOG_CONFIG(ERR, "Failed to unregister async channel. "
-				"virt queue busy.\n");
+		VHOST_LOG_CONFIG(ERR, "(%s) failed to unregister async channel, virtqueue busy.\n",
+				dev->ifname);
 		return -1;
 	}
 
-	if (vq->async_pkts_inflight_n) {
-		VHOST_LOG_CONFIG(ERR, "Failed to unregister async channel. "
-			"async inflight packets must be completed before unregistration.\n");
+	if (vq->async->pkts_inflight_n) {
+		VHOST_LOG_CONFIG(ERR, "(%s) failed to unregister async channel.\n", dev->ifname);
+		VHOST_LOG_CONFIG(ERR, "(%s) inflight packets must be completed before unregistration.\n",
+				dev->ifname);
 		ret = -1;
 		goto out;
 	}
 
 	vhost_free_async_mem(vq);
-
-	vq->async_ops.transfer_data = NULL;
-	vq->async_ops.check_completed_copies = NULL;
-	vq->async_registered = false;
-
 out:
 	rte_spinlock_unlock(&vq->access_lock);
 
@@ -1841,21 +1817,18 @@ rte_vhost_async_channel_unregister_thread_unsafe(int vid, uint16_t queue_id)
 	if (vq == NULL)
 		return -1;
 
-	if (!vq->async_registered)
+	if (!vq->async)
 		return 0;
 
-	if (vq->async_pkts_inflight_n) {
-		VHOST_LOG_CONFIG(ERR, "Failed to unregister async channel. "
-			"async inflight packets must be completed before unregistration.\n");
+	if (vq->async->pkts_inflight_n) {
+		VHOST_LOG_CONFIG(ERR, "(%s) failed to unregister async channel.\n", dev->ifname);
+		VHOST_LOG_CONFIG(ERR, "(%s) inflight packets must be completed before unregistration.\n",
+				dev->ifname);
 		return -1;
 	}
 
 	vhost_free_async_mem(vq);
 
-	vq->async_ops.transfer_data = NULL;
-	vq->async_ops.check_completed_copies = NULL;
-	vq->async_registered = false;
-
 	return 0;
 }
 
@@ -1877,20 +1850,59 @@ rte_vhost_async_get_inflight(int vid, uint16_t queue_id)
 	if (vq == NULL)
 		return ret;
 
-	if (!vq->async_registered)
+	if (!vq->async)
 		return ret;
 
 	if (!rte_spinlock_trylock(&vq->access_lock)) {
-		VHOST_LOG_CONFIG(DEBUG, "Failed to check in-flight packets. "
-			"virt queue busy.\n");
+		VHOST_LOG_CONFIG(DEBUG,
+			"(%s) failed to check in-flight packets. virtqueue busy.\n",
+			dev->ifname);
 		return ret;
 	}
 
-	ret = vq->async_pkts_inflight_n;
+	ret = vq->async->pkts_inflight_n;
 	rte_spinlock_unlock(&vq->access_lock);
 
 	return ret;
 }
 
+int
+rte_vhost_get_monitor_addr(int vid, uint16_t queue_id,
+		struct rte_vhost_power_monitor_cond *pmc)
+{
+	struct virtio_net *dev = get_device(vid);
+	struct vhost_virtqueue *vq;
+
+	if (dev == NULL)
+		return -1;
+	if (queue_id >= VHOST_MAX_VRING)
+		return -1;
+
+	vq = dev->virtqueue[queue_id];
+	if (vq == NULL)
+		return -1;
+
+	if (vq_is_packed(dev)) {
+		struct vring_packed_desc *desc;
+		desc = vq->desc_packed;
+		pmc->addr = &desc[vq->last_avail_idx].flags;
+		if (vq->avail_wrap_counter)
+			pmc->val = VRING_DESC_F_AVAIL;
+		else
+			pmc->val = VRING_DESC_F_USED;
+		pmc->mask = VRING_DESC_F_AVAIL | VRING_DESC_F_USED;
+		pmc->size = sizeof(desc[vq->last_avail_idx].flags);
+		pmc->match = 1;
+	} else {
+		pmc->addr = &vq->avail->idx;
+		pmc->val = vq->last_avail_idx & (vq->size - 1);
+		pmc->mask = vq->size - 1;
+		pmc->size = sizeof(vq->avail->idx);
+		pmc->match = 0;
+	}
+
+	return 0;
+}
+
 RTE_LOG_REGISTER_SUFFIX(vhost_config_log_level, config, INFO);
 RTE_LOG_REGISTER_SUFFIX(vhost_data_log_level, data, WARNING);