X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=lib%2Fvhost%2Fvhost.c;h=13a9bb9dd1769a3b3e5e7898ec3acb661c6b28c8;hb=f01ca13fca6dde68fa3962269c5cddb7e8e7f1d7;hp=c9b6379f73757a53b2809fa0b4571634e4702259;hpb=873e8dad6f493637da02d52d7ad190bda0473758;p=dpdk.git

diff --git a/lib/vhost/vhost.c b/lib/vhost/vhost.c
index c9b6379f73..13a9bb9dd1 100644
--- a/lib/vhost/vhost.c
+++ b/lib/vhost/vhost.c
@@ -261,7 +261,7 @@ vhost_alloc_copy_ind_table(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	uint64_t src, dst;
 	uint64_t len, remain = desc_len;
 
-	idesc = rte_malloc(__func__, desc_len, 0);
+	idesc = rte_malloc_socket(__func__, desc_len, 0, vq->numa_node);
 	if (unlikely(!idesc))
 		return NULL;
 
@@ -312,10 +312,10 @@ cleanup_vq_inflight(struct virtio_net *dev, struct vhost_virtqueue *vq)
 
 	if (vq->resubmit_inflight) {
 		if (vq->resubmit_inflight->resubmit_list) {
-			free(vq->resubmit_inflight->resubmit_list);
+			rte_free(vq->resubmit_inflight->resubmit_list);
 			vq->resubmit_inflight->resubmit_list = NULL;
 		}
-		free(vq->resubmit_inflight);
+		rte_free(vq->resubmit_inflight);
 		vq->resubmit_inflight = NULL;
 	}
 }
@@ -340,19 +340,18 @@ cleanup_device(struct virtio_net *dev, int destroy)
 static void
 vhost_free_async_mem(struct vhost_virtqueue *vq)
 {
-	rte_free(vq->async_pkts_info);
+	if (!vq->async)
+		return;
 
-	rte_free(vq->async_buffers_packed);
-	vq->async_buffers_packed = NULL;
-	rte_free(vq->async_descs_split);
-	vq->async_descs_split = NULL;
+	rte_free(vq->async->pkts_info);
 
-	rte_free(vq->it_pool);
-	rte_free(vq->vec_pool);
+	rte_free(vq->async->buffers_packed);
+	vq->async->buffers_packed = NULL;
+	rte_free(vq->async->descs_split);
+	vq->async->descs_split = NULL;
 
-	vq->async_pkts_info = NULL;
-	vq->it_pool = NULL;
-	vq->vec_pool = NULL;
+	rte_free(vq->async);
+	vq->async = NULL;
 }
 
 void
@@ -365,8 +364,7 @@ free_vq(struct virtio_net *dev, struct vhost_virtqueue *vq)
 
 	vhost_free_async_mem(vq);
 	rte_free(vq->batch_copy_elems);
-	if (vq->iotlb_pool)
-		rte_mempool_free(vq->iotlb_pool);
+	rte_mempool_free(vq->iotlb_pool);
 	rte_free(vq->log_cache);
 	rte_free(vq);
 }
@@ -550,6 +548,7 @@ static void
 init_vring_queue(struct virtio_net *dev, uint32_t vring_idx)
 {
 	struct vhost_virtqueue *vq;
+	int numa_node = SOCKET_ID_ANY;
 
 	if (vring_idx >= VHOST_MAX_VRING) {
 		VHOST_LOG_CONFIG(ERR,
@@ -570,6 +569,17 @@ init_vring_queue(struct virtio_net *dev, uint32_t vring_idx)
 	vq->kickfd = VIRTIO_UNINITIALIZED_EVENTFD;
 	vq->callfd = VIRTIO_UNINITIALIZED_EVENTFD;
 	vq->notif_enable = VIRTIO_UNINITIALIZED_NOTIF;
+
+#ifdef RTE_LIBRTE_VHOST_NUMA
+	if (get_mempolicy(&numa_node, NULL, 0, vq, MPOL_F_NODE | MPOL_F_ADDR)) {
+		VHOST_LOG_CONFIG(ERR, "(%d) failed to query numa node: %s\n",
+			dev->vid, rte_strerror(errno));
+		numa_node = SOCKET_ID_ANY;
+	}
+#endif
+	vq->numa_node = numa_node;
+
+	vhost_user_iotlb_init(dev, vring_idx);
 }
 
 static void
@@ -752,7 +762,7 @@ vhost_set_ifname(int vid, const char *if_name, unsigned int if_len)
 }
 
 void
-vhost_set_builtin_virtio_net(int vid, bool enable)
+vhost_setup_virtio_net(int vid, bool enable, bool compliant_ol_flags)
 {
 	struct virtio_net *dev = get_device(vid);
 
@@ -763,6 +773,10 @@ vhost_set_builtin_virtio_net(int vid, bool enable)
 		dev->flags |= VIRTIO_DEV_BUILTIN_VIRTIO_NET;
 	else
 		dev->flags &= ~VIRTIO_DEV_BUILTIN_VIRTIO_NET;
+	if (!compliant_ol_flags)
+		dev->flags |= VIRTIO_DEV_LEGACY_OL_FLAGS;
+	else
+		dev->flags &= ~VIRTIO_DEV_LEGACY_OL_FLAGS;
 }
 
 void
@@ -1211,6 +1225,9 @@ rte_vhost_set_last_inflight_io_split(int vid, uint16_t vring_idx,
 	if (unlikely(!vq->inflight_split))
 		return -1;
 
+	if (unlikely(idx >= vq->size))
+		return -1;
+
 	vq->inflight_split->last_inflight_io = idx;
 	return 0;
 }
@@ -1485,7 +1502,8 @@ rte_vhost_get_vdpa_device(int vid)
 	return dev->vdpa_dev;
 }
 
-int rte_vhost_get_log_base(int vid, uint64_t *log_base,
+int
+rte_vhost_get_log_base(int vid, uint64_t *log_base,
 		uint64_t *log_size)
 {
 	struct virtio_net *dev = get_device(vid);
@@ -1499,7 +1517,8 @@ int rte_vhost_get_log_base(int vid, uint64_t *log_base,
 	return 0;
 }
 
-int rte_vhost_get_vring_base(int vid, uint16_t queue_id,
+int
+rte_vhost_get_vring_base(int vid, uint16_t queue_id,
 		uint16_t *last_avail_idx, uint16_t *last_used_idx)
 {
 	struct vhost_virtqueue *vq;
@@ -1528,7 +1547,8 @@ int rte_vhost_get_vring_base(int vid, uint16_t queue_id,
 	return 0;
 }
 
-int rte_vhost_set_vring_base(int vid, uint16_t queue_id,
+int
+rte_vhost_set_vring_base(int vid, uint16_t queue_id,
 		uint16_t last_avail_idx, uint16_t last_used_idx)
 {
 	struct vhost_virtqueue *vq;
@@ -1591,7 +1611,8 @@ rte_vhost_get_vring_base_from_inflight(int vid,
 	return 0;
 }
 
-int rte_vhost_extern_callback_register(int vid,
+int
+rte_vhost_extern_callback_register(int vid,
 		struct rte_vhost_user_extern_ops const * const ops, void *ctx)
 {
 	struct virtio_net *dev = get_device(vid);
@@ -1604,20 +1625,83 @@ int rte_vhost_extern_callback_register(int vid,
 	return 0;
 }
 
-int rte_vhost_async_channel_register(int vid, uint16_t queue_id,
-					uint32_t features,
-					struct rte_vhost_async_channel_ops *ops)
+static __rte_always_inline int
+async_channel_register(int vid, uint16_t queue_id,
+		struct rte_vhost_async_channel_ops *ops)
+{
+	struct virtio_net *dev = get_device(vid);
+	struct vhost_virtqueue *vq = dev->virtqueue[queue_id];
+	struct vhost_async *async;
+	int node = vq->numa_node;
+
+	if (unlikely(vq->async)) {
+		VHOST_LOG_CONFIG(ERR,
+				"async register failed: already registered (vid %d, qid: %d)\n",
+				vid, queue_id);
+		return -1;
+	}
+
+	async = rte_zmalloc_socket(NULL, sizeof(struct vhost_async), 0, node);
+	if (!async) {
+		VHOST_LOG_CONFIG(ERR, "failed to allocate async metadata (vid %d, qid: %d)\n",
+				vid, queue_id);
+		return -1;
+	}
+
+	async->pkts_info = rte_malloc_socket(NULL, vq->size * sizeof(struct async_inflight_info),
+			RTE_CACHE_LINE_SIZE, node);
+	if (!async->pkts_info) {
+		VHOST_LOG_CONFIG(ERR, "failed to allocate async_pkts_info (vid %d, qid: %d)\n",
+				vid, queue_id);
+		goto out_free_async;
+	}
+
+	if (vq_is_packed(dev)) {
+		async->buffers_packed = rte_malloc_socket(NULL,
+				vq->size * sizeof(struct vring_used_elem_packed),
+				RTE_CACHE_LINE_SIZE, node);
+		if (!async->buffers_packed) {
+			VHOST_LOG_CONFIG(ERR, "failed to allocate async buffers (vid %d, qid: %d)\n",
+					vid, queue_id);
+			goto out_free_inflight;
+		}
+	} else {
+		async->descs_split = rte_malloc_socket(NULL,
+				vq->size * sizeof(struct vring_used_elem),
+				RTE_CACHE_LINE_SIZE, node);
+		if (!async->descs_split) {
+			VHOST_LOG_CONFIG(ERR, "failed to allocate async descs (vid %d, qid: %d)\n",
+					vid, queue_id);
+			goto out_free_inflight;
+		}
+	}
+
+	async->ops.check_completed_copies = ops->check_completed_copies;
+	async->ops.transfer_data = ops->transfer_data;
+
+	vq->async = async;
+
+	return 0;
+out_free_inflight:
+	rte_free(async->pkts_info);
+out_free_async:
+	rte_free(async);
+
+	return -1;
+}
+
+int
+rte_vhost_async_channel_register(int vid, uint16_t queue_id,
+		struct rte_vhost_async_config config,
+		struct rte_vhost_async_channel_ops *ops)
 {
 	struct vhost_virtqueue *vq;
 	struct virtio_net *dev = get_device(vid);
-	struct rte_vhost_async_features f;
-	int node;
+	int ret;
 
 	if (dev == NULL || ops == NULL)
 		return -1;
 
-	f.intval = features;
-
 	if (queue_id >= VHOST_MAX_VRING)
 		return -1;
 
@@ -1626,7 +1710,7 @@ int rte_vhost_async_channel_register(int vid, uint16_t queue_id,
 	if (unlikely(vq == NULL || !dev->async_copy))
 		return -1;
 
-	if (unlikely(!f.async_inorder)) {
+	if (unlikely(!(config.features & RTE_VHOST_ASYNC_INORDER))) {
 		VHOST_LOG_CONFIG(ERR,
 			"async copy is not supported on non-inorder mode "
 			"(vid %d, qid: %d)\n", vid, queue_id);
@@ -1638,97 +1722,47 @@ int rte_vhost_async_channel_register(int vid, uint16_t queue_id,
 		return -1;
 
 	rte_spinlock_lock(&vq->access_lock);
+	ret = async_channel_register(vid, queue_id, ops);
+	rte_spinlock_unlock(&vq->access_lock);
 
-	if (unlikely(vq->async_registered)) {
-		VHOST_LOG_CONFIG(ERR,
-			"async register failed: channel already registered "
-			"(vid %d, qid: %d)\n", vid, queue_id);
-		goto reg_out;
-	}
+	return ret;
+}
 
-#ifdef RTE_LIBRTE_VHOST_NUMA
-	if (get_mempolicy(&node, NULL, 0, vq, MPOL_F_NODE | MPOL_F_ADDR)) {
-		VHOST_LOG_CONFIG(ERR,
-			"unable to get numa information in async register. "
-			"allocating async buffer memory on the caller thread node\n");
-		node = SOCKET_ID_ANY;
-	}
-#else
-	node = SOCKET_ID_ANY;
-#endif
+int
+rte_vhost_async_channel_register_thread_unsafe(int vid, uint16_t queue_id,
+		struct rte_vhost_async_config config,
+		struct rte_vhost_async_channel_ops *ops)
+{
+	struct vhost_virtqueue *vq;
+	struct virtio_net *dev = get_device(vid);
 
-	vq->async_pkts_info = rte_malloc_socket(NULL,
-			vq->size * sizeof(struct async_inflight_info),
-			RTE_CACHE_LINE_SIZE, node);
-	if (!vq->async_pkts_info) {
-		vhost_free_async_mem(vq);
-		VHOST_LOG_CONFIG(ERR,
-			"async register failed: cannot allocate memory for async_pkts_info "
-			"(vid %d, qid: %d)\n", vid, queue_id);
-		goto reg_out;
-	}
+	if (dev == NULL || ops == NULL)
		return -1;
 
-	vq->it_pool = rte_malloc_socket(NULL,
-			VHOST_MAX_ASYNC_IT * sizeof(struct rte_vhost_iov_iter),
-			RTE_CACHE_LINE_SIZE, node);
-	if (!vq->it_pool) {
-		vhost_free_async_mem(vq);
-		VHOST_LOG_CONFIG(ERR,
-			"async register failed: cannot allocate memory for it_pool "
-			"(vid %d, qid: %d)\n", vid, queue_id);
-		goto reg_out;
-	}
+	if (queue_id >= VHOST_MAX_VRING)
+		return -1;
 
-	vq->vec_pool = rte_malloc_socket(NULL,
-			VHOST_MAX_ASYNC_VEC * sizeof(struct iovec),
-			RTE_CACHE_LINE_SIZE, node);
-	if (!vq->vec_pool) {
-		vhost_free_async_mem(vq);
+	vq = dev->virtqueue[queue_id];
+
+	if (unlikely(vq == NULL || !dev->async_copy))
+		return -1;
+
+	if (unlikely(!(config.features & RTE_VHOST_ASYNC_INORDER))) {
 		VHOST_LOG_CONFIG(ERR,
-			"async register failed: cannot allocate memory for vec_pool "
+			"async copy is not supported on non-inorder mode "
 			"(vid %d, qid: %d)\n", vid, queue_id);
-		goto reg_out;
-	}
-
-	if (vq_is_packed(dev)) {
-		vq->async_buffers_packed = rte_malloc_socket(NULL,
-			vq->size * sizeof(struct vring_used_elem_packed),
-			RTE_CACHE_LINE_SIZE, node);
-		if (!vq->async_buffers_packed) {
-			vhost_free_async_mem(vq);
-			VHOST_LOG_CONFIG(ERR,
-				"async register failed: cannot allocate memory for async buffers "
-				"(vid %d, qid: %d)\n", vid, queue_id);
-			goto reg_out;
-		}
-	} else {
-		vq->async_descs_split = rte_malloc_socket(NULL,
-			vq->size * sizeof(struct vring_used_elem),
-			RTE_CACHE_LINE_SIZE, node);
-		if (!vq->async_descs_split) {
-			vhost_free_async_mem(vq);
-			VHOST_LOG_CONFIG(ERR,
-				"async register failed: cannot allocate memory for async descs "
-				"(vid %d, qid: %d)\n", vid, queue_id);
-			goto reg_out;
-		}
+		return -1;
 	}
 
-	vq->async_ops.check_completed_copies = ops->check_completed_copies;
-	vq->async_ops.transfer_data = ops->transfer_data;
-
-	vq->async_inorder = f.async_inorder;
-	vq->async_threshold = f.async_threshold;
-
-	vq->async_registered = true;
-
-reg_out:
-	rte_spinlock_unlock(&vq->access_lock);
+	if (unlikely(ops->check_completed_copies == NULL ||
+		ops->transfer_data == NULL))
+		return -1;
 
-	return 0;
+	return async_channel_register(vid, queue_id, ops);
 }
 
-int rte_vhost_async_channel_unregister(int vid, uint16_t queue_id)
+int
+rte_vhost_async_channel_unregister(int vid, uint16_t queue_id)
 {
 	struct vhost_virtqueue *vq;
 	struct virtio_net *dev = get_device(vid);
@@ -1747,7 +1781,7 @@ int rte_vhost_async_channel_unregister(int vid, uint16_t queue_id)
 
 	ret = 0;
 
-	if (!vq->async_registered)
+	if (!vq->async)
 		return ret;
 
 	if (!rte_spinlock_trylock(&vq->access_lock)) {
@@ -1756,7 +1790,7 @@ int rte_vhost_async_channel_unregister(int vid, uint16_t queue_id)
 		return -1;
 	}
 
-	if (vq->async_pkts_inflight_n) {
+	if (vq->async->pkts_inflight_n) {
 		VHOST_LOG_CONFIG(ERR, "Failed to unregister async channel. "
 			"async inflight packets must be completed before unregistration.\n");
 		ret = -1;
@@ -1764,16 +1798,113 @@ int rte_vhost_async_channel_unregister(int vid, uint16_t queue_id)
 	}
 
 	vhost_free_async_mem(vq);
+out:
+	rte_spinlock_unlock(&vq->access_lock);
 
-	vq->async_ops.transfer_data = NULL;
-	vq->async_ops.check_completed_copies = NULL;
-	vq->async_registered = false;
+	return ret;
+}
 
-out:
+int
+rte_vhost_async_channel_unregister_thread_unsafe(int vid, uint16_t queue_id)
+{
+	struct vhost_virtqueue *vq;
+	struct virtio_net *dev = get_device(vid);
+
+	if (dev == NULL)
+		return -1;
+
+	if (queue_id >= VHOST_MAX_VRING)
+		return -1;
+
+	vq = dev->virtqueue[queue_id];
+
+	if (vq == NULL)
+		return -1;
+
+	if (!vq->async)
+		return 0;
+
+	if (vq->async->pkts_inflight_n) {
+		VHOST_LOG_CONFIG(ERR, "Failed to unregister async channel. "
+			"async inflight packets must be completed before unregistration.\n");
+		return -1;
+	}
+
+	vhost_free_async_mem(vq);
+
+	return 0;
+}
+
+int
+rte_vhost_async_get_inflight(int vid, uint16_t queue_id)
+{
+	struct vhost_virtqueue *vq;
+	struct virtio_net *dev = get_device(vid);
+	int ret = -1;
+
+	if (dev == NULL)
+		return ret;
+
+	if (queue_id >= VHOST_MAX_VRING)
+		return ret;
+
+	vq = dev->virtqueue[queue_id];
+
+	if (vq == NULL)
+		return ret;
+
+	if (!vq->async)
+		return ret;
+
+	if (!rte_spinlock_trylock(&vq->access_lock)) {
+		VHOST_LOG_CONFIG(DEBUG, "Failed to check in-flight packets. "
+			"virt queue busy.\n");
+		return ret;
+	}
+
+	ret = vq->async->pkts_inflight_n;
 	rte_spinlock_unlock(&vq->access_lock);
 
 	return ret;
 }
 
-RTE_LOG_REGISTER(vhost_config_log_level, lib.vhost.config, INFO);
-RTE_LOG_REGISTER(vhost_data_log_level, lib.vhost.data, WARNING);
+int
+rte_vhost_get_monitor_addr(int vid, uint16_t queue_id,
+		struct rte_vhost_power_monitor_cond *pmc)
+{
+	struct virtio_net *dev = get_device(vid);
+	struct vhost_virtqueue *vq;
+
+	if (dev == NULL)
+		return -1;
+	if (queue_id >= VHOST_MAX_VRING)
+		return -1;
+
+	vq = dev->virtqueue[queue_id];
+	if (vq == NULL)
+		return -1;
+
+	if (vq_is_packed(dev)) {
+		struct vring_packed_desc *desc;
+		desc = vq->desc_packed;
+		pmc->addr = &desc[vq->last_avail_idx].flags;
+		if (vq->avail_wrap_counter)
+			pmc->val = VRING_DESC_F_AVAIL;
+		else
+			pmc->val = VRING_DESC_F_USED;
+		pmc->mask = VRING_DESC_F_AVAIL | VRING_DESC_F_USED;
+		pmc->size = sizeof(desc[vq->last_avail_idx].flags);
+		pmc->match = 1;
+	} else {
+		pmc->addr = &vq->avail->idx;
+		pmc->val = vq->last_avail_idx & (vq->size - 1);
+		pmc->mask = vq->size - 1;
+		pmc->size = sizeof(vq->avail->idx);
+		pmc->match = 0;
+	}
+
+	return 0;
+}
+
+RTE_LOG_REGISTER_SUFFIX(vhost_config_log_level, config, INFO);
+RTE_LOG_REGISTER_SUFFIX(vhost_data_log_level, data, WARNING);