X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=lib%2Fvhost%2Fvhost.c;h=3b674ac3209096c0dc9869a13c4ac396a2d44406;hb=f2777b53b189813e9c22e2cf0bbecb9327f8dcce;hp=b6e78bb4b7f3f68e78532b08fb2c53048e223228;hpb=acbc38887b0a8c564e1f3fe1d9f65dfeb0d3c57a;p=dpdk.git

diff --git a/lib/vhost/vhost.c b/lib/vhost/vhost.c
index b6e78bb4b7..3b674ac320 100644
--- a/lib/vhost/vhost.c
+++ b/lib/vhost/vhost.c
@@ -1226,6 +1226,9 @@ rte_vhost_set_last_inflight_io_split(int vid, uint16_t vring_idx,
 	if (unlikely(!vq->inflight_split))
 		return -1;
 
+	if (unlikely(idx >= vq->size))
+		return -1;
+
 	vq->inflight_split->last_inflight_io = idx;
 	return 0;
 }
@@ -1500,7 +1503,8 @@ rte_vhost_get_vdpa_device(int vid)
 	return dev->vdpa_dev;
 }
 
-int rte_vhost_get_log_base(int vid, uint64_t *log_base,
+int
+rte_vhost_get_log_base(int vid, uint64_t *log_base,
 		uint64_t *log_size)
 {
 	struct virtio_net *dev = get_device(vid);
@@ -1514,7 +1518,8 @@ int rte_vhost_get_log_base(int vid, uint64_t *log_base,
 	return 0;
 }
 
-int rte_vhost_get_vring_base(int vid, uint16_t queue_id,
+int
+rte_vhost_get_vring_base(int vid, uint16_t queue_id,
 		uint16_t *last_avail_idx, uint16_t *last_used_idx)
 {
 	struct vhost_virtqueue *vq;
@@ -1543,7 +1548,8 @@ int rte_vhost_get_vring_base(int vid, uint16_t queue_id,
 	return 0;
 }
 
-int rte_vhost_set_vring_base(int vid, uint16_t queue_id,
+int
+rte_vhost_set_vring_base(int vid, uint16_t queue_id,
 		uint16_t last_avail_idx, uint16_t last_used_idx)
 {
 	struct vhost_virtqueue *vq;
@@ -1606,7 +1612,8 @@ rte_vhost_get_vring_base_from_inflight(int vid,
 	return 0;
 }
 
-int rte_vhost_extern_callback_register(int vid,
+int
+rte_vhost_extern_callback_register(int vid,
 		struct rte_vhost_user_extern_ops const * const ops, void *ctx)
 {
 	struct virtio_net *dev = get_device(vid);
@@ -1619,43 +1626,18 @@ int rte_vhost_extern_callback_register(int vid,
 	return 0;
 }
 
-int
-rte_vhost_async_channel_register(int vid, uint16_t queue_id,
-		struct rte_vhost_async_config config,
+static __rte_always_inline int
+async_channel_register(int vid, uint16_t queue_id,
 		struct rte_vhost_async_channel_ops *ops)
 {
-	struct vhost_virtqueue *vq;
 	struct virtio_net *dev = get_device(vid);
-
-	if (dev == NULL || ops == NULL)
-		return -1;
-
-	if (queue_id >= VHOST_MAX_VRING)
-		return -1;
-
-	vq = dev->virtqueue[queue_id];
-
-	if (unlikely(vq == NULL || !dev->async_copy))
-		return -1;
-
-	if (unlikely(!(config.features & RTE_VHOST_ASYNC_INORDER))) {
-		VHOST_LOG_CONFIG(ERR,
-			"async copy is not supported on non-inorder mode "
-			"(vid %d, qid: %d)\n", vid, queue_id);
-		return -1;
-	}
-
-	if (unlikely(ops->check_completed_copies == NULL ||
-		ops->transfer_data == NULL))
-		return -1;
-
-	rte_spinlock_lock(&vq->access_lock);
+	struct vhost_virtqueue *vq = dev->virtqueue[queue_id];
 
 	if (unlikely(vq->async_registered)) {
 		VHOST_LOG_CONFIG(ERR,
 			"async register failed: channel already registered "
 			"(vid %d, qid: %d)\n", vid, queue_id);
-		goto reg_out;
+		return -1;
 	}
 
 	vq->async_pkts_info = rte_malloc_socket(NULL,
@@ -1666,7 +1648,7 @@ rte_vhost_async_channel_register(int vid, uint16_t queue_id,
 		VHOST_LOG_CONFIG(ERR,
 			"async register failed: cannot allocate memory for async_pkts_info "
 			"(vid %d, qid: %d)\n", vid, queue_id);
-		goto reg_out;
+		return -1;
 	}
 
 	vq->it_pool = rte_malloc_socket(NULL,
@@ -1677,7 +1659,7 @@ rte_vhost_async_channel_register(int vid, uint16_t queue_id,
 		VHOST_LOG_CONFIG(ERR,
 			"async register failed: cannot allocate memory for it_pool "
 			"(vid %d, qid: %d)\n", vid, queue_id);
-		goto reg_out;
+		return -1;
 	}
 
 	vq->vec_pool = rte_malloc_socket(NULL,
@@ -1688,7 +1670,7 @@ rte_vhost_async_channel_register(int vid, uint16_t queue_id,
 		VHOST_LOG_CONFIG(ERR,
 			"async register failed: cannot allocate memory for vec_pool "
 			"(vid %d, qid: %d)\n", vid, queue_id);
-		goto reg_out;
+		return -1;
 	}
 
 	if (vq_is_packed(dev)) {
@@ -1700,7 +1682,7 @@ rte_vhost_async_channel_register(int vid, uint16_t queue_id,
 			VHOST_LOG_CONFIG(ERR,
 				"async register failed: cannot allocate memory for async buffers "
 				"(vid %d, qid: %d)\n", vid, queue_id);
-			goto reg_out;
+			return -1;
 		}
 	} else {
 		vq->async_descs_split = rte_malloc_socket(NULL,
@@ -1711,20 +1693,87 @@ rte_vhost_async_channel_register(int vid, uint16_t queue_id,
 			VHOST_LOG_CONFIG(ERR,
 				"async register failed: cannot allocate memory for async descs "
 				"(vid %d, qid: %d)\n", vid, queue_id);
-			goto reg_out;
+			return -1;
 		}
 	}
 
 	vq->async_ops.check_completed_copies = ops->check_completed_copies;
 	vq->async_ops.transfer_data = ops->transfer_data;
-	vq->async_threshold = config.async_threshold;
 
 	vq->async_registered = true;
 
-reg_out:
+	return 0;
+}
+
+int
+rte_vhost_async_channel_register(int vid, uint16_t queue_id,
+		struct rte_vhost_async_config config,
+		struct rte_vhost_async_channel_ops *ops)
+{
+	struct vhost_virtqueue *vq;
+	struct virtio_net *dev = get_device(vid);
+	int ret;
+
+	if (dev == NULL || ops == NULL)
+		return -1;
+
+	if (queue_id >= VHOST_MAX_VRING)
+		return -1;
+
+	vq = dev->virtqueue[queue_id];
+
+	if (unlikely(vq == NULL || !dev->async_copy))
+		return -1;
+
+	if (unlikely(!(config.features & RTE_VHOST_ASYNC_INORDER))) {
+		VHOST_LOG_CONFIG(ERR,
+			"async copy is not supported on non-inorder mode "
+			"(vid %d, qid: %d)\n", vid, queue_id);
+		return -1;
+	}
+
+	if (unlikely(ops->check_completed_copies == NULL ||
+		ops->transfer_data == NULL))
+		return -1;
+
+	rte_spinlock_lock(&vq->access_lock);
+	ret = async_channel_register(vid, queue_id, ops);
 	rte_spinlock_unlock(&vq->access_lock);
 
-	return 0;
+	return ret;
+}
+
+int
+rte_vhost_async_channel_register_thread_unsafe(int vid, uint16_t queue_id,
+		struct rte_vhost_async_config config,
+		struct rte_vhost_async_channel_ops *ops)
+{
+	struct vhost_virtqueue *vq;
+	struct virtio_net *dev = get_device(vid);
+
+	if (dev == NULL || ops == NULL)
+		return -1;
+
+	if (queue_id >= VHOST_MAX_VRING)
+		return -1;
+
+	vq = dev->virtqueue[queue_id];
+
+	if (unlikely(vq == NULL || !dev->async_copy))
+		return -1;
+
+	if (unlikely(!(config.features & RTE_VHOST_ASYNC_INORDER))) {
+		VHOST_LOG_CONFIG(ERR,
+			"async copy is not supported on non-inorder mode "
+			"(vid %d, qid: %d)\n", vid, queue_id);
+		return -1;
+	}
+
+	if (unlikely(ops->check_completed_copies == NULL ||
+		ops->transfer_data == NULL))
+		return -1;
+
+	return async_channel_register(vid, queue_id, ops);
 }
 
 int
@@ -1775,7 +1824,43 @@ out:
 	return ret;
 }
 
-int rte_vhost_async_get_inflight(int vid, uint16_t queue_id)
+int
+rte_vhost_async_channel_unregister_thread_unsafe(int vid, uint16_t queue_id)
+{
+	struct vhost_virtqueue *vq;
+	struct virtio_net *dev = get_device(vid);
+
+	if (dev == NULL)
+		return -1;
+
+	if (queue_id >= VHOST_MAX_VRING)
+		return -1;
+
+	vq = dev->virtqueue[queue_id];
+
+	if (vq == NULL)
+		return -1;
+
+	if (!vq->async_registered)
+		return 0;
+
+	if (vq->async_pkts_inflight_n) {
+		VHOST_LOG_CONFIG(ERR, "Failed to unregister async channel. "
+			"async inflight packets must be completed before unregistration.\n");
+		return -1;
+	}
+
+	vhost_free_async_mem(vq);
+
+	vq->async_ops.transfer_data = NULL;
+	vq->async_ops.check_completed_copies = NULL;
+	vq->async_registered = false;
+
+	return 0;
+}
+
+int
+rte_vhost_async_get_inflight(int vid, uint16_t queue_id)
 {
 	struct vhost_virtqueue *vq;
 	struct virtio_net *dev = get_device(vid);
@@ -1805,7 +1890,6 @@ int rte_vhost_async_get_inflight(int vid, uint16_t queue_id)
 	rte_spinlock_unlock(&vq->access_lock);
 
 	return ret;
-
 }
 
 RTE_LOG_REGISTER_SUFFIX(vhost_config_log_level, config, INFO);