return dev->vdpa_dev;
}
-int rte_vhost_get_log_base(int vid, uint64_t *log_base,
+int
+rte_vhost_get_log_base(int vid, uint64_t *log_base,
uint64_t *log_size)
{
struct virtio_net *dev = get_device(vid);
return 0;
}
-int rte_vhost_get_vring_base(int vid, uint16_t queue_id,
+int
+rte_vhost_get_vring_base(int vid, uint16_t queue_id,
uint16_t *last_avail_idx, uint16_t *last_used_idx)
{
struct vhost_virtqueue *vq;
return 0;
}
-int rte_vhost_set_vring_base(int vid, uint16_t queue_id,
+int
+rte_vhost_set_vring_base(int vid, uint16_t queue_id,
uint16_t last_avail_idx, uint16_t last_used_idx)
{
struct vhost_virtqueue *vq;
return 0;
}
-int rte_vhost_extern_callback_register(int vid,
+int
+rte_vhost_extern_callback_register(int vid,
struct rte_vhost_user_extern_ops const * const ops, void *ctx)
{
struct virtio_net *dev = get_device(vid);
return 0;
}
-int rte_vhost_async_channel_register(int vid, uint16_t queue_id,
- uint32_t features,
- struct rte_vhost_async_channel_ops *ops)
+/*
+ * Common registration body shared by the locked and thread-unsafe
+ * rte_vhost_async_channel_register* entry points.  Callers have already
+ * validated vid/queue_id/ops (and, for the locked variant, hold
+ * vq->access_lock).  Allocates the per-virtqueue async bookkeeping
+ * buffers and installs the ops callbacks.
+ * Returns 0 on success, -1 if the queue is already registered or an
+ * allocation fails.
+ * NOTE(review): this patch chunk elides several unchanged context lines
+ * (the rte_malloc_socket() argument lists), so allocation sizes and the
+ * intermediate NULL checks are not visible here — verify against the
+ * full file.
+ */
+static __rte_always_inline int
+async_channel_register(int vid, uint16_t queue_id,
+ struct rte_vhost_async_channel_ops *ops)
{
- struct vhost_virtqueue *vq;
struct virtio_net *dev = get_device(vid);
- struct rte_vhost_async_features f;
-
- if (dev == NULL || ops == NULL)
- return -1;
-
- f.intval = features;
-
- if (queue_id >= VHOST_MAX_VRING)
- return -1;
-
- vq = dev->virtqueue[queue_id];
-
- if (unlikely(vq == NULL || !dev->async_copy))
- return -1;
-
- if (unlikely(!f.async_inorder)) {
- VHOST_LOG_CONFIG(ERR,
- "async copy is not supported on non-inorder mode "
- "(vid %d, qid: %d)\n", vid, queue_id);
- return -1;
- }
-
- if (unlikely(ops->check_completed_copies == NULL ||
- ops->transfer_data == NULL))
- return -1;
-
- rte_spinlock_lock(&vq->access_lock);
+ struct vhost_virtqueue *vq = dev->virtqueue[queue_id];
+ /* Refuse double registration on the same virtqueue. */
if (unlikely(vq->async_registered)) {
VHOST_LOG_CONFIG(ERR,
"async register failed: channel already registered "
"(vid %d, qid: %d)\n", vid, queue_id);
- goto reg_out;
+ return -1;
}
vq->async_pkts_info = rte_malloc_socket(NULL,
VHOST_LOG_CONFIG(ERR,
"async register failed: cannot allocate memory for async_pkts_info "
"(vid %d, qid: %d)\n", vid, queue_id);
- goto reg_out;
+ return -1;
}
vq->it_pool = rte_malloc_socket(NULL,
VHOST_LOG_CONFIG(ERR,
"async register failed: cannot allocate memory for it_pool "
"(vid %d, qid: %d)\n", vid, queue_id);
- goto reg_out;
+ return -1;
}
vq->vec_pool = rte_malloc_socket(NULL,
VHOST_LOG_CONFIG(ERR,
"async register failed: cannot allocate memory for vec_pool "
"(vid %d, qid: %d)\n", vid, queue_id);
- goto reg_out;
+ return -1;
}
if (vq_is_packed(dev)) {
VHOST_LOG_CONFIG(ERR,
"async register failed: cannot allocate memory for async buffers "
"(vid %d, qid: %d)\n", vid, queue_id);
- goto reg_out;
+ return -1;
}
} else {
vq->async_descs_split = rte_malloc_socket(NULL,
VHOST_LOG_CONFIG(ERR,
"async register failed: cannot allocate memory for async descs "
"(vid %d, qid: %d)\n", vid, queue_id);
- goto reg_out;
+ return -1;
}
}
vq->async_ops.check_completed_copies = ops->check_completed_copies;
vq->async_ops.transfer_data = ops->transfer_data;
- vq->async_inorder = f.async_inorder;
- vq->async_threshold = f.async_threshold;
-
vq->async_registered = true;
-reg_out:
+ return 0;
+}
+
+/*
+ * Public API: register an async copy channel on virtqueue @queue_id of
+ * device @vid, serialised against concurrent users via vq->access_lock.
+ * Fails (-1) on: unknown vid, NULL @ops or missing mandatory callbacks,
+ * out-of-range queue id, device not opened with async copy, or a
+ * @config without RTE_VHOST_ASYNC_INORDER (only in-order completion is
+ * supported).  Returns 0 on success.
+ */
+int
+rte_vhost_async_channel_register(int vid, uint16_t queue_id,
+ struct rte_vhost_async_config config,
+ struct rte_vhost_async_channel_ops *ops)
+{
+ struct vhost_virtqueue *vq;
+ struct virtio_net *dev = get_device(vid);
+ int ret;
+
+ if (dev == NULL || ops == NULL)
+ return -1;
+
+ if (queue_id >= VHOST_MAX_VRING)
+ return -1;
+
+ vq = dev->virtqueue[queue_id];
+
+ if (unlikely(vq == NULL || !dev->async_copy))
+ return -1;
+
+ if (unlikely(!(config.features & RTE_VHOST_ASYNC_INORDER))) {
+ VHOST_LOG_CONFIG(ERR,
+ "async copy is not supported on non-inorder mode "
+ "(vid %d, qid: %d)\n", vid, queue_id);
+ return -1;
+ }
+
+ if (unlikely(ops->check_completed_copies == NULL ||
+ ops->transfer_data == NULL))
+ return -1;
+
+ /* The registration itself runs under the vring access lock. */
+ rte_spinlock_lock(&vq->access_lock);
+ ret = async_channel_register(vid, queue_id, ops);
rte_spinlock_unlock(&vq->access_lock);
- return 0;
+ return ret;
}
-int rte_vhost_async_channel_unregister(int vid, uint16_t queue_id)
+/*
+ * Same as rte_vhost_async_channel_register() but WITHOUT taking
+ * vq->access_lock: the caller must guarantee exclusive access to the
+ * virtqueue for the duration of the call.  Performs the same argument
+ * validation (vid, ops callbacks, queue range, async-copy capability,
+ * RTE_VHOST_ASYNC_INORDER) before delegating to async_channel_register().
+ * Returns 0 on success, -1 on failure.
+ */
+int
+rte_vhost_async_channel_register_thread_unsafe(int vid, uint16_t queue_id,
+ struct rte_vhost_async_config config,
+ struct rte_vhost_async_channel_ops *ops)
+{
+ struct vhost_virtqueue *vq;
+ struct virtio_net *dev = get_device(vid);
+
+ if (dev == NULL || ops == NULL)
+ return -1;
+
+ if (queue_id >= VHOST_MAX_VRING)
+ return -1;
+
+ vq = dev->virtqueue[queue_id];
+
+ if (unlikely(vq == NULL || !dev->async_copy))
+ return -1;
+
+ if (unlikely(!(config.features & RTE_VHOST_ASYNC_INORDER))) {
+ VHOST_LOG_CONFIG(ERR,
+ "async copy is not supported on non-inorder mode "
+ "(vid %d, qid: %d)\n", vid, queue_id);
+ return -1;
+ }
+
+ if (unlikely(ops->check_completed_copies == NULL ||
+ ops->transfer_data == NULL))
+ return -1;
+
+ return async_channel_register(vid, queue_id, ops);
+}
+
+int
+rte_vhost_async_channel_unregister(int vid, uint16_t queue_id)
{
struct vhost_virtqueue *vq;
struct virtio_net *dev = get_device(vid);
return ret;
}
+/*
+ * Same as rte_vhost_async_channel_unregister() but WITHOUT taking
+ * vq->access_lock; the caller must guarantee exclusive access to the
+ * virtqueue.  A queue that was never registered is treated as success
+ * (returns 0).  Unregistration is refused (-1) while async packets are
+ * still in flight; otherwise the async buffers are freed and the ops
+ * callbacks cleared.
+ */
+int
+rte_vhost_async_channel_unregister_thread_unsafe(int vid, uint16_t queue_id)
+{
+ struct vhost_virtqueue *vq;
+ struct virtio_net *dev = get_device(vid);
+
+ if (dev == NULL)
+ return -1;
+
+ if (queue_id >= VHOST_MAX_VRING)
+ return -1;
+
+ vq = dev->virtqueue[queue_id];
+
+ if (vq == NULL)
+ return -1;
+
+ /* Nothing registered: nothing to tear down. */
+ if (!vq->async_registered)
+ return 0;
+
+ if (vq->async_pkts_inflight_n) {
+ VHOST_LOG_CONFIG(ERR, "Failed to unregister async channel. "
+ "async inflight packets must be completed before unregistration.\n");
+ return -1;
+ }
+
+ vhost_free_async_mem(vq);
+
+ vq->async_ops.transfer_data = NULL;
+ vq->async_ops.check_completed_copies = NULL;
+ vq->async_registered = false;
+
+ return 0;
+}
+
+/*
+ * Return the number of async packets currently in flight on virtqueue
+ * @queue_id of device @vid.  Returns -1 when the arguments are invalid,
+ * the queue has no async channel registered, or the vring access lock
+ * cannot be acquired (trylock fails while the queue is busy — callers
+ * are expected to retry).
+ */
+int
+rte_vhost_async_get_inflight(int vid, uint16_t queue_id)
+{
+ struct vhost_virtqueue *vq;
+ struct virtio_net *dev = get_device(vid);
+ int ret = -1;
+
+ if (dev == NULL)
+ return ret;
+
+ if (queue_id >= VHOST_MAX_VRING)
+ return ret;
+
+ vq = dev->virtqueue[queue_id];
+
+ if (vq == NULL)
+ return ret;
+
+ if (!vq->async_registered)
+ return ret;
+
+ /* Non-blocking: do not stall if the datapath holds the lock. */
+ if (!rte_spinlock_trylock(&vq->access_lock)) {
+ VHOST_LOG_CONFIG(DEBUG, "Failed to check in-flight packets. "
+ "virt queue busy.\n");
+ return ret;
+ }
+
+ ret = vq->async_pkts_inflight_n;
+ rte_spinlock_unlock(&vq->access_lock);
+
+ return ret;
+}
+
RTE_LOG_REGISTER_SUFFIX(vhost_config_log_level, config, INFO);
RTE_LOG_REGISTER_SUFFIX(vhost_data_log_level, data, WARNING);