vhost invokes this function to get the copy data completed by async
devices.
+* ``rte_vhost_async_channel_register_thread_unsafe(vid, queue_id, config, ops)``
+
+ Register an async copy device channel for a vhost queue without
+ performing any locking.
+
+ This function is only safe to call from within vhost callback functions
+ (i.e., the callbacks defined in ``struct vhost_device_ops``).
+
* ``rte_vhost_async_channel_unregister(vid, queue_id)``
Unregister the async copy device channel from a vhost queue.
devices for all vhost queues in destroy_device(), when a
virtio device is paused or shut down.
+* ``rte_vhost_async_channel_unregister_thread_unsafe(vid, queue_id)``
+
+ Unregister the async copy device channel for a vhost queue without
+ performing any locking.
+
+ This function is only safe to call from within vhost callback functions
+ (i.e., the callbacks defined in ``struct vhost_device_ops``); a usage
+ sketch is given below.
+
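+ A minimal, illustrative sketch of how these thread-unsafe variants might
+ be wired into the ``struct vhost_device_ops`` callbacks is shown below.
+ The names ``my_async_ops``, ``my_vhost_ops``, ``new_device`` and
+ ``destroy_device`` are hypothetical application code; ``my_async_ops`` is
+ assumed to be an application-provided
+ ``struct rte_vhost_async_channel_ops`` with its DMA copy callbacks already
+ set, and the vhost device is assumed to have been registered with the
+ ``RTE_VHOST_USER_ASYNC_COPY`` flag.
+
+ .. code-block:: c
+
+    #include <rte_vhost.h>
+    #include <rte_vhost_async.h>
+
+    /* Filled in elsewhere with the application's DMA copy callbacks. */
+    extern struct rte_vhost_async_channel_ops my_async_ops;
+
+    static int
+    new_device(int vid)
+    {
+        struct rte_vhost_async_config config = {
+            .features = RTE_VHOST_ASYNC_INORDER,
+        };
+        uint16_t qid;
+
+        /* Called from a vhost callback, so the thread-unsafe variant is
+         * used and no lock is taken here. Registering every vring is for
+         * illustration only.
+         */
+        for (qid = 0; qid < rte_vhost_get_vring_num(vid); qid++) {
+            if (rte_vhost_async_channel_register_thread_unsafe(vid, qid,
+                    config, &my_async_ops) < 0)
+                return -1;
+        }
+        return 0;
+    }
+
+    static void
+    destroy_device(int vid)
+    {
+        uint16_t qid;
+
+        /* In-flight async packets must have been completed before this
+         * point, otherwise unregistration fails. */
+        for (qid = 0; qid < rte_vhost_get_vring_num(vid); qid++)
+            rte_vhost_async_channel_unregister_thread_unsafe(vid, qid);
+    }
+
+    static const struct vhost_device_ops my_vhost_ops = {
+        .new_device = new_device,
+        .destroy_device = destroy_device,
+    };
+
+ ``my_vhost_ops`` would then typically be installed with
+ ``rte_vhost_driver_callback_register()``.
+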
* ``rte_vhost_submit_enqueue_burst(vid, queue_id, pkts, count, comp_pkts, comp_count)``
Submit an enqueue request to transmit ``count`` packets from host to guest
__rte_experimental
int rte_vhost_async_channel_unregister(int vid, uint16_t queue_id);
+/**
+ * Register an async channel for a vhost queue without performing any
+ * locking.
+ *
+ * @note This function does not perform any locking, and is only safe to
+ * call in vhost callback functions.
+ *
+ * @param vid
+ * vhost device id to attach the async channel to
+ * @param queue_id
+ * vhost queue id to attach the async channel to
+ * @param config
+ * Async channel configuration
+ * @param ops
+ * Async channel operation callbacks
+ * @return
+ * 0 on success, -1 on failure
+ */
+__rte_experimental
+int rte_vhost_async_channel_register_thread_unsafe(int vid, uint16_t queue_id,
+ struct rte_vhost_async_config config,
+ struct rte_vhost_async_channel_ops *ops);
+
+/**
+ * Unregister an async channel for a vhost queue without performing any
+ * locking.
+ *
+ * @note This function does not perform any locking, and is only safe to
+ * call in vhost callback functions.
+ *
+ * @param vid
+ * vhost device id to detach the async channel from
+ * @param queue_id
+ * vhost queue id to detach the async channel from
+ * @return
+ * 0 on success, -1 on failure
+ */
+__rte_experimental
+int rte_vhost_async_channel_unregister_thread_unsafe(int vid,
+ uint16_t queue_id);
+
/**
* This function submits enqueue data to async engine. Successfully
* enqueued packets can be transfer completed or being occupied by DMA
return 0;
}
-int
-rte_vhost_async_channel_register(int vid, uint16_t queue_id,
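+/*
+ * Perform the actual per-virtqueue registration. Callers are expected to
+ * have validated the device, queue id and ops beforehand, and to either
+ * hold the virtqueue's access_lock or run in a context where the virtqueue
+ * cannot be accessed concurrently (e.g. a vhost callback).
+ */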
+static __rte_always_inline int
+async_channel_register(int vid, uint16_t queue_id,
struct rte_vhost_async_config config,
struct rte_vhost_async_channel_ops *ops)
{
- struct vhost_virtqueue *vq;
struct virtio_net *dev = get_device(vid);
-
- if (dev == NULL || ops == NULL)
- return -1;
-
- if (queue_id >= VHOST_MAX_VRING)
- return -1;
-
- vq = dev->virtqueue[queue_id];
-
- if (unlikely(vq == NULL || !dev->async_copy))
- return -1;
-
- if (unlikely(!(config.features & RTE_VHOST_ASYNC_INORDER))) {
- VHOST_LOG_CONFIG(ERR,
- "async copy is not supported on non-inorder mode "
- "(vid %d, qid: %d)\n", vid, queue_id);
- return -1;
- }
-
- if (unlikely(ops->check_completed_copies == NULL ||
- ops->transfer_data == NULL))
- return -1;
-
- rte_spinlock_lock(&vq->access_lock);
+ struct vhost_virtqueue *vq = dev->virtqueue[queue_id];
if (unlikely(vq->async_registered)) {
VHOST_LOG_CONFIG(ERR,
"async register failed: channel already registered "
"(vid %d, qid: %d)\n", vid, queue_id);
- goto reg_out;
+ return -1;
}
vq->async_pkts_info = rte_malloc_socket(NULL,
VHOST_LOG_CONFIG(ERR,
"async register failed: cannot allocate memory for async_pkts_info "
"(vid %d, qid: %d)\n", vid, queue_id);
- goto reg_out;
+ return -1;
}
vq->it_pool = rte_malloc_socket(NULL,
VHOST_LOG_CONFIG(ERR,
"async register failed: cannot allocate memory for it_pool "
"(vid %d, qid: %d)\n", vid, queue_id);
- goto reg_out;
+ return -1;
}
vq->vec_pool = rte_malloc_socket(NULL,
VHOST_LOG_CONFIG(ERR,
"async register failed: cannot allocate memory for vec_pool "
"(vid %d, qid: %d)\n", vid, queue_id);
- goto reg_out;
+ return -1;
}
if (vq_is_packed(dev)) {
VHOST_LOG_CONFIG(ERR,
"async register failed: cannot allocate memory for async buffers "
"(vid %d, qid: %d)\n", vid, queue_id);
- goto reg_out;
+ return -1;
}
} else {
vq->async_descs_split = rte_malloc_socket(NULL,
VHOST_LOG_CONFIG(ERR,
"async register failed: cannot allocate memory for async descs "
"(vid %d, qid: %d)\n", vid, queue_id);
- goto reg_out;
+ return -1;
}
}
vq->async_registered = true;
-reg_out:
+ return 0;
+}
+
+int
+rte_vhost_async_channel_register(int vid, uint16_t queue_id,
+ struct rte_vhost_async_config config,
+ struct rte_vhost_async_channel_ops *ops)
+{
+ struct vhost_virtqueue *vq;
+ struct virtio_net *dev = get_device(vid);
+ int ret;
+
+ if (dev == NULL || ops == NULL)
+ return -1;
+
+ if (queue_id >= VHOST_MAX_VRING)
+ return -1;
+
+ vq = dev->virtqueue[queue_id];
+
+ if (unlikely(vq == NULL || !dev->async_copy))
+ return -1;
+
+ if (unlikely(!(config.features & RTE_VHOST_ASYNC_INORDER))) {
+ VHOST_LOG_CONFIG(ERR,
+ "async copy is not supported on non-inorder mode "
+ "(vid %d, qid: %d)\n", vid, queue_id);
+ return -1;
+ }
+
+ if (unlikely(ops->check_completed_copies == NULL ||
+ ops->transfer_data == NULL))
+ return -1;
+
+ rte_spinlock_lock(&vq->access_lock);
+ ret = async_channel_register(vid, queue_id, config, ops);
rte_spinlock_unlock(&vq->access_lock);
- return 0;
+ return ret;
+}
+
+int
+rte_vhost_async_channel_register_thread_unsafe(int vid, uint16_t queue_id,
+ struct rte_vhost_async_config config,
+ struct rte_vhost_async_channel_ops *ops)
+{
+ struct vhost_virtqueue *vq;
+ struct virtio_net *dev = get_device(vid);
+
+ if (dev == NULL || ops == NULL)
+ return -1;
+
+ if (queue_id >= VHOST_MAX_VRING)
+ return -1;
+
+ vq = dev->virtqueue[queue_id];
+
+ if (unlikely(vq == NULL || !dev->async_copy))
+ return -1;
+
+ if (unlikely(!(config.features & RTE_VHOST_ASYNC_INORDER))) {
+ VHOST_LOG_CONFIG(ERR,
+ "async copy is not supported on non-inorder mode "
+ "(vid %d, qid: %d)\n", vid, queue_id);
+ return -1;
+ }
+
+ if (unlikely(ops->check_completed_copies == NULL ||
+ ops->transfer_data == NULL))
+ return -1;
+
+ return async_channel_register(vid, queue_id, config, ops);
}
int
return ret;
}
+int
+rte_vhost_async_channel_unregister_thread_unsafe(int vid, uint16_t queue_id)
+{
+ struct vhost_virtqueue *vq;
+ struct virtio_net *dev = get_device(vid);
+
+ if (dev == NULL)
+ return -1;
+
+ if (queue_id >= VHOST_MAX_VRING)
+ return -1;
+
+ vq = dev->virtqueue[queue_id];
+
+ if (vq == NULL)
+ return -1;
+
+ if (!vq->async_registered)
+ return 0;
+
+ if (vq->async_pkts_inflight_n) {
+ VHOST_LOG_CONFIG(ERR, "Failed to unregister async channel. "
+ "async inflight packets must be completed before unregistration.\n");
+ return -1;
+ }
+
+ vhost_free_async_mem(vq);
+
+ vq->async_ops.transfer_data = NULL;
+ vq->async_ops.check_completed_copies = NULL;
+ vq->async_registered = false;
+
+ return 0;
+}
+
int rte_vhost_async_get_inflight(int vid, uint16_t queue_id)
{
struct vhost_virtqueue *vq;
rte_spinlock_unlock(&vq->access_lock);
return ret;
-
}
RTE_LOG_REGISTER_SUFFIX(vhost_config_log_level, config, INFO);