}
}
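+/*
+ * Free the per-virtqueue buffers used by the async data path and reset
+ * the pointers, so a later async channel registration starts from a
+ * clean state.
+ */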
+static void
+vhost_free_async_mem(struct vhost_virtqueue *vq)
+{
+ rte_free(vq->async_pkts_pending);
+ rte_free(vq->async_pkts_info);
+ rte_free(vq->it_pool);
+ rte_free(vq->vec_pool);
+
+ vq->async_pkts_pending = NULL;
+ vq->async_pkts_info = NULL;
+ vq->it_pool = NULL;
+ vq->vec_pool = NULL;
+}
+
void
free_vq(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
if (vq_is_packed(dev))
rte_free(vq->shadow_used_packed);
- else
+ else {
rte_free(vq->shadow_used_split);
+ vhost_free_async_mem(vq);
+ }
rte_free(vq->batch_copy_elems);
rte_mempool_free(vq->iotlb_pool);
rte_free(vq);
vq->kickfd = VIRTIO_UNINITIALIZED_EVENTFD;
vq->callfd = VIRTIO_UNINITIALIZED_EVENTFD;
+ vq->notif_enable = VIRTIO_UNINITIALIZED_NOTIF;
vhost_user_iotlb_init(dev, vring_idx);
/* Backends are set to -1 indicating an inactive device. */
vq->backend = -1;
-
- TAILQ_INIT(&vq->zmbuf_list);
}
static void
if (dev->flags & VIRTIO_DEV_RUNNING) {
vdpa_dev = dev->vdpa_dev;
- if (vdpa_dev && vdpa_dev->ops->dev_close)
+ if (vdpa_dev)
vdpa_dev->ops->dev_close(dev->vid);
dev->flags &= ~VIRTIO_DEV_RUNNING;
dev->notify_ops->destroy_device(dev->vid);
dev->ifname[sizeof(dev->ifname) - 1] = '\0';
}
-void
-vhost_enable_dequeue_zero_copy(int vid)
-{
- struct virtio_net *dev = get_device(vid);
-
- if (dev == NULL)
- return;
-
- dev->dequeue_zero_copy = 1;
-}
-
void
vhost_set_builtin_virtio_net(int vid, bool enable)
{
return 0;
}
+int
+vhost_enable_guest_notification(struct virtio_net *dev,
+ struct vhost_virtqueue *vq, int enable)
+{
+ /*
+ * If the virtqueue is not ready yet, the setting will be
+ * applied once the virtqueue becomes ready.
+ */
+ if (!vq->ready)
+ return 0;
+
+ if (vq_is_packed(dev))
+ return vhost_enable_notify_packed(dev, vq, enable);
+ else
+ return vhost_enable_notify_split(dev, vq, enable);
+}
+
int
rte_vhost_enable_guest_notification(int vid, uint16_t queue_id, int enable)
{
rte_spinlock_lock(&vq->access_lock);
- if (vq_is_packed(dev))
- ret = vhost_enable_notify_packed(dev, vq, enable);
- else
- ret = vhost_enable_notify_split(dev, vq, enable);
+ vq->notif_enable = enable;
+ ret = vhost_enable_guest_notification(dev, vq, enable);
rte_spinlock_unlock(&vq->access_lock);
return ret;
}
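+/*
+ * Register an async copy channel on a virtqueue. The channel must provide
+ * transfer_data and check_completed_copies callbacks, the device must have
+ * async copy enabled, and only split rings in in-order mode are supported.
+ * Tracking buffers are allocated on the virtqueue's NUMA node when it can
+ * be determined.
+ */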
+int
+rte_vhost_async_channel_register(int vid, uint16_t queue_id,
+ uint32_t features, struct rte_vhost_async_channel_ops *ops)
+{
+ struct vhost_virtqueue *vq;
+ struct virtio_net *dev = get_device(vid);
+ struct rte_vhost_async_features f;
+ int node;
+
+ if (dev == NULL || ops == NULL)
+ return -1;
+
+ f.intval = features;
+
+ if (unlikely(queue_id >= VHOST_MAX_VRING))
+ return -1;
+
+ vq = dev->virtqueue[queue_id];
+
+ if (unlikely(vq == NULL || !dev->async_copy))
+ return -1;
+
+ /* packed queue is not supported */
+ if (unlikely(vq_is_packed(dev) || !f.async_inorder)) {
+ VHOST_LOG_CONFIG(ERR,
+ "async copy is not supported on packed queue or non-inorder mode "
+ "(vid %d, qid: %d)\n", vid, queue_id);
+ return -1;
+ }
+
+ if (unlikely(ops->check_completed_copies == NULL ||
+ ops->transfer_data == NULL))
+ return -1;
+
+ rte_spinlock_lock(&vq->access_lock);
+
+ if (unlikely(vq->async_registered)) {
+ VHOST_LOG_CONFIG(ERR,
+ "async register failed: channel already registered "
+ "(vid %d, qid: %d)\n", vid, queue_id);
+ goto reg_out;
+ }
+
+#ifdef RTE_LIBRTE_VHOST_NUMA
+ if (get_mempolicy(&node, NULL, 0, vq, MPOL_F_NODE | MPOL_F_ADDR)) {
+ VHOST_LOG_CONFIG(ERR,
+ "unable to get numa information in async register. "
+ "allocating async buffer memory on the caller thread node\n");
+ node = SOCKET_ID_ANY;
+ }
+#else
+ node = SOCKET_ID_ANY;
+#endif
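+
+ /*
+ * Per-virtqueue async state: one pending-packet slot and one in-flight
+ * info entry per descriptor, plus pools of iov iterators and iovec
+ * entries used when building copy jobs.
+ */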
+
+ vq->async_pkts_pending = rte_malloc_socket(NULL,
+ vq->size * sizeof(uintptr_t),
+ RTE_CACHE_LINE_SIZE, node);
+ vq->async_pkts_info = rte_malloc_socket(NULL,
+ vq->size * sizeof(struct async_inflight_info),
+ RTE_CACHE_LINE_SIZE, node);
+ vq->it_pool = rte_malloc_socket(NULL,
+ VHOST_MAX_ASYNC_IT * sizeof(struct rte_vhost_iov_iter),
+ RTE_CACHE_LINE_SIZE, node);
+ vq->vec_pool = rte_malloc_socket(NULL,
+ VHOST_MAX_ASYNC_VEC * sizeof(struct iovec),
+ RTE_CACHE_LINE_SIZE, node);
+ if (!vq->async_pkts_pending || !vq->async_pkts_info ||
+ !vq->it_pool || !vq->vec_pool) {
+ vhost_free_async_mem(vq);
+ VHOST_LOG_CONFIG(ERR,
+ "async register failed: cannot allocate memory for vq data "
+ "(vid %d, qid: %d)\n", vid, queue_id);
+ goto reg_out;
+ }
+
+ vq->async_ops.check_completed_copies = ops->check_completed_copies;
+ vq->async_ops.transfer_data = ops->transfer_data;
+
+ vq->async_inorder = f.async_inorder;
+ vq->async_threshold = f.async_threshold;
+
+ vq->async_registered = true;
+
+reg_out:
+ rte_spinlock_unlock(&vq->access_lock);
+
+ return 0;
+}
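+/*
+ * A minimal registration sketch; app_transfer_data() and
+ * app_check_completed_copies() are hypothetical application callbacks and
+ * the threshold value is only an example:
+ *
+ *	struct rte_vhost_async_channel_ops ops = {
+ *		.transfer_data = app_transfer_data,
+ *		.check_completed_copies = app_check_completed_copies,
+ *	};
+ *	struct rte_vhost_async_features f;
+ *
+ *	f.intval = 0;
+ *	f.async_inorder = 1;
+ *	f.async_threshold = 256;
+ *	if (rte_vhost_async_channel_register(vid, queue_id, f.intval, &ops) < 0)
+ *		return -1;
+ */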
+
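+/*
+ * Unregister the async channel of a virtqueue. The call fails if the
+ * virtqueue lock cannot be taken or if async packets are still in flight;
+ * the application must complete in-flight packets before unregistering.
+ */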
+int
+rte_vhost_async_channel_unregister(int vid, uint16_t queue_id)
+{
+ struct vhost_virtqueue *vq;
+ struct virtio_net *dev = get_device(vid);
+ int ret = -1;
+
+ if (dev == NULL)
+ return ret;
+
+ if (unlikely(queue_id >= VHOST_MAX_VRING))
+ return ret;
+
+ vq = dev->virtqueue[queue_id];
+
+ if (vq == NULL)
+ return ret;
+
+ ret = 0;
+
+ if (!vq->async_registered)
+ return ret;
+
+ if (!rte_spinlock_trylock(&vq->access_lock)) {
+ VHOST_LOG_CONFIG(ERR, "Failed to unregister async channel: "
+ "virtqueue is busy.\n");
+ return -1;
+ }
+
+ if (vq->async_pkts_inflight_n) {
+ VHOST_LOG_CONFIG(ERR, "Failed to unregister async channel: "
+ "in-flight async packets must complete before unregistration.\n");
+ ret = -1;
+ goto out;
+ }
+
+ vhost_free_async_mem(vq);
+
+ vq->async_ops.transfer_data = NULL;
+ vq->async_ops.check_completed_copies = NULL;
+ vq->async_registered = false;
+
+out:
+ rte_spinlock_unlock(&vq->access_lock);
+
+ return ret;
+}
+
RTE_LOG_REGISTER(vhost_config_log_level, lib.vhost.config, INFO);
RTE_LOG_REGISTER(vhost_data_log_level, lib.vhost.data, WARNING);