vhost_user_iotlb_pending_insert(vq, iova, perm);
if (vhost_user_iotlb_miss(dev, iova, perm)) {
- RTE_LOG(ERR, VHOST_CONFIG,
+ VHOST_LOG_CONFIG(ERR,
"IOTLB miss req failed for IOVA 0x%" PRIx64 "\n",
iova);
vhost_user_iotlb_pending_remove(vq, iova, 1, perm);
hva = __vhost_iova_to_vva(dev, vq, iova, &map_len, VHOST_ACCESS_RW);
if (map_len != len) {
- RTE_LOG(ERR, VHOST_CONFIG,
+ VHOST_LOG_DATA(ERR,
"Failed to write log for IOVA 0x%" PRIx64 ". No IOTLB entry found\n",
iova);
return;
hva = __vhost_iova_to_vva(dev, vq, iova, &map_len, VHOST_ACCESS_RW);
if (map_len != len) {
- RTE_LOG(ERR, VHOST_CONFIG,
+ VHOST_LOG_DATA(ERR,
"Failed to write log for IOVA 0x%" PRIx64 ". No IOTLB entry found\n",
iova);
return;
{
if (vq_is_packed(dev))
rte_free(vq->shadow_used_packed);
- else
+ else {
rte_free(vq->shadow_used_split);
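+ /* Async metadata is only ever allocated for split rings
+ * (registration rejects packed queues), so free it here too. */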
+ if (vq->async_pkts_pending)
+ rte_free(vq->async_pkts_pending);
+ if (vq->async_pending_info)
+ rte_free(vq->async_pending_info);
+ }
rte_free(vq->batch_copy_elems);
rte_mempool_free(vq->iotlb_pool);
rte_free(vq);
rte_free(dev);
}
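+/*
+ * Resolve and cache vq->log_guest_addr for dirty page logging.
+ * Nothing to do unless the ring was set up with VHOST_VRING_F_LOG.
+ */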
+static __rte_always_inline int
+log_translate(struct virtio_net *dev, struct vhost_virtqueue *vq)
+{
+ if (likely(!(vq->ring_addrs.flags & (1 << VHOST_VRING_F_LOG))))
+ return 0;
+
+ vq->log_guest_addr = translate_log_addr(dev, vq,
+ vq->ring_addrs.log_guest_addr);
+ if (vq->log_guest_addr == 0)
+ return -1;
+
+ return 0;
+}
+
+/*
+ * Converts a vring log address to a GPA.
+ * If an IOMMU is enabled, the log address is an IOVA.
+ * If no IOMMU is enabled, the log address is already a GPA.
+ *
+ * Caller should have iotlb_lock read-locked
+ */
+uint64_t
+translate_log_addr(struct virtio_net *dev, struct vhost_virtqueue *vq,
+ uint64_t log_addr)
+{
+ if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)) {
+ const uint64_t exp_size = sizeof(uint64_t);
+ uint64_t hva, gpa;
+ uint64_t size = exp_size;
+
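+ /* The full 8-byte log address must be mapped by one IOTLB entry;
+ * a shorter mapping is treated as a translation failure. */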
+ hva = vhost_iova_to_vva(dev, vq, log_addr,
+ &size, VHOST_ACCESS_RW);
+
+ if (size != exp_size)
+ return 0;
+
+ gpa = hva_to_gpa(dev, hva, exp_size);
+ if (!gpa) {
+ VHOST_LOG_CONFIG(ERR,
+ "VQ: Failed to find GPA for log_addr: 0x%"
+ PRIx64 " hva: 0x%" PRIx64 "\n",
+ log_addr, hva);
+ return 0;
+ }
+ return gpa;
+
+ } else
+ return log_addr;
+}
+
+/* Caller should have iotlb_lock read-locked */
static int
vring_translate_split(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
return 0;
}
+/* Caller should have iotlb_lock read-locked */
static int
vring_translate_packed(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
if (vring_translate_split(dev, vq) < 0)
return -1;
}
+
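+ /* Ring addresses are valid; now translate the dirty-log address too. */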
+ if (log_translate(dev, vq) < 0)
+ return -1;
+
vq->access_ok = 1;
return 0;
struct vhost_virtqueue *vq;
if (vring_idx >= VHOST_MAX_VRING) {
- RTE_LOG(ERR, VHOST_CONFIG,
- "Failed not init vring, out of bound (%d)\n",
+ VHOST_LOG_CONFIG(ERR,
+ "Failed to init vring, out of bounds (%d)\n",
vring_idx);
return;
int callfd;
if (vring_idx >= VHOST_MAX_VRING) {
- RTE_LOG(ERR, VHOST_CONFIG,
- "Failed not init vring, out of bound (%d)\n",
+ VHOST_LOG_CONFIG(ERR,
+ "Failed to init vring, out of bounds (%d)\n",
vring_idx);
return;
vq = rte_malloc(NULL, sizeof(struct vhost_virtqueue), 0);
if (vq == NULL) {
- RTE_LOG(ERR, VHOST_CONFIG,
+ VHOST_LOG_CONFIG(ERR,
"Failed to allocate memory for vring:%u.\n", vring_idx);
return -1;
}
}
if (i == MAX_VHOST_DEVICE) {
- RTE_LOG(ERR, VHOST_CONFIG,
+ VHOST_LOG_CONFIG(ERR,
"Failed to find a free slot for new device.\n");
return -1;
}
dev = rte_zmalloc(NULL, sizeof(struct virtio_net), 0);
if (dev == NULL) {
- RTE_LOG(ERR, VHOST_CONFIG,
+ VHOST_LOG_CONFIG(ERR,
"Failed to allocate memory for new dev.\n");
return -1;
}
dev->vid = i;
dev->flags = VIRTIO_DEV_BUILTIN_VIRTIO_NET;
dev->slave_req_fd = -1;
- dev->vdpa_dev_id = -1;
dev->postcopy_ufd = -1;
rte_spinlock_init(&dev->slave_req_lock);
vhost_destroy_device_notify(struct virtio_net *dev)
{
struct rte_vdpa_device *vdpa_dev;
- int did;
if (dev->flags & VIRTIO_DEV_RUNNING) {
- did = dev->vdpa_dev_id;
- vdpa_dev = rte_vdpa_get_device(did);
+ vdpa_dev = dev->vdpa_dev;
if (vdpa_dev && vdpa_dev->ops->dev_close)
vdpa_dev->ops->dev_close(dev->vid);
dev->flags &= ~VIRTIO_DEV_RUNNING;
}
void
-vhost_attach_vdpa_device(int vid, int did)
+vhost_attach_vdpa_device(int vid, struct rte_vdpa_device *vdpa_dev)
{
struct virtio_net *dev = get_device(vid);
if (dev == NULL)
return;
- if (rte_vdpa_get_device(did) == NULL)
- return;
-
- dev->vdpa_dev_id = did;
+ dev->vdpa_dev = vdpa_dev;
}
void
dev->flags &= ~VIRTIO_DEV_BUILTIN_VIRTIO_NET;
}
+void
+vhost_enable_extbuf(int vid)
+{
+ struct virtio_net *dev = get_device(vid);
+
+ if (dev == NULL)
+ return;
+
+ dev->extbuf = 1;
+}
+
+void
+vhost_enable_linearbuf(int vid)
+{
+ struct virtio_net *dev = get_device(vid);
+
+ if (dev == NULL)
+ return;
+
+ dev->linearbuf = 1;
+}
+
int
rte_vhost_get_mtu(int vid, uint16_t *mtu)
{
ret = get_mempolicy(&numa_node, NULL, 0, dev,
MPOL_F_NODE | MPOL_F_ADDR);
if (ret < 0) {
- RTE_LOG(ERR, VHOST_CONFIG,
+ VHOST_LOG_CONFIG(ERR,
"(%d) failed to query numa node: %s\n",
vid, rte_strerror(errno));
return -1;
if (!vq)
return -1;
- vring->desc = vq->desc;
- vring->avail = vq->avail;
- vring->used = vq->used;
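+ /* Expose whichever ring layout was negotiated: descriptor and
+ * event rings for packed, desc/avail/used for split. */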
+ if (vq_is_packed(dev)) {
+ vring->desc_packed = vq->desc_packed;
+ vring->driver_event = vq->driver_event;
+ vring->device_event = vq->device_event;
+ } else {
+ vring->desc = vq->desc;
+ vring->avail = vq->avail;
+ vring->used = vq->used;
+ }
vring->log_guest_addr = vq->log_guest_addr;
vring->callfd = vq->callfd;
return 0;
if (unlikely(qid >= dev->nr_vring || (qid & 1) == 0)) {
- RTE_LOG(ERR, VHOST_DATA, "(%d) %s: invalid virtqueue idx %d.\n",
+ VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue idx %d.\n",
dev->vid, __func__, qid);
return 0;
}
return ret;
}
-int rte_vhost_get_vdpa_device_id(int vid)
+struct rte_vdpa_device *
+rte_vhost_get_vdpa_device(int vid)
{
struct virtio_net *dev = get_device(vid);
if (dev == NULL)
- return -1;
+ return NULL;
- return dev->vdpa_dev_id;
+ return dev->vdpa_dev;
}
int rte_vhost_get_log_base(int vid, uint64_t *log_base,
int rte_vhost_get_vring_base(int vid, uint16_t queue_id,
uint16_t *last_avail_idx, uint16_t *last_used_idx)
{
+ struct vhost_virtqueue *vq;
struct virtio_net *dev = get_device(vid);
if (dev == NULL || last_avail_idx == NULL || last_used_idx == NULL)
return -1;
- *last_avail_idx = dev->virtqueue[queue_id]->last_avail_idx;
- *last_used_idx = dev->virtqueue[queue_id]->last_used_idx;
+ vq = dev->virtqueue[queue_id];
+ if (!vq)
+ return -1;
+
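+ /* For packed rings, bit 15 of each returned index carries the
+ * wrap counter; the ring index itself lives in bits 0-14. */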
+ if (vq_is_packed(dev)) {
+ *last_avail_idx = (vq->avail_wrap_counter << 15) |
+ vq->last_avail_idx;
+ *last_used_idx = (vq->used_wrap_counter << 15) |
+ vq->last_used_idx;
+ } else {
+ *last_avail_idx = vq->last_avail_idx;
+ *last_used_idx = vq->last_used_idx;
+ }
+
+ return 0;
+}
+
+int rte_vhost_set_vring_base(int vid, uint16_t queue_id,
+ uint16_t last_avail_idx, uint16_t last_used_idx)
+{
+ struct vhost_virtqueue *vq;
+ struct virtio_net *dev = get_device(vid);
+
+ if (!dev)
+ return -1;
+
+ vq = dev->virtqueue[queue_id];
+ if (!vq)
+ return -1;
+
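+ /* Mirror of the get path: bit 15 holds the wrap counter,
+ * bits 0-14 the ring index. */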
+ if (vq_is_packed(dev)) {
+ vq->last_avail_idx = last_avail_idx & 0x7fff;
+ vq->avail_wrap_counter = !!(last_avail_idx & (1 << 15));
+ vq->last_used_idx = last_used_idx & 0x7fff;
+ vq->used_wrap_counter = !!(last_used_idx & (1 << 15));
+ } else {
+ vq->last_avail_idx = last_avail_idx;
+ vq->last_used_idx = last_used_idx;
+ }
return 0;
}
return 0;
}
-int rte_vhost_set_vring_base(int vid, uint16_t queue_id,
- uint16_t last_avail_idx, uint16_t last_used_idx)
+int rte_vhost_extern_callback_register(int vid,
+ struct rte_vhost_user_extern_ops const * const ops, void *ctx)
{
struct virtio_net *dev = get_device(vid);
- if (!dev)
+ if (dev == NULL || ops == NULL)
return -1;
- dev->virtqueue[queue_id]->last_avail_idx = last_avail_idx;
- dev->virtqueue[queue_id]->last_used_idx = last_used_idx;
-
+ dev->extern_ops = *ops;
+ dev->extern_data = ctx;
return 0;
}
-int rte_vhost_extern_callback_register(int vid,
- struct rte_vhost_user_extern_ops const * const ops, void *ctx)
+int rte_vhost_async_channel_register(int vid, uint16_t queue_id,
+ uint32_t features,
+ struct rte_vhost_async_channel_ops *ops)
{
+ struct vhost_virtqueue *vq;
struct virtio_net *dev = get_device(vid);
+ struct rte_vhost_async_features f;
if (dev == NULL || ops == NULL)
return -1;
- dev->extern_ops = *ops;
- dev->extern_data = ctx;
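+ /* Decode the raw feature bits via the rte_vhost_async_features union. */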
+ f.intval = features;
+
+ vq = dev->virtqueue[queue_id];
+
+ if (unlikely(vq == NULL || !dev->async_copy))
+ return -1;
+
+ /* packed queue is not supported */
+ if (unlikely(vq_is_packed(dev) || !f.async_inorder)) {
+ VHOST_LOG_CONFIG(ERR,
+ "async copy is not supported on packed queue or non-inorder mode "
+ "(vid %d, qid: %d)\n", vid, queue_id);
+ return -1;
+ }
+
+ if (unlikely(ops->check_completed_copies == NULL ||
+ ops->transfer_data == NULL))
+ return -1;
+
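+ /* Take the vq access lock so registration cannot race the datapath. */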
+ rte_spinlock_lock(&vq->access_lock);
+
+ if (unlikely(vq->async_registered)) {
+ VHOST_LOG_CONFIG(ERR,
+ "async register failed: channel already registered "
+ "(vid %d, qid: %d)\n", vid, queue_id);
+ goto reg_out;
+ }
+
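+ /* One slot per ring entry for pending packet pointers and info. */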
+ vq->async_pkts_pending = rte_malloc(NULL,
+ vq->size * sizeof(uintptr_t),
+ RTE_CACHE_LINE_SIZE);
+ vq->async_pending_info = rte_malloc(NULL,
+ vq->size * sizeof(uint64_t),
+ RTE_CACHE_LINE_SIZE);
+ if (!vq->async_pkts_pending || !vq->async_pending_info) {
+ if (vq->async_pkts_pending)
+ rte_free(vq->async_pkts_pending);
+
+ if (vq->async_pending_info)
+ rte_free(vq->async_pending_info);
+
+ VHOST_LOG_CONFIG(ERR,
+ "async register failed: cannot allocate memory for vq data "
+ "(vid %d, qid: %d)\n", vid, queue_id);
+ goto reg_out;
+ }
+
+ vq->async_ops.check_completed_copies = ops->check_completed_copies;
+ vq->async_ops.transfer_data = ops->transfer_data;
+
+ vq->async_inorder = f.async_inorder;
+ vq->async_threshold = f.async_threshold;
+
+ vq->async_registered = true;
+
+reg_out:
+ rte_spinlock_unlock(&vq->access_lock);
+
return 0;
}
+
+int rte_vhost_async_channel_unregister(int vid, uint16_t queue_id)
+{
+ struct vhost_virtqueue *vq;
+ struct virtio_net *dev = get_device(vid);
+ int ret = -1;
+
+ if (dev == NULL)
+ return ret;
+
+ vq = dev->virtqueue[queue_id];
+
+ if (vq == NULL)
+ return ret;
+
+ ret = 0;
+ rte_spinlock_lock(&vq->access_lock);
+
+ if (!vq->async_registered)
+ goto out;
+
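+ /* Refuse to tear the channel down while copies are still in flight. */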
+ if (vq->async_pkts_inflight_n) {
+ VHOST_LOG_CONFIG(ERR, "Failed to unregister async channel. "
+ "async inflight packets must be completed before unregistration.\n");
+ ret = -1;
+ goto out;
+ }
+
+ if (vq->async_pkts_pending) {
+ rte_free(vq->async_pkts_pending);
+ vq->async_pkts_pending = NULL;
+ }
+
+ if (vq->async_pending_info) {
+ rte_free(vq->async_pending_info);
+ vq->async_pending_info = NULL;
+ }
+
+ vq->async_ops.transfer_data = NULL;
+ vq->async_ops.check_completed_copies = NULL;
+ vq->async_registered = false;
+
+out:
+ rte_spinlock_unlock(&vq->access_lock);
+
+ return ret;
+}
+
+RTE_LOG_REGISTER(vhost_config_log_level, lib.vhost.config, INFO);
+RTE_LOG_REGISTER(vhost_data_log_level, lib.vhost.data, WARNING);
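
A minimal usage sketch of the async channel API introduced above. The
rte_vhost_async_channel_ops callback signatures and the
rte_vhost_async_features bit layout are assumed from rte_vhost_async.h
as of this patch; neither definition appears in the diff itself.

#include <rte_vhost.h>
#include <rte_vhost_async.h>

/* Assumed signatures: only the ops field names appear in the patch. */
static uint32_t
dma_transfer_data(int vid, uint16_t queue_id,
		struct rte_vhost_async_desc *descs,
		struct rte_vhost_async_status *opaque_data, uint16_t count)
{
	/* Enqueue 'count' src->dst copy jobs on a DMA engine here. */
	return count;
}

static uint32_t
dma_check_completed_copies(int vid, uint16_t queue_id,
		struct rte_vhost_async_status *opaque_data,
		uint16_t max_packets)
{
	/* Report how many previously submitted copies have finished. */
	return 0;
}

static int
setup_async_channel(int vid, uint16_t queue_id)
{
	struct rte_vhost_async_channel_ops ops = {
		.transfer_data = dma_transfer_data,
		.check_completed_copies = dma_check_completed_copies,
	};
	struct rte_vhost_async_features f;

	f.intval = 0;
	f.async_inorder = 1;	/* mandatory: non-inorder mode is rejected */
	f.async_threshold = 256;	/* assumed tuning value */

	/* queue_id must be a split ring (packed queues are rejected) and
	 * the socket must have been created with async copy enabled. */
	return rte_vhost_async_channel_register(vid, queue_id,
			f.intval, &ops);
}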