#include <stdint.h>
#include <stdlib.h>
#ifdef RTE_LIBRTE_VHOST_NUMA
+#include <numa.h>
#include <numaif.h>
#endif
req_size = sizeof(struct vring_packed_desc) * vq->size;
size = req_size;
- vq->desc_packed =
- (struct vring_packed_desc *)(uintptr_t)vhost_iova_to_vva(dev,
- vq, vq->ring_addrs.desc_user_addr,
- &size, VHOST_ACCESS_RW);
+ vq->desc_packed = (struct vring_packed_desc *)(uintptr_t)
+ vhost_iova_to_vva(dev, vq, vq->ring_addrs.desc_user_addr,
+ &size, VHOST_ACCESS_RW);
if (!vq->desc_packed || size != req_size)
return -1;
+ req_size = sizeof(struct vring_packed_desc_event);
+ size = req_size;
+ vq->driver_event = (struct vring_packed_desc_event *)(uintptr_t)
+ vhost_iova_to_vva(dev, vq, vq->ring_addrs.avail_user_addr,
+ &size, VHOST_ACCESS_RW);
+ if (!vq->driver_event || size != req_size)
+ return -1;
+
+ req_size = sizeof(struct vring_packed_desc_event);
+ size = req_size;
+ vq->device_event = (struct vring_packed_desc_event *)(uintptr_t)
+ vhost_iova_to_vva(dev, vq, vq->ring_addrs.used_user_addr,
+ &size, VHOST_ACCESS_RW);
+ if (!vq->device_event || size != req_size)
+ return -1;
+
return 0;
}
rte_spinlock_init(&vq->access_lock);
vq->avail_wrap_counter = 1;
vq->used_wrap_counter = 1;
+ vq->signalled_used_valid = false;
dev->nr_vring += 1;
dev->flags = VIRTIO_DEV_BUILTIN_VIRTIO_NET;
dev->slave_req_fd = -1;
dev->vdpa_dev_id = -1;
+ dev->postcopy_ufd = -1;
rte_spinlock_init(&dev->slave_req_lock);
return i;
dev->vdpa_dev_id = did;
}
-void
-vhost_detach_vdpa_device(int vid)
-{
- struct virtio_net *dev = get_device(vid);
-
- if (dev == NULL)
- return;
-
- vhost_user_host_notifier_ctrl(vid, false);
-
- dev->vdpa_dev_id = -1;
-}
-
void
vhost_set_ifname(int vid, const char *if_name, unsigned int if_len)
{
{
struct virtio_net *dev = get_device(vid);
- if (!dev)
+ if (dev == NULL || mtu == NULL)
return -ENODEV;
if (!(dev->flags & VIRTIO_DEV_READY))
int numa_node;
int ret;
- if (dev == NULL)
+ if (dev == NULL || numa_available() != 0)
return -1;
ret = get_mempolicy(&numa_node, NULL, 0, dev,
{
struct virtio_net *dev = get_device(vid);
- if (dev == NULL)
+ if (dev == NULL || buf == NULL)
return -1;
len = RTE_MIN(len, sizeof(dev->ifname));
struct virtio_net *dev;
dev = get_device(vid);
- if (!dev)
+ if (dev == NULL || features == NULL)
return -1;
*features = dev->features;
size_t size;
dev = get_device(vid);
- if (!dev)
+ if (dev == NULL || mem == NULL)
return -1;
size = dev->mem->nregions * sizeof(struct rte_vhost_mem_region);
struct vhost_virtqueue *vq;
dev = get_device(vid);
- if (!dev)
+ if (dev == NULL || vring == NULL)
return -1;
if (vring_idx >= VHOST_MAX_VRING)
if (!vq)
return -1;
- vhost_vring_call(dev, vq);
+ if (vq_is_packed(dev))
+ vhost_vring_call_packed(dev, vq);
+ else
+ vhost_vring_call_split(dev, vq);
+
return 0;
}
return *(volatile uint16_t *)&vq->avail->idx - vq->last_used_idx;
}
/*
 * Enable or disable guest notifications on a split virtqueue.
 *
 * Without VIRTIO_RING_F_EVENT_IDX, suppression is signalled through the
 * NO_NOTIFY flag in the used ring, which the guest reads.  With
 * EVENT_IDX negotiated, enabling is done by publishing the next avail
 * index the backend wants to be notified at; note there is no explicit
 * write for the disable case in that mode — the previously published
 * event index simply stops matching.
 */
static inline void
vhost_enable_notify_split(struct virtio_net *dev,
		struct vhost_virtqueue *vq, int enable)
{
	if (!(dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))) {
		/* Flag-based suppression: toggle NO_NOTIFY in used ring. */
		if (enable)
			vq->used->flags &= ~VRING_USED_F_NO_NOTIFY;
		else
			vq->used->flags |= VRING_USED_F_NO_NOTIFY;
	} else {
		/* Event-index mode: arm notification at current avail idx. */
		if (enable)
			vhost_avail_event(vq) = vq->last_avail_idx;
	}
}
+
/*
 * Enable or disable guest notifications on a packed virtqueue via the
 * device event suppression structure shared with the guest.
 */
static inline void
vhost_enable_notify_packed(struct virtio_net *dev,
		struct vhost_virtqueue *vq, int enable)
{
	uint16_t flags;

	/* Disabling only needs a single flags store. */
	if (!enable) {
		vq->device_event->flags = VRING_EVENT_F_DISABLE;
		return;
	}

	flags = VRING_EVENT_F_ENABLE;
	if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX)) {
		/*
		 * EVENT_IDX mode: publish the descriptor offset to be
		 * notified at, with the avail wrap counter carried in
		 * bit 15 as the packed ring layout requires.
		 */
		flags = VRING_EVENT_F_DESC;
		vq->device_event->off_wrap = vq->last_avail_idx |
			vq->avail_wrap_counter << 15;
	}

	/*
	 * Make sure off_wrap is visible to the guest before the flags
	 * store that re-arms notifications.
	 */
	rte_smp_wmb();

	vq->device_event->flags = flags;
}
+
/*
 * Enable or disable guest notification for the given virtqueue,
 * dispatching to the packed- or split-ring helper depending on the
 * negotiated ring layout.
 *
 * Returns 0 on success, -1 if the device cannot be looked up, the
 * queue id is out of range, or the virtqueue is not allocated.
 */
int
rte_vhost_enable_guest_notification(int vid, uint16_t queue_id, int enable)
{
	struct virtio_net *dev = get_device(vid);
	struct vhost_virtqueue *vq;

	if (!dev)
		return -1;

	/*
	 * Validate queue_id before indexing dev->virtqueue[], matching
	 * the VHOST_MAX_VRING bounds check used elsewhere in this file.
	 */
	if (queue_id >= VHOST_MAX_VRING)
		return -1;

	vq = dev->virtqueue[queue_id];
	if (vq == NULL)
		return -1;

	if (vq_is_packed(dev))
		vhost_enable_notify_packed(dev, vq, enable);
	else
		vhost_enable_notify_split(dev, vq, enable);

	return 0;
}
{
struct virtio_net *dev = get_device(vid);
- if (!dev)
+ if (dev == NULL || log_base == NULL || log_size == NULL)
return -1;
- if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
- RTE_LOG(ERR, VHOST_DATA,
- "(%d) %s: built-in vhost net backend is disabled.\n",
- dev->vid, __func__);
- return -1;
- }
-
*log_base = dev->log_base;
*log_size = dev->log_size;
{
struct virtio_net *dev = get_device(vid);
- if (!dev)
- return -1;
-
- if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
- RTE_LOG(ERR, VHOST_DATA,
- "(%d) %s: built-in vhost net backend is disabled.\n",
- dev->vid, __func__);
+ if (dev == NULL || last_avail_idx == NULL || last_used_idx == NULL)
return -1;
- }
*last_avail_idx = dev->virtqueue[queue_id]->last_avail_idx;
*last_used_idx = dev->virtqueue[queue_id]->last_used_idx;
if (!dev)
return -1;
- if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
- RTE_LOG(ERR, VHOST_DATA,
- "(%d) %s: built-in vhost net backend is disabled.\n",
- dev->vid, __func__);
- return -1;
- }
-
dev->virtqueue[queue_id]->last_avail_idx = last_avail_idx;
dev->virtqueue[queue_id]->last_used_idx = last_used_idx;
return 0;
}
+
+int rte_vhost_extern_callback_register(int vid,
+ struct rte_vhost_user_extern_ops const * const ops, void *ctx)
+{
+ struct virtio_net *dev = get_device(vid);
+
+ if (dev == NULL || ops == NULL)
+ return -1;
+
+ dev->extern_ops = *ops;
+ dev->extern_data = ctx;
+ return 0;
+}