#include "vhost_user.h"
struct virtio_net *vhost_devices[MAX_VHOST_DEVICE];
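+/* Protects slot allocation in the global vhost_devices[] array. */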
+pthread_mutex_t vhost_dev_lock = PTHREAD_MUTEX_INITIALIZER;
/* Called with iotlb_lock read-locked */
uint64_t
return;
/* To make sure guest memory updates are committed before logging */
- rte_smp_wmb();
+ rte_atomic_thread_fence(__ATOMIC_RELEASE);
page = addr / VHOST_LOG_PAGE;
while (page * VHOST_LOG_PAGE < addr + len) {
if (unlikely(!dev->log_base))
return;
- rte_smp_wmb();
+ rte_atomic_thread_fence(__ATOMIC_RELEASE);
log_base = (unsigned long *)(uintptr_t)dev->log_base;
#endif
}
- rte_smp_wmb();
+ rte_atomic_thread_fence(__ATOMIC_RELEASE);
vq->log_cache_nb_elem = 0;
}
* No more room for a new log cache entry,
* so write the dirty log map directly.
*/
- rte_smp_wmb();
+ rte_atomic_thread_fence(__ATOMIC_RELEASE);
vhost_log_page((uint8_t *)(uintptr_t)dev->log_base, page);
return;
}
}
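+/* Release a virtqueue's async data path buffers; rte_free() ignores NULL pointers. */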
+static void
+vhost_free_async_mem(struct vhost_virtqueue *vq)
+{
+ rte_free(vq->async_pkts_info);
+ rte_free(vq->async_descs_split);
+ rte_free(vq->it_pool);
+ rte_free(vq->vec_pool);
+
+ vq->async_pkts_info = NULL;
+ vq->async_descs_split = NULL;
+ vq->it_pool = NULL;
+ vq->vec_pool = NULL;
+}
+
void
free_vq(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
if (vq_is_packed(dev))
rte_free(vq->shadow_used_packed);
else {
rte_free(vq->shadow_used_split);
- if (vq->async_pkts_pending)
- rte_free(vq->async_pkts_pending);
- if (vq->async_pending_info)
- rte_free(vq->async_pending_info);
+ vhost_free_async_mem(vq);
}
rte_free(vq->batch_copy_elems);
rte_mempool_free(vq->iotlb_pool);
}
vq = dev->virtqueue[vring_idx];
+ if (!vq) {
+ VHOST_LOG_CONFIG(ERR, "Virtqueue not allocated (%d)\n",
+ vring_idx);
+ return;
+ }
memset(vq, 0, sizeof(struct vhost_virtqueue));
vhost_user_iotlb_init(dev, vring_idx);
/* Backends are set to -1 indicating an inactive device. */
vq->backend = -1;
-
- TAILQ_INIT(&vq->zmbuf_list);
}
static void
}
vq = dev->virtqueue[vring_idx];
+ if (!vq) {
+ VHOST_LOG_CONFIG(ERR, "Virtqueue not allocated (%d)\n",
+ vring_idx);
+ return;
+ }
+
callfd = vq->callfd;
init_vring_queue(dev, vring_idx);
vq->callfd = callfd;
alloc_vring_queue(struct virtio_net *dev, uint32_t vring_idx)
{
struct vhost_virtqueue *vq;
+ uint32_t i;
- vq = rte_malloc(NULL, sizeof(struct vhost_virtqueue), 0);
- if (vq == NULL) {
- VHOST_LOG_CONFIG(ERR,
- "Failed to allocate memory for vring:%u.\n", vring_idx);
- return -1;
- }
+ /* Also allocate holes, if any, up to requested vring index. */
+ for (i = 0; i <= vring_idx; i++) {
+ if (dev->virtqueue[i])
+ continue;
- dev->virtqueue[vring_idx] = vq;
- init_vring_queue(dev, vring_idx);
- rte_spinlock_init(&vq->access_lock);
- vq->avail_wrap_counter = 1;
- vq->used_wrap_counter = 1;
- vq->signalled_used_valid = false;
+ vq = rte_malloc(NULL, sizeof(struct vhost_virtqueue), 0);
+ if (vq == NULL) {
+ VHOST_LOG_CONFIG(ERR,
+ "Failed to allocate memory for vring:%u.\n", i);
+ return -1;
+ }
- dev->nr_vring += 1;
+ dev->virtqueue[i] = vq;
+ init_vring_queue(dev, i);
+ rte_spinlock_init(&vq->access_lock);
+ vq->avail_wrap_counter = 1;
+ vq->used_wrap_counter = 1;
+ vq->signalled_used_valid = false;
+ }
+
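+ /* nr_vring must cover the highest allocated vring, including any holes just filled. */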
+ dev->nr_vring = RTE_MAX(dev->nr_vring, vring_idx + 1);
return 0;
}
struct virtio_net *dev;
int i;
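+ /* Serialize the free-slot search and assignment so two threads cannot claim the same entry. */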
+ pthread_mutex_lock(&vhost_dev_lock);
for (i = 0; i < MAX_VHOST_DEVICE; i++) {
if (vhost_devices[i] == NULL)
break;
if (i == MAX_VHOST_DEVICE) {
VHOST_LOG_CONFIG(ERR,
"Failed to find a free slot for new device.\n");
+ pthread_mutex_unlock(&vhost_dev_lock);
return -1;
}
if (dev == NULL) {
VHOST_LOG_CONFIG(ERR,
"Failed to allocate memory for new dev.\n");
+ pthread_mutex_unlock(&vhost_dev_lock);
return -1;
}
vhost_devices[i] = dev;
+ pthread_mutex_unlock(&vhost_dev_lock);
+
dev->vid = i;
dev->flags = VIRTIO_DEV_BUILTIN_VIRTIO_NET;
dev->slave_req_fd = -1;
dev->ifname[sizeof(dev->ifname) - 1] = '\0';
}
-void
-vhost_enable_dequeue_zero_copy(int vid)
-{
- struct virtio_net *dev = get_device(vid);
-
- if (dev == NULL)
- return;
-
- dev->dequeue_zero_copy = 1;
- VHOST_LOG_CONFIG(INFO, "dequeue zero copy is enabled\n");
-}
-
void
vhost_set_builtin_virtio_net(int vid, bool enable)
{
if (unlikely(idx >= vq->size))
return -1;
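+ /* Full fences order the inflight bookkeeping: clear the flag only after the earlier ring updates are visible, and before used_idx is advanced. */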
- rte_smp_mb();
+ rte_atomic_thread_fence(__ATOMIC_SEQ_CST);
vq->inflight_split->desc[idx].inflight = 0;
- rte_smp_mb();
+ rte_atomic_thread_fence(__ATOMIC_SEQ_CST);
vq->inflight_split->used_idx = last_used_idx;
return 0;
if (unlikely(head >= vq->size))
return -1;
- rte_smp_mb();
+ rte_atomic_thread_fence(__ATOMIC_SEQ_CST);
inflight_info->desc[head].inflight = 0;
- rte_smp_mb();
+ rte_atomic_thread_fence(__ATOMIC_SEQ_CST);
inflight_info->old_free_head = inflight_info->free_head;
inflight_info->old_used_idx = inflight_info->used_idx;
if (!dev)
return 0;
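+ /* queue_id is caller-controlled; validate it before indexing the virtqueue array. */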
+ if (queue_id >= VHOST_MAX_VRING)
+ return 0;
+
vq = dev->virtqueue[queue_id];
+ if (!vq)
+ return 0;
rte_spinlock_lock(&vq->access_lock);
vq->avail_wrap_counter << 15;
}
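+ /* Make sure off_wrap is visible to the driver before the new flags value. */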
- rte_smp_wmb();
+ rte_atomic_thread_fence(__ATOMIC_RELEASE);
vq->device_event->flags = flags;
return 0;
if (!dev)
return -1;
+ if (queue_id >= VHOST_MAX_VRING)
+ return -1;
+
vq = dev->virtqueue[queue_id];
+ if (!vq)
+ return -1;
rte_spinlock_lock(&vq->access_lock);
if (dev == NULL || last_avail_idx == NULL || last_used_idx == NULL)
return -1;
+ if (queue_id >= VHOST_MAX_VRING)
+ return -1;
+
vq = dev->virtqueue[queue_id];
if (!vq)
return -1;
if (!dev)
return -1;
+ if (queue_id >= VHOST_MAX_VRING)
+ return -1;
+
vq = dev->virtqueue[queue_id];
if (!vq)
return -1;
uint16_t *last_used_idx)
{
struct rte_vhost_inflight_info_packed *inflight_info;
+ struct vhost_virtqueue *vq;
struct virtio_net *dev = get_device(vid);
if (dev == NULL || last_avail_idx == NULL || last_used_idx == NULL)
return -1;
+ if (queue_id >= VHOST_MAX_VRING)
+ return -1;
+
+ vq = dev->virtqueue[queue_id];
+ if (!vq)
+ return -1;
+
if (!vq_is_packed(dev))
return -1;
- inflight_info = dev->virtqueue[queue_id]->inflight_packed;
+ inflight_info = vq->inflight_packed;
if (!inflight_info)
return -1;
struct vhost_virtqueue *vq;
struct virtio_net *dev = get_device(vid);
struct rte_vhost_async_features f;
+ int node;
if (dev == NULL || ops == NULL)
return -1;
f.intval = features;
+ if (queue_id >= VHOST_MAX_VRING)
+ return -1;
+
vq = dev->virtqueue[queue_id];
if (unlikely(vq == NULL || !dev->async_copy))
goto reg_out;
}
- vq->async_pkts_pending = rte_malloc(NULL,
- vq->size * sizeof(uintptr_t),
- RTE_CACHE_LINE_SIZE);
- vq->async_pending_info = rte_malloc(NULL,
- vq->size * sizeof(uint64_t),
- RTE_CACHE_LINE_SIZE);
- if (!vq->async_pkts_pending || !vq->async_pending_info) {
- if (vq->async_pkts_pending)
- rte_free(vq->async_pkts_pending);
-
- if (vq->async_pending_info)
- rte_free(vq->async_pending_info);
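+ /* Allocate the async metadata on the virtqueue's NUMA node when it can be determined. */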
+#ifdef RTE_LIBRTE_VHOST_NUMA
+ if (get_mempolicy(&node, NULL, 0, vq, MPOL_F_NODE | MPOL_F_ADDR)) {
+ VHOST_LOG_CONFIG(ERR,
+ "unable to get numa information in async register. "
+ "allocating async buffer memory on the caller thread node\n");
+ node = SOCKET_ID_ANY;
+ }
+#else
+ node = SOCKET_ID_ANY;
+#endif
+ vq->async_pkts_info = rte_malloc_socket(NULL,
+ vq->size * sizeof(struct async_inflight_info),
+ RTE_CACHE_LINE_SIZE, node);
+ vq->it_pool = rte_malloc_socket(NULL,
+ VHOST_MAX_ASYNC_IT * sizeof(struct rte_vhost_iov_iter),
+ RTE_CACHE_LINE_SIZE, node);
+ vq->vec_pool = rte_malloc_socket(NULL,
+ VHOST_MAX_ASYNC_VEC * sizeof(struct iovec),
+ RTE_CACHE_LINE_SIZE, node);
+ vq->async_descs_split = rte_malloc_socket(NULL,
+ vq->size * sizeof(struct vring_used_elem),
+ RTE_CACHE_LINE_SIZE, node);
+ if (!vq->async_descs_split || !vq->async_pkts_info ||
+ !vq->it_pool || !vq->vec_pool) {
+ vhost_free_async_mem(vq);
VHOST_LOG_CONFIG(ERR,
"async register failed: cannot allocate memory for vq data "
"(vid %d, qid: %d)\n", vid, queue_id);
if (dev == NULL)
return ret;
+ if (queue_id >= VHOST_MAX_VRING)
+ return ret;
+
vq = dev->virtqueue[queue_id];
if (vq == NULL)
return ret;
ret = 0;
- rte_spinlock_lock(&vq->access_lock);
if (!vq->async_registered)
- goto out;
+ return ret;
+
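+ /* Fail fast if the data path currently holds the access lock instead of blocking here. */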
+ if (!rte_spinlock_trylock(&vq->access_lock)) {
+ VHOST_LOG_CONFIG(ERR, "Failed to unregister async channel. "
+ "virt queue busy.\n");
+ return -1;
+ }
if (vq->async_pkts_inflight_n) {
VHOST_LOG_CONFIG(ERR, "Failed to unregister async channel. "
goto out;
}
- if (vq->async_pkts_pending) {
- rte_free(vq->async_pkts_pending);
- vq->async_pkts_pending = NULL;
- }
-
- if (vq->async_pending_info) {
- rte_free(vq->async_pending_info);
- vq->async_pending_info = NULL;
- }
+ vhost_free_async_mem(vq);
vq->async_ops.transfer_data = NULL;
vq->async_ops.check_completed_copies = NULL;