#include "vhost_user.h"
struct virtio_net *vhost_devices[MAX_VHOST_DEVICE];
+pthread_mutex_t vhost_dev_lock = PTHREAD_MUTEX_INITIALIZER;
/* Called with iotlb_lock read-locked */
uint64_t
return;
/* To make sure guest memory updates are committed before logging */
- rte_smp_wmb();
+ rte_atomic_thread_fence(__ATOMIC_RELEASE);
page = addr / VHOST_LOG_PAGE;
while (page * VHOST_LOG_PAGE < addr + len) {
if (unlikely(!dev->log_base))
return;
- rte_smp_wmb();
+ rte_atomic_thread_fence(__ATOMIC_RELEASE);
log_base = (unsigned long *)(uintptr_t)dev->log_base;
#endif
}
- rte_smp_wmb();
+ rte_atomic_thread_fence(__ATOMIC_RELEASE);
vq->log_cache_nb_elem = 0;
}
* No more room for a new log cache entry,
* so write the dirty log map directly.
*/
- rte_smp_wmb();
+ rte_atomic_thread_fence(__ATOMIC_RELEASE);
vhost_log_page((uint8_t *)(uintptr_t)dev->log_base, page);
return;
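
The four rte_smp_wmb() conversions above all implement the same publish pattern: finish the data writes, issue a release fence, then perform the store that makes them observable to the other side. A minimal standalone sketch of that pattern, with illustrative names (payload and ready are not from the patch):

#include <rte_atomic.h>

static int payload;
static int ready;

static void
publish(void)
{
	payload = 42;                                  /* data the peer will read */
	rte_atomic_thread_fence(__ATOMIC_RELEASE);     /* commit data before the flag */
	__atomic_store_n(&ready, 1, __ATOMIC_RELAXED); /* per C11 fence rules, the
							* fence gives this store
							* release semantics */
}

The fence form lets each architecture map the barrier onto its cheapest instruction that still provides release ordering, which is the point of moving off the legacy rte_smp_*() macros.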
static void
vhost_free_async_mem(struct vhost_virtqueue *vq)
{
- if (vq->async_pkts_pending)
- rte_free(vq->async_pkts_pending);
if (vq->async_pkts_info)
rte_free(vq->async_pkts_info);
+ if (vq->async_descs_split)
+ rte_free(vq->async_descs_split);
if (vq->it_pool)
rte_free(vq->it_pool);
if (vq->vec_pool)
rte_free(vq->vec_pool);
- vq->async_pkts_pending = NULL;
vq->async_pkts_info = NULL;
+ vq->async_descs_split = NULL;
vq->it_pool = NULL;
vq->vec_pool = NULL;
}
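
Note that rte_free() treats a NULL pointer as a no-op, like free(), so the guards above are belt-and-braces; the pointer resets are what make a later teardown or re-registration safe against double frees. A hedged sketch of that free-and-clear idiom (the FREE_AND_NULL name is illustrative, not from the patch):

#include <rte_malloc.h>

#define FREE_AND_NULL(p) do { \
	rte_free(p);          \
	(p) = NULL;           \
} while (0)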
}
vq = dev->virtqueue[vring_idx];
+ if (!vq) {
+ VHOST_LOG_CONFIG(ERR, "Virtqueue not allocated (%d)\n",
+ vring_idx);
+ return;
+ }
memset(vq, 0, sizeof(struct vhost_virtqueue));
}
vq = dev->virtqueue[vring_idx];
+ if (!vq) {
+ VHOST_LOG_CONFIG(ERR, "Virtqueue not allocated (%d)\n",
+ vring_idx);
+ return;
+ }
+
callfd = vq->callfd;
init_vring_queue(dev, vring_idx);
vq->callfd = callfd;
}
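
The reset path above wipes the whole virtqueue but must not lose the call eventfd the frontend already handed over, hence the save/restore around init_vring_queue(). A reduced sketch of the pattern, with an illustrative struct:

#include <string.h>

struct queue {
	int callfd;               /* live fd that must survive the reset */
	/* ... state that should be zeroed ... */
};

static void
reset_queue(struct queue *q)
{
	int callfd = q->callfd;   /* save the fd before wiping */

	memset(q, 0, sizeof(*q)); /* zero everything else */
	q->callfd = callfd;       /* restore it afterwards */
}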
dev->virtqueue[i] = vq;
- init_vring_queue(dev, vring_idx);
+ init_vring_queue(dev, i);
rte_spinlock_init(&vq->access_lock);
vq->avail_wrap_counter = 1;
vq->used_wrap_counter = 1;
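
The vring_idx to i fix matters because the allocation loop initializes every missing queue up to the requested index; before the fix, each of those queues was initialized as if it were queue vring_idx. A condensed sketch of the assumed shape of the surrounding loop (the function name is illustrative, and error unwinding in the real code is more involved):

static int
alloc_queues_up_to(struct virtio_net *dev, uint32_t vring_idx)
{
	uint32_t i;

	for (i = 0; i <= vring_idx; i++) {
		if (dev->virtqueue[i])
			continue;                /* already allocated */
		dev->virtqueue[i] = rte_zmalloc(NULL,
				sizeof(struct vhost_virtqueue), 0);
		if (dev->virtqueue[i] == NULL)
			return -1;
		init_vring_queue(dev, i);        /* the fix: pass i, not vring_idx */
	}
	return 0;
}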
struct virtio_net *dev;
int i;
+ pthread_mutex_lock(&vhost_dev_lock);
for (i = 0; i < MAX_VHOST_DEVICE; i++) {
if (vhost_devices[i] == NULL)
break;
}

if (i == MAX_VHOST_DEVICE) {
VHOST_LOG_CONFIG(ERR,
"Failed to find a free slot for new device.\n");
+ pthread_mutex_unlock(&vhost_dev_lock);
return -1;
}
if (dev == NULL) {
VHOST_LOG_CONFIG(ERR,
"Failed to allocate memory for new dev.\n");
+ pthread_mutex_unlock(&vhost_dev_lock);
return -1;
}
vhost_devices[i] = dev;
+ pthread_mutex_unlock(&vhost_dev_lock);
+
dev->vid = i;
dev->flags = VIRTIO_DEV_BUILTIN_VIRTIO_NET;
dev->slave_req_fd = -1;
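
The new vhost_dev_lock closes a race in device creation: without it, two threads creating devices concurrently could both find the same free slot in vhost_devices[], and one registration would silently overwrite the other. The slot search, both error exits, and the slot assignment are all covered by the lock, which is released before the per-device fields are filled in, since the slot is already owned at that point. A self-contained sketch of the pattern, with illustrative names:

#include <pthread.h>
#include <stdlib.h>

#define MAX_SLOTS 1024

struct dev { int id; };

static struct dev *slots[MAX_SLOTS];
static pthread_mutex_t slots_lock = PTHREAD_MUTEX_INITIALIZER;

/* Returns the claimed slot index, or -1 on failure. */
static int
claim_slot(void)
{
	int i;

	pthread_mutex_lock(&slots_lock);
	for (i = 0; i < MAX_SLOTS; i++)
		if (slots[i] == NULL)
			break;
	if (i == MAX_SLOTS) {
		pthread_mutex_unlock(&slots_lock);
		return -1;                /* table full */
	}
	slots[i] = calloc(1, sizeof(struct dev));
	if (slots[i] == NULL) {
		pthread_mutex_unlock(&slots_lock);
		return -1;                /* allocation failed */
	}
	pthread_mutex_unlock(&slots_lock);
	return i;                         /* slot reserved; safe to fill unlocked */
}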
if (unlikely(idx >= vq->size))
return -1;
- rte_smp_mb();
+ rte_atomic_thread_fence(__ATOMIC_SEQ_CST);
vq->inflight_split->desc[idx].inflight = 0;
- rte_smp_mb();
+ rte_atomic_thread_fence(__ATOMIC_SEQ_CST);
vq->inflight_split->used_idx = last_used_idx;
return 0;
if (unlikely(head >= vq->size))
return -1;
- rte_smp_mb();
+ rte_atomic_thread_fence(__ATOMIC_SEQ_CST);
inflight_info->desc[head].inflight = 0;
- rte_smp_mb();
+ rte_atomic_thread_fence(__ATOMIC_SEQ_CST);
inflight_info->old_free_head = inflight_info->free_head;
inflight_info->old_used_idx = inflight_info->used_idx;
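
The inflight bookkeeping above is shared with the frontend across a crash and reconnect, so the full barriers (formerly rte_smp_mb()) enforce a strict order: whatever the descriptor's completion implies must be visible before its inflight flag is cleared, and the cleared flag must be visible before the used index moves past it. A minimal sketch of that two-step publication (field and function names illustrative):

#include <stdint.h>
#include <rte_atomic.h>

struct inflight_entry {
	uint8_t inflight;          /* 1 while the descriptor is in flight */
};

static void
retire_descriptor(struct inflight_entry *desc, uint16_t *used_idx,
		uint16_t new_used_idx)
{
	rte_atomic_thread_fence(__ATOMIC_SEQ_CST); /* ring writes first... */
	desc->inflight = 0;                        /* ...then clear the flag */
	rte_atomic_thread_fence(__ATOMIC_SEQ_CST); /* flag clear first... */
	*used_idx = new_used_idx;                  /* ...then publish the index */
}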
vq->avail_wrap_counter << 15;
}
- rte_smp_wmb();
+ rte_atomic_thread_fence(__ATOMIC_RELEASE);
vq->device_event->flags = flags;
return 0;
uint16_t *last_used_idx)
{
struct rte_vhost_inflight_info_packed *inflight_info;
+ struct vhost_virtqueue *vq;
struct virtio_net *dev = get_device(vid);
if (dev == NULL || last_avail_idx == NULL || last_used_idx == NULL)
return -1;
+ if (queue_id >= VHOST_MAX_VRING)
+ return -1;
+
+ vq = dev->virtqueue[queue_id];
+ if (!vq)
+ return -1;
+
if (!vq_is_packed(dev))
return -1;
- inflight_info = dev->virtqueue[queue_id]->inflight_packed;
+ inflight_info = vq->inflight_packed;
if (!inflight_info)
return -1;
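
The pattern added here, and repeated in the async paths below, is validate-before-dereference: these public APIs take a caller-supplied queue_id, so it is range-checked against VHOST_MAX_VRING and the resulting virtqueue pointer is NULL-checked before any field is touched. A stripped-down sketch (the lookup helper and table size are illustrative):

#include <stddef.h>
#include <stdint.h>

#define MAX_VRING 256

static void *vrings[MAX_VRING];

static void *
lookup_vq(uint16_t queue_id)
{
	if (queue_id >= MAX_VRING)  /* reject out-of-range ids first */
		return NULL;
	return vrings[queue_id];    /* may still be NULL if never allocated */
}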
f.intval = features;
+ if (queue_id >= VHOST_MAX_VRING)
+ return -1;
+
vq = dev->virtqueue[queue_id];
if (unlikely(vq == NULL || !dev->async_copy))
node = SOCKET_ID_ANY;
#endif
- vq->async_pkts_pending = rte_malloc_socket(NULL,
- vq->size * sizeof(uintptr_t),
- RTE_CACHE_LINE_SIZE, node);
vq->async_pkts_info = rte_malloc_socket(NULL,
vq->size * sizeof(struct async_inflight_info),
RTE_CACHE_LINE_SIZE, node);
vq->vec_pool = rte_malloc_socket(NULL,
VHOST_MAX_ASYNC_VEC * sizeof(struct iovec),
RTE_CACHE_LINE_SIZE, node);
- if (!vq->async_pkts_pending || !vq->async_pkts_info ||
+ vq->async_descs_split = rte_malloc_socket(NULL,
+ vq->size * sizeof(struct vring_used_elem),
+ RTE_CACHE_LINE_SIZE, node);
+ if (!vq->async_descs_split || !vq->async_pkts_info ||
!vq->it_pool || !vq->vec_pool) {
vhost_free_async_mem(vq);
VHOST_LOG_CONFIG(ERR,
if (dev == NULL)
return ret;
+ if (queue_id >= VHOST_MAX_VRING)
+ return ret;
+
vq = dev->virtqueue[queue_id];
if (vq == NULL)