#include "vhost_user.h"
struct virtio_net *vhost_devices[MAX_VHOST_DEVICE];
+pthread_mutex_t vhost_dev_lock = PTHREAD_MUTEX_INITIALIZER;
/* Called with iotlb_lock read-locked */
uint64_t
/*
 * Release all async-channel memory attached to a virtqueue and reset
 * the pointers so the queue can be safely re-registered later.
 *
 * Note: rte_free(NULL) is a documented no-op (same contract as free()),
 * so no NULL guards are needed before the calls.
 */
static void
vhost_free_async_mem(struct vhost_virtqueue *vq)
{
	rte_free(vq->async_pkts_info);
	rte_free(vq->async_descs_split);
	rte_free(vq->it_pool);
	rte_free(vq->vec_pool);

	/* Clear the pointers to defend against double-free / use-after-free
	 * if this is called twice on the same queue.
	 */
	vq->async_pkts_info = NULL;
	vq->async_descs_split = NULL;
	vq->it_pool = NULL;
	vq->vec_pool = NULL;
}
struct virtio_net *dev;
int i;
+ pthread_mutex_lock(&vhost_dev_lock);
for (i = 0; i < MAX_VHOST_DEVICE; i++) {
if (vhost_devices[i] == NULL)
break;
if (i == MAX_VHOST_DEVICE) {
VHOST_LOG_CONFIG(ERR,
"Failed to find a free slot for new device.\n");
+ pthread_mutex_unlock(&vhost_dev_lock);
return -1;
}
if (dev == NULL) {
VHOST_LOG_CONFIG(ERR,
"Failed to allocate memory for new dev.\n");
+ pthread_mutex_unlock(&vhost_dev_lock);
return -1;
}
vhost_devices[i] = dev;
+ pthread_mutex_unlock(&vhost_dev_lock);
+
dev->vid = i;
dev->flags = VIRTIO_DEV_BUILTIN_VIRTIO_NET;
dev->slave_req_fd = -1;
node = SOCKET_ID_ANY;
#endif
- vq->async_pkts_pending = rte_malloc_socket(NULL,
- vq->size * sizeof(uintptr_t),
- RTE_CACHE_LINE_SIZE, node);
vq->async_pkts_info = rte_malloc_socket(NULL,
vq->size * sizeof(struct async_inflight_info),
RTE_CACHE_LINE_SIZE, node);
vq->vec_pool = rte_malloc_socket(NULL,
VHOST_MAX_ASYNC_VEC * sizeof(struct iovec),
RTE_CACHE_LINE_SIZE, node);
- if (!vq->async_pkts_pending || !vq->async_pkts_info ||
+ vq->async_descs_split = rte_malloc_socket(NULL,
+ vq->size * sizeof(struct vring_used_elem),
+ RTE_CACHE_LINE_SIZE, node);
+ if (!vq->async_descs_split || !vq->async_pkts_info ||
!vq->it_pool || !vq->vec_pool) {
vhost_free_async_mem(vq);
VHOST_LOG_CONFIG(ERR,