[VHOST_USER_POSTCOPY_END] = "VHOST_USER_POSTCOPY_END",
[VHOST_USER_GET_INFLIGHT_FD] = "VHOST_USER_GET_INFLIGHT_FD",
[VHOST_USER_SET_INFLIGHT_FD] = "VHOST_USER_SET_INFLIGHT_FD",
+ [VHOST_USER_SET_STATUS] = "VHOST_USER_SET_STATUS",
+ [VHOST_USER_GET_STATUS] = "VHOST_USER_GET_STATUS",
};
static int send_vhost_reply(int sockfd, struct VhostUserMsg *msg);

static void
close_msg_fds(struct VhostUserMsg *msg)
{
int i;
- for (i = 0; i < msg->fd_num; i++)
- close(msg->fds[i]);
+ for (i = 0; i < msg->fd_num; i++) {
+ int fd = msg->fds[i];
+
+ if (fd == -1)
+ continue;
+
+ msg->fds[i] = -1;
+ close(fd);
+ }
}
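
The loop now consumes each descriptor (marking it -1) before closing it, which makes the cleanup idempotent: when both a handler error path and the generic message teardown walk the same array, the second pass skips the already-closed entries. A standalone sketch of the idiom, with hypothetical names:

    #include <unistd.h>

    /* Close every valid fd exactly once; safe to call repeatedly. */
    static void
    close_fds_once(int *fds, int nfds)
    {
            int i;

            for (i = 0; i < nfds; i++) {
                    int fd = fds[i];

                    if (fd == -1)
                            continue;

                    fds[i] = -1; /* consume before close */
                    close(fd);
            }
    }
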
return ret == -1 ? (uint64_t)-1 : (uint64_t)stat.st_blksize;
}
-/*
- * Reclaim all the outstanding zmbufs for a virtqueue.
- */
-static void
-drain_zmbuf_list(struct vhost_virtqueue *vq)
-{
- struct zcopy_mbuf *zmbuf, *next;
-
- for (zmbuf = TAILQ_FIRST(&vq->zmbuf_list);
- zmbuf != NULL; zmbuf = next) {
- next = TAILQ_NEXT(zmbuf, next);
-
- while (!mbuf_is_consumed(zmbuf->mbuf))
- usleep(1000);
-
- TAILQ_REMOVE(&vq->zmbuf_list, zmbuf, next);
- restore_mbuf(zmbuf->mbuf);
- rte_pktmbuf_free(zmbuf->mbuf);
- put_zmbuf(zmbuf);
- vq->nr_zmbuf -= 1;
- }
-}
-
static void
free_mem_region(struct virtio_net *dev)
{
uint32_t i;
struct rte_vhost_mem_region *reg;
- struct vhost_virtqueue *vq;
if (!dev || !dev->mem)
return;
- if (dev->dequeue_zero_copy) {
- for (i = 0; i < dev->nr_vring; i++) {
- vq = dev->virtqueue[i];
- if (vq)
- drain_zmbuf_list(vq);
- }
- }
-
for (i = 0; i < dev->mem->nregions; i++) {
reg = &dev->mem->regions[i];
if (reg->host_user_addr) {
dev->inflight_info->addr = NULL;
}
- if (dev->inflight_info->fd > 0) {
+ if (dev->inflight_info->fd >= 0) {
close(dev->inflight_info->fd);
dev->inflight_info->fd = -1;
}
dev->postcopy_listening = 0;
}
+static void
+vhost_user_notify_queue_state(struct virtio_net *dev, uint16_t index,
+ int enable)
+{
+ struct rte_vdpa_device *vdpa_dev = dev->vdpa_dev;
+ struct vhost_virtqueue *vq = dev->virtqueue[index];
+
+ /* Configure guest notifications on enable */
+ if (enable && vq->notif_enable != VIRTIO_UNINITIALIZED_NOTIF)
+ vhost_enable_guest_notification(dev, vq, vq->notif_enable);
+
+ if (vdpa_dev && vdpa_dev->ops->set_vring_state)
+ vdpa_dev->ops->set_vring_state(dev->vid, index, enable);
+
+ if (dev->notify_ops->vring_state_changed)
+ dev->notify_ops->vring_state_changed(dev->vid,
+ index, enable);
+}
+
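
An application consumes these per-queue transitions through the vring_state_changed callback of struct vhost_device_ops. A minimal sketch of such a consumer; the app_queue_enabled bookkeeping and APP_MAX_QUEUES are hypothetical, not part of this patch:

    #include <stdbool.h>
    #include <stdint.h>
    #include <rte_vhost.h>

    #define APP_MAX_QUEUES 128

    /* Hypothetical per-queue state for a single vhost device. */
    static bool app_queue_enabled[APP_MAX_QUEUES];

    static int
    app_vring_state_changed(int vid, uint16_t queue_id, int enable)
    {
            (void)vid; /* single-device example */

            /* Stop polling the ring before the backend reconfigures it. */
            if (queue_id < APP_MAX_QUEUES)
                    app_queue_enabled[queue_id] = !!enable;
            return 0;
    }

    static const struct vhost_device_ops app_ops = {
            .vring_state_changed = app_vring_state_changed,
            /* .new_device, .destroy_device, ... */
    };

The ops would be registered once per socket with rte_vhost_driver_callback_register(path, &app_ops).
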
/*
 * This function just returns success at the moment; it fails only
 * if the device has not been initialised.
uint64_t features = msg->payload.u64;
uint64_t vhost_features = 0;
struct rte_vdpa_device *vdpa_dev;
- int did = -1;
if (validate_msg_fds(msg, 0) != 0)
return RTE_VHOST_MSG_RESULT_ERR;
VHOST_LOG_CONFIG(ERR,
"(%d) received invalid negotiated features.\n",
dev->vid);
+ dev->flags |= VIRTIO_DEV_FEATURES_FAILED;
+ dev->status &= ~VIRTIO_DEVICE_STATUS_FEATURES_OK;
+
return RTE_VHOST_MSG_RESULT_ERR;
}
dev->features = features;
if (dev->features &
- ((1 << VIRTIO_NET_F_MRG_RXBUF) | (1ULL << VIRTIO_F_VERSION_1))) {
+ ((1ULL << VIRTIO_NET_F_MRG_RXBUF) |
+ (1ULL << VIRTIO_F_VERSION_1) |
+ (1ULL << VIRTIO_F_RING_PACKED))) {
dev->vhost_hlen = sizeof(struct virtio_net_hdr_mrg_rxbuf);
} else {
dev->vhost_hlen = sizeof(struct virtio_net_hdr);
}
}
- did = dev->vdpa_dev_id;
- vdpa_dev = rte_vdpa_get_device(did);
- if (vdpa_dev && vdpa_dev->ops->set_features)
+ vdpa_dev = dev->vdpa_dev;
+ if (vdpa_dev)
vdpa_dev->ops->set_features(dev->vid);
+ dev->flags &= ~VIRTIO_DEV_FEATURES_FAILED;
return RTE_VHOST_MSG_RESULT_OK;
}
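
The header-length selection above follows the virtio-net spec: mergeable RX buffers, VIRTIO_F_VERSION_1 and the packed ring layout all imply the extended header carrying a num_buffers field. For reference, the two layouts (10 vs. 12 bytes) as the spec defines them:

    #include <stdint.h>

    struct virtio_net_hdr {                   /* 10 bytes */
            uint8_t  flags;
            uint8_t  gso_type;
            uint16_t hdr_len;
            uint16_t gso_size;
            uint16_t csum_start;
            uint16_t csum_offset;
    };

    struct virtio_net_hdr_mrg_rxbuf {         /* 12 bytes */
            struct virtio_net_hdr hdr;
            uint16_t num_buffers;
    };
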
return RTE_VHOST_MSG_RESULT_ERR;
}
- if (dev->dequeue_zero_copy) {
- vq->nr_zmbuf = 0;
- vq->last_zmbuf_idx = 0;
- vq->zmbuf_size = vq->size;
- if (vq->zmbufs)
- rte_free(vq->zmbufs);
- vq->zmbufs = rte_zmalloc(NULL, vq->zmbuf_size *
- sizeof(struct zcopy_mbuf), 0);
- if (vq->zmbufs == NULL) {
- VHOST_LOG_CONFIG(WARNING,
- "failed to allocate mem for zero copy; "
- "zero copy is force disabled\n");
- dev->dequeue_zero_copy = 0;
- }
- TAILQ_INIT(&vq->zmbuf_list);
- }
-
if (vq_is_packed(dev)) {
if (vq->shadow_used_packed)
rte_free(vq->shadow_used_packed);
} else {
if (vq->shadow_used_split)
rte_free(vq->shadow_used_split);
+
vq->shadow_used_split = rte_malloc(NULL,
vq->size * sizeof(struct vring_used_elem),
RTE_CACHE_LINE_SIZE);
+
if (!vq->shadow_used_split) {
VHOST_LOG_CONFIG(ERR,
- "failed to allocate memory for shadow used ring.\n");
+ "failed to allocate memory for vq internal data.\n");
return RTE_VHOST_MSG_RESULT_ERR;
}
}
int oldnode, newnode;
struct virtio_net *old_dev;
struct vhost_virtqueue *old_vq, *vq;
- struct zcopy_mbuf *new_zmbuf;
struct vring_used_elem *new_shadow_used_split;
struct vring_used_elem_packed *new_shadow_used_packed;
struct batch_copy_elem *new_batch_copy_elems;
return dev;
memcpy(vq, old_vq, sizeof(*vq));
- TAILQ_INIT(&vq->zmbuf_list);
-
- if (dev->dequeue_zero_copy) {
- new_zmbuf = rte_malloc_socket(NULL, vq->zmbuf_size *
- sizeof(struct zcopy_mbuf), 0, newnode);
- if (new_zmbuf) {
- rte_free(vq->zmbufs);
- vq->zmbufs = new_zmbuf;
- }
- }
if (vq_is_packed(dev)) {
new_shadow_used_packed = rte_malloc_socket(NULL,
uint64_t alignment;
uint32_t i;
int populate;
- int fd;
if (validate_msg_fds(msg, memory->nregions) != 0)
return RTE_VHOST_MSG_RESULT_ERR;
if (memory->nregions > VHOST_MEMORY_MAX_NREGIONS) {
VHOST_LOG_CONFIG(ERR,
"too many memory regions (%u)\n", memory->nregions);
- return RTE_VHOST_MSG_RESULT_ERR;
+ goto close_msg_fds;
}
if (dev->mem && !vhost_memory_changed(memory, dev->mem)) {
}
if (dev->mem) {
+ if (dev->flags & VIRTIO_DEV_VDPA_CONFIGURED) {
+ struct rte_vdpa_device *vdpa_dev = dev->vdpa_dev;
+
+ if (vdpa_dev && vdpa_dev->ops->dev_close)
+ vdpa_dev->ops->dev_close(dev->vid);
+ dev->flags &= ~VIRTIO_DEV_VDPA_CONFIGURED;
+ }
free_mem_region(dev);
rte_free(dev->mem);
dev->mem = NULL;
"(%d) failed to allocate memory "
"for dev->guest_pages\n",
dev->vid);
- return RTE_VHOST_MSG_RESULT_ERR;
+ goto close_msg_fds;
}
}
VHOST_LOG_CONFIG(ERR,
"(%d) failed to allocate memory for dev->mem\n",
dev->vid);
- return RTE_VHOST_MSG_RESULT_ERR;
+ goto free_guest_pages;
}
dev->mem->nregions = memory->nregions;
for (i = 0; i < memory->nregions; i++) {
- fd = msg->fds[i];
reg = &dev->mem->regions[i];
reg->guest_phys_addr = memory->regions[i].guest_phys_addr;
reg->guest_user_addr = memory->regions[i].userspace_addr;
reg->size = memory->regions[i].memory_size;
- reg->fd = fd;
+ reg->fd = msg->fds[i];
+
+ /*
+ * Assign invalid file descriptor value to avoid double
+ * closing on error path.
+ */
+ msg->fds[i] = -1;
mmap_offset = memory->regions[i].mmap_offset;
"mmap_offset (%#"PRIx64") and memory_size "
"(%#"PRIx64") overflow\n",
mmap_offset, reg->size);
- goto err_mmap;
+ goto free_mem_table;
}
mmap_size = reg->size + mmap_offset;
 * To avoid failure, make sure the caller keeps the length
 * aligned.
*/
- alignment = get_blk_size(fd);
+ alignment = get_blk_size(reg->fd);
if (alignment == (uint64_t)-1) {
VHOST_LOG_CONFIG(ERR,
"couldn't get hugepage size through fstat\n");
- goto err_mmap;
+ goto free_mem_table;
}
mmap_size = RTE_ALIGN_CEIL(mmap_size, alignment);
if (mmap_size == 0) {
VHOST_LOG_CONFIG(ERR, "mmap size (0x%" PRIx64 ") "
"or alignment (0x%" PRIx64 ") is invalid\n",
reg->size + mmap_offset, alignment);
- goto err_mmap;
+ goto free_mem_table;
}
- populate = (dev->dequeue_zero_copy) ? MAP_POPULATE : 0;
+ populate = dev->async_copy ? MAP_POPULATE : 0;
mmap_addr = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE,
- MAP_SHARED | populate, fd, 0);
+ MAP_SHARED | populate, reg->fd, 0);
if (mmap_addr == MAP_FAILED) {
VHOST_LOG_CONFIG(ERR,
"mmap region %u failed.\n", i);
- goto err_mmap;
+ goto free_mem_table;
}
reg->mmap_addr = mmap_addr;
reg->host_user_addr = (uint64_t)(uintptr_t)mmap_addr +
mmap_offset;
- if (dev->dequeue_zero_copy)
+ if (dev->async_copy)
if (add_guest_pages(dev, reg, alignment) < 0) {
VHOST_LOG_CONFIG(ERR,
"adding guest pages to region %u failed.\n",
i);
- goto err_mmap;
+ goto free_mem_table;
}
VHOST_LOG_CONFIG(INFO,
if (read_vhost_message(main_fd, &ack_msg) <= 0) {
VHOST_LOG_CONFIG(ERR,
"Failed to read qemu ack on postcopy set-mem-table\n");
- goto err_mmap;
+ goto free_mem_table;
}
if (validate_msg_fds(&ack_msg, 0) != 0)
- goto err_mmap;
+ goto free_mem_table;
if (ack_msg.request.master != VHOST_USER_SET_MEM_TABLE) {
VHOST_LOG_CONFIG(ERR,
"Bad qemu ack on postcopy set-mem-table (%d)\n",
ack_msg.request.master);
- goto err_mmap;
+ goto free_mem_table;
}
 /* Now register the range with userfaultfd so the memory can be used */
"Failed to register ufd for region %d: (ufd = %d) %s\n",
i, dev->postcopy_ufd,
strerror(errno));
- goto err_mmap;
+ goto free_mem_table;
}
VHOST_LOG_CONFIG(INFO,
"\t userfaultfd registered for range : "
(uint64_t)reg_struct.range.start +
(uint64_t)reg_struct.range.len - 1);
#else
- goto err_mmap;
+ goto free_mem_table;
#endif
}
}
for (i = 0; i < dev->nr_vring; i++) {
struct vhost_virtqueue *vq = dev->virtqueue[i];
+ if (!vq)
+ continue;
+
if (vq->desc || vq->avail || vq->used) {
/*
* If the memory table got updated, the ring addresses
dev = translate_ring_addresses(dev, i);
if (!dev) {
dev = *pdev;
- goto err_mmap;
+ goto free_mem_table;
}
*pdev = dev;
return RTE_VHOST_MSG_RESULT_OK;
-err_mmap:
+free_mem_table:
free_mem_region(dev);
rte_free(dev->mem);
dev->mem = NULL;
+free_guest_pages:
+ rte_free(dev->guest_pages);
+ dev->guest_pages = NULL;
+close_msg_fds:
+ close_msg_fds(msg);
return RTE_VHOST_MSG_RESULT_ERR;
}
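
The renamed labels form the usual C cleanup ladder: each label frees exactly what was acquired before the failing step, in reverse allocation order (memory table, then guest pages, then the message fds). A generic sketch of the idiom with hypothetical resources:

    #include <stdlib.h>

    static int
    acquire_all(void **a, void **b)
    {
            *a = malloc(64);
            if (*a == NULL)
                    goto err;

            *b = malloc(64);
            if (*b == NULL)
                    goto free_a; /* unwind everything acquired so far */

            return 0;

    free_a:
            free(*a);
            *a = NULL;
    err:
            return -1;
    }
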
return rings_ok &&
vq->kickfd != VIRTIO_UNINITIALIZED_EVENTFD &&
- vq->callfd != VIRTIO_UNINITIALIZED_EVENTFD;
+ vq->callfd != VIRTIO_UNINITIALIZED_EVENTFD &&
+ vq->enabled;
}
+#define VIRTIO_BUILTIN_NUM_VQS_TO_BE_READY 2u
+
static int
virtio_is_ready(struct virtio_net *dev)
{
struct vhost_virtqueue *vq;
- uint32_t i;
+ uint32_t i, nr_vring = dev->nr_vring;
+
+ if (dev->flags & VIRTIO_DEV_READY)
+ return 1;
- if (dev->nr_vring == 0)
+ if (!dev->nr_vring)
return 0;
- for (i = 0; i < dev->nr_vring; i++) {
+ if (dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET) {
+ nr_vring = VIRTIO_BUILTIN_NUM_VQS_TO_BE_READY;
+
+ if (dev->nr_vring < nr_vring)
+ return 0;
+ }
+
+ for (i = 0; i < nr_vring; i++) {
vq = dev->virtqueue[i];
if (!vq_is_ready(dev, vq))
return 0;
}
- VHOST_LOG_CONFIG(INFO,
- "virtio is now ready for processing.\n");
+ /* If supported, ensure the frontend is really done with config */
+ if (dev->protocol_features & (1ULL << VHOST_USER_PROTOCOL_F_STATUS))
+ if (!(dev->status & VIRTIO_DEVICE_STATUS_DRIVER_OK))
+ return 0;
+
+ dev->flags |= VIRTIO_DEV_READY;
+
+ if (!(dev->flags & VIRTIO_DEV_RUNNING))
+ VHOST_LOG_CONFIG(INFO,
+ "virtio is now ready for processing.\n");
return 1;
}
"failed to alloc dev inflight area\n");
return RTE_VHOST_MSG_RESULT_ERR;
}
+ dev->inflight_info->fd = -1;
}
num_queues = msg->payload.inflight.num_queues;
}
memset(addr, 0, mmap_size);
+ if (dev->inflight_info->addr) {
+ munmap(dev->inflight_info->addr, dev->inflight_info->size);
+ dev->inflight_info->addr = NULL;
+ }
+
+ if (dev->inflight_info->fd >= 0) {
+ close(dev->inflight_info->fd);
+ dev->inflight_info->fd = -1;
+ }
+
dev->inflight_info->addr = addr;
dev->inflight_info->size = msg->payload.inflight.mmap_size = mmap_size;
dev->inflight_info->fd = msg->fds[0] = fd;
"failed to alloc dev inflight area\n");
return RTE_VHOST_MSG_RESULT_ERR;
}
+ dev->inflight_info->fd = -1;
}
- if (dev->inflight_info->addr)
+ if (dev->inflight_info->addr) {
munmap(dev->inflight_info->addr, dev->inflight_info->size);
+ dev->inflight_info->addr = NULL;
+ }
addr = mmap(0, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED,
fd, mmap_offset);
return RTE_VHOST_MSG_RESULT_ERR;
}
- if (dev->inflight_info->fd)
+ if (dev->inflight_info->fd >= 0) {
close(dev->inflight_info->fd);
+ dev->inflight_info->fd = -1;
+ }
dev->inflight_info->fd = fd;
dev->inflight_info->addr = addr;
for (i = 0; i < num_queues; i++) {
vq = dev->virtqueue[i];
+ if (!vq)
+ continue;
+
if (vq_is_packed(dev)) {
vq->inflight_packed = addr;
vq->inflight_packed->desc_num = queue_size;
"vring call idx:%d file:%d\n", file.index, file.fd);
vq = dev->virtqueue[file.index];
+
+ if (vq->ready) {
+ vq->ready = 0;
+ vhost_user_notify_queue_state(dev, file.index, 0);
+ }
+
if (vq->callfd >= 0)
close(vq->callfd);
/* Interpret ring addresses only when ring is started. */
dev = translate_ring_addresses(dev, file.index);
- if (!dev)
+ if (!dev) {
+ if (file.fd != VIRTIO_INVALID_EVENTFD)
+ close(file.fd);
+
return RTE_VHOST_MSG_RESULT_ERR;
+ }
*pdev = dev;
dev->vid, file.index, 1);
}
+ if (vq->ready) {
+ vq->ready = 0;
+ vhost_user_notify_queue_state(dev, file.index, 0);
+ }
+
if (vq->kickfd >= 0)
close(vq->kickfd);
vq->kickfd = file.fd;
return RTE_VHOST_MSG_RESULT_OK;
}
-static void
-free_zmbufs(struct vhost_virtqueue *vq)
-{
- drain_zmbuf_list(vq);
-
- rte_free(vq->zmbufs);
-}
-
/*
* when virtio is stopped, qemu will send us the GET_VRING_BASE message.
*/
vq->signalled_used_valid = false;
- if (dev->dequeue_zero_copy)
- free_zmbufs(vq);
if (vq_is_packed(dev)) {
rte_free(vq->shadow_used_packed);
vq->shadow_used_packed = NULL;
} else {
rte_free(vq->shadow_used_split);
vq->shadow_used_split = NULL;
+ if (vq->async_pkts_pending)
+ rte_free(vq->async_pkts_pending);
+ if (vq->async_pkts_info)
+ rte_free(vq->async_pkts_info);
+ vq->async_pkts_pending = NULL;
+ vq->async_pkts_info = NULL;
}
rte_free(vq->batch_copy_elems);
struct virtio_net *dev = *pdev;
int enable = (int)msg->payload.state.num;
int index = (int)msg->payload.state.index;
- struct rte_vdpa_device *vdpa_dev;
- int did = -1;
if (validate_msg_fds(msg, 0) != 0)
return RTE_VHOST_MSG_RESULT_ERR;
"set queue enable: %d to qp idx: %d\n",
enable, index);
- did = dev->vdpa_dev_id;
- vdpa_dev = rte_vdpa_get_device(did);
- if (vdpa_dev && vdpa_dev->ops->set_vring_state)
- vdpa_dev->ops->set_vring_state(dev->vid, index, enable);
-
- if (dev->notify_ops->vring_state_changed)
- dev->notify_ops->vring_state_changed(dev->vid,
- index, enable);
-
- /* On disable, rings have to be stopped being processed. */
- if (!enable && dev->dequeue_zero_copy)
- drain_zmbuf_list(dev->virtqueue[index]);
+ if (enable && dev->virtqueue[index]->async_registered) {
+ if (dev->virtqueue[index]->async_pkts_inflight_n) {
+ VHOST_LOG_CONFIG(ERR, "failed to enable vring. "
+ "async inflight packets must be completed first\n");
+ return RTE_VHOST_MSG_RESULT_ERR;
+ }
+ }
dev->virtqueue[index]->enabled = enable;
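
The new check refuses to re-enable a ring that has async copy registered while completions are still in flight; the application is expected to drain them first. A sketch of such a drain loop, assuming the async enqueue API of this release (rte_vhost_poll_enqueue_completed) and a hypothetical BURST_SZ:

    #include <rte_mbuf.h>
    #include <rte_vhost_async.h>

    #define BURST_SZ 32

    static void
    drain_async_inflight(int vid, uint16_t queue_id)
    {
            struct rte_mbuf *pkts[BURST_SZ];
            uint16_t n;

            do {
                    /* Reap descriptors whose DMA copies completed. */
                    n = rte_vhost_poll_enqueue_completed(vid, queue_id,
                                    pkts, BURST_SZ);
                    rte_pktmbuf_free_bulk(pkts, n);
            } while (n > 0);
    }
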
rte_vhost_driver_get_features(dev->ifname, &features);
rte_vhost_driver_get_protocol_features(dev->ifname, &protocol_features);
- /*
- * REPLY_ACK protocol feature is only mandatory for now
- * for IOMMU feature. If IOMMU is explicitly disabled by the
- * application, disable also REPLY_ACK feature for older buggy
- * Qemu versions (from v2.7.0 to v2.9.0).
- */
- if (!(features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)))
- protocol_features &= ~(1ULL << VHOST_USER_PROTOCOL_F_REPLY_ACK);
-
msg->payload.u64 = protocol_features;
msg->size = sizeof(msg->payload.u64);
msg->fd_num = 0;
VHOST_LOG_CONFIG(ERR,
"invalid log base msg size: %"PRId32" != %d\n",
msg->size, (int)sizeof(VhostUserLog));
- return RTE_VHOST_MSG_RESULT_ERR;
+ goto close_msg_fds;
}
size = msg->payload.log.mmap_size;
off = msg->payload.log.mmap_offset;
- /* Don't allow mmap_offset to point outside the mmap region */
- if (off > size) {
+ /* Check for mmap size and offset overflow. */
+ if (off >= -size) {
VHOST_LOG_CONFIG(ERR,
- "log offset %#"PRIx64" exceeds log size %#"PRIx64"\n",
+ "log offset %#"PRIx64" and log size %#"PRIx64" overflow\n",
off, size);
- return RTE_VHOST_MSG_RESULT_ERR;
+ goto close_msg_fds;
}
VHOST_LOG_CONFIG(INFO,
msg->fd_num = 0;
return RTE_VHOST_MSG_RESULT_REPLY;
+
+close_msg_fds:
+ close_msg_fds(msg);
+ return RTE_VHOST_MSG_RESULT_ERR;
}
static int vhost_user_set_log_fd(struct virtio_net **pdev __rte_unused,
struct virtio_net *dev = *pdev;
uint8_t *mac = (uint8_t *)&msg->payload.u64;
struct rte_vdpa_device *vdpa_dev;
- int did = -1;
if (validate_msg_fds(msg, 0) != 0)
return RTE_VHOST_MSG_RESULT_ERR;
* copied before the flag is set.
*/
__atomic_store_n(&dev->broadcast_rarp, 1, __ATOMIC_RELEASE);
- did = dev->vdpa_dev_id;
- vdpa_dev = rte_vdpa_get_device(did);
+ vdpa_dev = dev->vdpa_dev;
if (vdpa_dev && vdpa_dev->ops->migration_done)
vdpa_dev->ops->migration_done(dev->vid);
for (i = 0; i < dev->nr_vring; i++) {
struct vhost_virtqueue *vq = dev->virtqueue[i];
+ if (!vq)
+ continue;
+
vhost_user_iotlb_cache_insert(vq, imsg->iova, vva,
len, imsg->perm);
for (i = 0; i < dev->nr_vring; i++) {
struct vhost_virtqueue *vq = dev->virtqueue[i];
+ if (!vq)
+ continue;
+
vhost_user_iotlb_cache_remove(vq, imsg->iova,
imsg->size);
return RTE_VHOST_MSG_RESULT_REPLY;
}
+static int
+vhost_user_get_status(struct virtio_net **pdev, struct VhostUserMsg *msg,
+ int main_fd __rte_unused)
+{
+ struct virtio_net *dev = *pdev;
+
+ if (validate_msg_fds(msg, 0) != 0)
+ return RTE_VHOST_MSG_RESULT_ERR;
+
+ msg->payload.u64 = dev->status;
+ msg->size = sizeof(msg->payload.u64);
+ msg->fd_num = 0;
+
+ return RTE_VHOST_MSG_RESULT_REPLY;
+}
+
+static int
+vhost_user_set_status(struct virtio_net **pdev, struct VhostUserMsg *msg,
+ int main_fd __rte_unused)
+{
+ struct virtio_net *dev = *pdev;
+
+ if (validate_msg_fds(msg, 0) != 0)
+ return RTE_VHOST_MSG_RESULT_ERR;
+
+ /* As per the Virtio specification, the device status is 8 bits wide */
+ if (msg->payload.u64 > UINT8_MAX) {
+ VHOST_LOG_CONFIG(ERR, "Invalid VHOST_USER_SET_STATUS payload 0x%" PRIx64 "\n",
+ msg->payload.u64);
+ return RTE_VHOST_MSG_RESULT_ERR;
+ }
+
+ dev->status = msg->payload.u64;
+
+ if ((dev->status & VIRTIO_DEVICE_STATUS_FEATURES_OK) &&
+ (dev->flags & VIRTIO_DEV_FEATURES_FAILED)) {
+ VHOST_LOG_CONFIG(ERR, "FEATURES_OK bit is set but feature negotiation failed\n");
+ /*
+ * Clear the bit to let the driver know about the feature
+ * negotiation failure
+ */
+ dev->status &= ~VIRTIO_DEVICE_STATUS_FEATURES_OK;
+ }
+
+ VHOST_LOG_CONFIG(INFO, "New device status (0x%08x):\n"
+ "\t-RESET: %u\n"
+ "\t-ACKNOWLEDGE: %u\n"
+ "\t-DRIVER: %u\n"
+ "\t-FEATURES_OK: %u\n"
+ "\t-DRIVER_OK: %u\n"
+ "\t-DEVICE_NEED_RESET: %u\n"
+ "\t-FAILED: %u\n",
+ dev->status,
+ (dev->status == VIRTIO_DEVICE_STATUS_RESET),
+ !!(dev->status & VIRTIO_DEVICE_STATUS_ACK),
+ !!(dev->status & VIRTIO_DEVICE_STATUS_DRIVER),
+ !!(dev->status & VIRTIO_DEVICE_STATUS_FEATURES_OK),
+ !!(dev->status & VIRTIO_DEVICE_STATUS_DRIVER_OK),
+ !!(dev->status & VIRTIO_DEVICE_STATUS_DEV_NEED_RESET),
+ !!(dev->status & VIRTIO_DEVICE_STATUS_FAILED));
+
+ return RTE_VHOST_MSG_RESULT_OK;
+}
+
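
For reference, the status bits decoded by the log message above, with the values the Virtio specification assigns them (the macro names mirror the VIRTIO_DEVICE_STATUS_* identifiers used in this patch):

    #define VIRTIO_DEVICE_STATUS_RESET          0x00
    #define VIRTIO_DEVICE_STATUS_ACK            0x01
    #define VIRTIO_DEVICE_STATUS_DRIVER         0x02
    #define VIRTIO_DEVICE_STATUS_DRIVER_OK      0x04
    #define VIRTIO_DEVICE_STATUS_FEATURES_OK    0x08
    #define VIRTIO_DEVICE_STATUS_DEV_NEED_RESET 0x40
    #define VIRTIO_DEVICE_STATUS_FAILED         0x80
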
typedef int (*vhost_message_handler_t)(struct virtio_net **pdev,
struct VhostUserMsg *msg,
int main_fd);
[VHOST_USER_POSTCOPY_END] = vhost_user_postcopy_end,
[VHOST_USER_GET_INFLIGHT_FD] = vhost_user_get_inflight_fd,
[VHOST_USER_SET_INFLIGHT_FD] = vhost_user_set_inflight_fd,
+ [VHOST_USER_SET_STATUS] = vhost_user_set_status,
+ [VHOST_USER_GET_STATUS] = vhost_user_get_status,
};
/* return bytes# of read on success or negative val on failure. */
vhost_user_check_and_alloc_queue_pair(struct virtio_net *dev,
struct VhostUserMsg *msg)
{
- uint16_t vring_idx;
+ uint32_t vring_idx;
switch (msg->request.master) {
case VHOST_USER_SET_VRING_KICK:
struct virtio_net *dev;
struct VhostUserMsg msg;
struct rte_vdpa_device *vdpa_dev;
- int did = -1;
int ret;
int unlock_required = 0;
bool handled;
int request;
+ uint32_t i;
dev = get_device(vid);
if (dev == NULL)
case VHOST_USER_SEND_RARP:
case VHOST_USER_NET_SET_MTU:
case VHOST_USER_SET_SLAVE_REQ_FD:
- vhost_user_lock_all_queue_pairs(dev);
- unlock_required = 1;
+ if (!(dev->flags & VIRTIO_DEV_VDPA_CONFIGURED)) {
+ vhost_user_lock_all_queue_pairs(dev);
+ unlock_required = 1;
+ }
break;
default:
break;
return -1;
}
- if (!(dev->flags & VIRTIO_DEV_RUNNING) && virtio_is_ready(dev)) {
- dev->flags |= VIRTIO_DEV_READY;
-
- if (!(dev->flags & VIRTIO_DEV_RUNNING)) {
- if (dev->dequeue_zero_copy) {
- VHOST_LOG_CONFIG(INFO,
- "dequeue zero copy is enabled\n");
- }
+ for (i = 0; i < dev->nr_vring; i++) {
+ struct vhost_virtqueue *vq = dev->virtqueue[i];
+ bool cur_ready = vq_is_ready(dev, vq);
- if (dev->notify_ops->new_device(dev->vid) == 0)
- dev->flags |= VIRTIO_DEV_RUNNING;
+ if (cur_ready != (vq && vq->ready)) {
+ vq->ready = cur_ready;
+ vhost_user_notify_queue_state(dev, i, cur_ready);
}
}
- did = dev->vdpa_dev_id;
- vdpa_dev = rte_vdpa_get_device(did);
- if (vdpa_dev && virtio_is_ready(dev) &&
- !(dev->flags & VIRTIO_DEV_VDPA_CONFIGURED) &&
- msg.request.master == VHOST_USER_SET_VRING_CALL) {
- if (vdpa_dev->ops->dev_conf)
- vdpa_dev->ops->dev_conf(dev->vid);
- dev->flags |= VIRTIO_DEV_VDPA_CONFIGURED;
+
+ if (!virtio_is_ready(dev))
+ goto out;
+
+ /*
+ * Virtio is now ready. If not done already, it is time
+ * to notify the application it can process the rings and
+ * configure the vDPA device if present.
+ */
+
+ if (!(dev->flags & VIRTIO_DEV_RUNNING)) {
+ if (dev->notify_ops->new_device(dev->vid) == 0)
+ dev->flags |= VIRTIO_DEV_RUNNING;
}
+ vdpa_dev = dev->vdpa_dev;
+ if (!vdpa_dev)
+ goto out;
+
+ if (!(dev->flags & VIRTIO_DEV_VDPA_CONFIGURED)) {
+ if (vdpa_dev->ops->dev_conf(dev->vid))
+ VHOST_LOG_CONFIG(ERR,
+ "Failed to configure vDPA device\n");
+ else
+ dev->flags |= VIRTIO_DEV_VDPA_CONFIGURED;
+ }
+
+out:
return 0;
}
return process_slave_message_reply(dev, &msg);
}
-int rte_vhost_host_notifier_ctrl(int vid, bool enable)
+int rte_vhost_host_notifier_ctrl(int vid, uint16_t qid, bool enable)
{
struct virtio_net *dev;
struct rte_vdpa_device *vdpa_dev;
- int vfio_device_fd, did, ret = 0;
+ int vfio_device_fd, ret = 0;
uint64_t offset, size;
- unsigned int i;
+ unsigned int i, q_start, q_last;
dev = get_device(vid);
if (!dev)
return -ENODEV;
- did = dev->vdpa_dev_id;
- if (did < 0)
- return -EINVAL;
+ vdpa_dev = dev->vdpa_dev;
+ if (vdpa_dev == NULL)
+ return -ENODEV;
if (!(dev->features & (1ULL << VIRTIO_F_VERSION_1)) ||
!(dev->features & (1ULL << VHOST_USER_F_PROTOCOL_FEATURES)) ||
(1ULL << VHOST_USER_PROTOCOL_F_HOST_NOTIFIER)))
return -ENOTSUP;
- vdpa_dev = rte_vdpa_get_device(did);
- if (!vdpa_dev)
- return -ENODEV;
+ if (qid == RTE_VHOST_QUEUE_ALL) {
+ q_start = 0;
+ q_last = dev->nr_vring - 1;
+ } else {
+ if (qid >= dev->nr_vring)
+ return -EINVAL;
+ q_start = qid;
+ q_last = qid;
+ }
RTE_FUNC_PTR_OR_ERR_RET(vdpa_dev->ops->get_vfio_device_fd, -ENOTSUP);
RTE_FUNC_PTR_OR_ERR_RET(vdpa_dev->ops->get_notify_area, -ENOTSUP);
return -ENOTSUP;
if (enable) {
- for (i = 0; i < dev->nr_vring; i++) {
+ for (i = q_start; i <= q_last; i++) {
if (vdpa_dev->ops->get_notify_area(vid, i, &offset,
&size) < 0) {
ret = -ENOTSUP;
}
} else {
disable:
- for (i = 0; i < dev->nr_vring; i++) {
+ for (i = q_start; i <= q_last; i++) {
vhost_user_slave_set_vring_host_notifier(dev, i, -1,
0, 0);
}
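
With the new qid parameter, a vDPA driver can relay a single ring's notifier instead of touching all of them; RTE_VHOST_QUEUE_ALL keeps the previous all-queues behaviour. A hypothetical driver-side use (the header providing the prototype is assumed to be rte_vdpa.h):

    #include <stdint.h>
    #include <rte_vdpa.h>

    /* Sketch: relay only the ring whose state changed. */
    static int
    example_relay_notifier(int vid, int vring, int state)
    {
            return rte_vhost_host_notifier_ctrl(vid, (uint16_t)vring,
                            state != 0);
    }
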