return ret == -1 ? (uint64_t)-1 : (uint64_t)stat.st_blksize;
}
+/*
+ * Reclaim all the outstanding zmbufs for a virtqueue.
+ */
+static void
+drain_zmbuf_list(struct vhost_virtqueue *vq)
+{
+ struct zcopy_mbuf *zmbuf, *next;
+
+ for (zmbuf = TAILQ_FIRST(&vq->zmbuf_list);
+ zmbuf != NULL; zmbuf = next) {
+ next = TAILQ_NEXT(zmbuf, next);
+
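+ /*
+ * Wait until the attached mbuf is no longer referenced anywhere
+ * (e.g. still sitting in a NIC Tx queue) before releasing it.
+ */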
+ while (!mbuf_is_consumed(zmbuf->mbuf))
+ usleep(1000);
+
+ TAILQ_REMOVE(&vq->zmbuf_list, zmbuf, next);
+ restore_mbuf(zmbuf->mbuf);
+ rte_pktmbuf_free(zmbuf->mbuf);
+ put_zmbuf(zmbuf);
+ vq->nr_zmbuf -= 1;
+ }
+}
+
static void
free_mem_region(struct virtio_net *dev)
{
uint32_t i;
struct rte_vhost_mem_region *reg;
+ struct vhost_virtqueue *vq;
if (!dev || !dev->mem)
return;
+ if (dev->dequeue_zero_copy) {
+ for (i = 0; i < dev->nr_vring; i++) {
+ vq = dev->virtqueue[i];
+ if (vq)
+ drain_zmbuf_list(vq);
+ }
+ }
+
for (i = 0; i < dev->mem->nregions; i++) {
reg = &dev->mem->regions[i];
if (reg->host_user_addr) {
struct VhostUserMsg *msg __rte_unused,
int main_fd __rte_unused)
{
- return VH_RESULT_OK;
+ return RTE_VHOST_MSG_RESULT_OK;
}
static int
cleanup_device(dev, 0);
reset_device(dev);
- return VH_RESULT_OK;
+ return RTE_VHOST_MSG_RESULT_OK;
}
/*
msg->size = sizeof(msg->payload.u64);
msg->fd_num = 0;
- return VH_RESULT_REPLY;
+ return RTE_VHOST_MSG_RESULT_REPLY;
}
/*
msg->size = sizeof(msg->payload.u64);
msg->fd_num = 0;
- return VH_RESULT_REPLY;
+ return RTE_VHOST_MSG_RESULT_REPLY;
}
/*
RTE_LOG(ERR, VHOST_CONFIG,
"(%d) received invalid negotiated features.\n",
dev->vid);
- return VH_RESULT_ERR;
+ return RTE_VHOST_MSG_RESULT_ERR;
}
if (dev->flags & VIRTIO_DEV_RUNNING) {
if (dev->features == features)
- return VH_RESULT_OK;
+ return RTE_VHOST_MSG_RESULT_OK;
/*
* Error out if master tries to change features while device is
RTE_LOG(ERR, VHOST_CONFIG,
"(%d) features changed while device is running.\n",
dev->vid);
- return VH_RESULT_ERR;
+ return RTE_VHOST_MSG_RESULT_ERR;
}
if (dev->notify_ops->features_changed)
} else {
dev->vhost_hlen = sizeof(struct virtio_net_hdr);
}
+ RTE_LOG(INFO, VHOST_CONFIG,
+ "negotiated Virtio features: 0x%" PRIx64 "\n", dev->features);
VHOST_LOG_DEBUG(VHOST_CONFIG,
"(%d) mergeable RX buffers %s, virtio 1 %s\n",
dev->vid,
if (vdpa_dev && vdpa_dev->ops->set_features)
vdpa_dev->ops->set_features(dev->vid);
- return VH_RESULT_OK;
+ return RTE_VHOST_MSG_RESULT_OK;
}
/*
if ((vq->size & (vq->size - 1)) || vq->size > 32768) {
RTE_LOG(ERR, VHOST_CONFIG,
"invalid virtqueue size %u\n", vq->size);
- return VH_RESULT_ERR;
+ return RTE_VHOST_MSG_RESULT_ERR;
}
if (dev->dequeue_zero_copy) {
if (!vq->shadow_used_packed) {
RTE_LOG(ERR, VHOST_CONFIG,
"failed to allocate memory for shadow used ring.\n");
- return VH_RESULT_ERR;
+ return RTE_VHOST_MSG_RESULT_ERR;
}
} else {
if (!vq->shadow_used_split) {
RTE_LOG(ERR, VHOST_CONFIG,
"failed to allocate memory for shadow used ring.\n");
- return VH_RESULT_ERR;
+ return RTE_VHOST_MSG_RESULT_ERR;
}
}
if (!vq->batch_copy_elems) {
RTE_LOG(ERR, VHOST_CONFIG,
"failed to allocate memory for batching copy.\n");
- return VH_RESULT_ERR;
+ return RTE_VHOST_MSG_RESULT_ERR;
}
- return VH_RESULT_OK;
+ return RTE_VHOST_MSG_RESULT_OK;
}
/*
struct batch_copy_elem *new_batch_copy_elems;
int ret;
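+ /* Don't reallocate the virtqueue while the device is running */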
+ if (dev->flags & VIRTIO_DEV_RUNNING)
+ return dev;
+
old_dev = dev;
vq = old_vq = dev->virtqueue[index];
struct rte_vhost_mem_region *r;
uint32_t i;
+ if (unlikely(!dev || !dev->mem))
+ goto out_error;
+
/* Find the region where the address lives. */
for (i = 0; i < dev->mem->nregions; i++) {
r = &dev->mem->regions[i];
r->host_user_addr;
}
}
+out_error:
*len = 0;
return 0;
{
if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)) {
uint64_t vva;
+ uint64_t req_size = *size;
vva = vhost_user_iotlb_cache_find(vq, ra,
size, VHOST_ACCESS_RW);
- if (!vva)
- vhost_user_iotlb_miss(dev, ra, VHOST_ACCESS_RW);
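+ /*
+ * The cache lookup shrinks *size to the contiguously mapped
+ * length; if it is shorter than requested, ask the master to
+ * translate the remainder, starting just after the mapped part.
+ */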
+ if (req_size != *size)
+ vhost_user_iotlb_miss(dev, (ra + *size),
+ VHOST_ACCESS_RW);
return vva;
}
return qva_to_vva(dev, ra, size);
}
+/*
+ * Convert a vring log address to GPA.
+ * If IOMMU is enabled, the log address is an IOVA.
+ * If IOMMU is not enabled, the log address is already a GPA.
+ */
+static uint64_t
+translate_log_addr(struct virtio_net *dev, struct vhost_virtqueue *vq,
+ uint64_t log_addr)
+{
+ if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)) {
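+ /*
+ * The log address points to the used ring, so the whole
+ * used ring must be mapped for the translation to be usable.
+ */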
+ const uint64_t exp_size = sizeof(struct vring_used) +
+ sizeof(struct vring_used_elem) * vq->size;
+ uint64_t hva, gpa;
+ uint64_t size = exp_size;
+
+ hva = vhost_iova_to_vva(dev, vq, log_addr,
+ &size, VHOST_ACCESS_RW);
+ if (size != exp_size)
+ return 0;
+
+ gpa = hva_to_gpa(dev, hva, exp_size);
+ if (!gpa) {
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "VQ: Failed to find GPA for log_addr: 0x%" PRIx64 " hva: 0x%" PRIx64 "\n",
+ log_addr, hva);
+ return 0;
+ }
+ return gpa;
+ }
+
+ return log_addr;
+}
+
static struct virtio_net *
translate_ring_addresses(struct virtio_net *dev, int vq_index)
{
struct vhost_virtqueue *vq = dev->virtqueue[vq_index];
struct vhost_vring_addr *addr = &vq->ring_addrs;
- uint64_t len;
+ uint64_t len, expected_len;
if (vq_is_packed(dev)) {
len = sizeof(struct vring_packed_desc) * vq->size;
return dev;
}
+ vq->access_ok = 1;
return dev;
}
addr = &vq->ring_addrs;
len = sizeof(struct vring_avail) + sizeof(uint16_t) * vq->size;
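+ /*
+ * When event idx is negotiated, an extra 16-bit used_event
+ * field follows the avail ring entries and must be mapped too.
+ */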
+ if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))
+ len += sizeof(uint16_t);
+ expected_len = len;
vq->avail = (struct vring_avail *)(uintptr_t)ring_addr_to_vva(dev,
vq, addr->avail_user_addr, &len);
- if (vq->avail == 0 ||
- len != sizeof(struct vring_avail) +
- sizeof(uint16_t) * vq->size) {
+ if (vq->avail == 0 || len != expected_len) {
RTE_LOG(DEBUG, VHOST_CONFIG,
"(%d) failed to map avail ring.\n",
dev->vid);
len = sizeof(struct vring_used) +
sizeof(struct vring_used_elem) * vq->size;
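+ /* Likewise, the avail_event field follows the used ring entries */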
+ if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))
+ len += sizeof(uint16_t);
+ expected_len = len;
vq->used = (struct vring_used *)(uintptr_t)ring_addr_to_vva(dev,
vq, addr->used_user_addr, &len);
- if (vq->used == 0 || len != sizeof(struct vring_used) +
- sizeof(struct vring_used_elem) * vq->size) {
+ if (vq->used == 0 || len != expected_len) {
RTE_LOG(DEBUG, VHOST_CONFIG,
"(%d) failed to map used ring.\n",
dev->vid);
vq->last_avail_idx = vq->used->idx;
}
- vq->log_guest_addr = addr->log_guest_addr;
+ vq->log_guest_addr =
+ translate_log_addr(dev, vq, addr->log_guest_addr);
+ if (vq->log_guest_addr == 0) {
+ RTE_LOG(DEBUG, VHOST_CONFIG,
+ "(%d) failed to map log_guest_addr .\n",
+ dev->vid);
+ return dev;
+ }
+ vq->access_ok = 1;
VHOST_LOG_DEBUG(VHOST_CONFIG, "(%d) mapped address desc: %p\n",
dev->vid, vq->desc);
struct virtio_net *dev = *pdev;
struct vhost_virtqueue *vq;
struct vhost_vring_addr *addr = &msg->payload.addr;
+ bool access_ok;
if (dev->mem == NULL)
- return VH_RESULT_ERR;
+ return RTE_VHOST_MSG_RESULT_ERR;
/* addr->index refers to the queue index. The txq is 1, rxq is 0. */
vq = dev->virtqueue[msg->payload.addr.index];
+ access_ok = vq->access_ok;
+
/*
* Rings addresses should not be interpreted as long as the ring is not
* started and enabled
vring_invalidate(dev, vq);
- if (vq->enabled && (dev->features &
- (1ULL << VHOST_USER_F_PROTOCOL_FEATURES))) {
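+ /*
+ * If the rings were successfully mapped before (access_ok), their
+ * addresses are being updated, so translate the new ones right away.
+ */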
+ if ((vq->enabled && (dev->features &
+ (1ULL << VHOST_USER_F_PROTOCOL_FEATURES))) ||
+ access_ok) {
dev = translate_ring_addresses(dev, msg->payload.addr.index);
if (!dev)
- return VH_RESULT_ERR;
+ return RTE_VHOST_MSG_RESULT_ERR;
*pdev = dev;
}
- return VH_RESULT_OK;
+ return RTE_VHOST_MSG_RESULT_OK;
}
/*
vq->last_avail_idx = msg->payload.state.num;
}
- return VH_RESULT_OK;
+ return RTE_VHOST_MSG_RESULT_OK;
}
static int
uint64_t host_phys_addr, uint64_t size)
{
struct guest_page *page, *last_page;
+ struct guest_page *old_pages;
if (dev->nr_guest_pages == dev->max_guest_pages) {
dev->max_guest_pages *= 2;
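+ /* Keep the old pointer around so it can be freed if realloc() fails */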
+ old_pages = dev->guest_pages;
dev->guest_pages = realloc(dev->guest_pages,
dev->max_guest_pages * sizeof(*page));
if (!dev->guest_pages) {
RTE_LOG(ERR, VHOST_CONFIG, "cannot realloc guest_pages\n");
+ free(old_pages);
return -1;
}
}
if (memory->nregions > VHOST_MEMORY_MAX_NREGIONS) {
RTE_LOG(ERR, VHOST_CONFIG,
"too many memory regions (%u)\n", memory->nregions);
- return VH_RESULT_ERR;
+ return RTE_VHOST_MSG_RESULT_ERR;
}
if (dev->mem && !vhost_memory_changed(memory, dev->mem)) {
for (i = 0; i < memory->nregions; i++)
close(msg->fds[i]);
- return VH_RESULT_OK;
+ return RTE_VHOST_MSG_RESULT_OK;
}
if (dev->mem) {
"(%d) failed to allocate memory "
"for dev->guest_pages\n",
dev->vid);
- return VH_RESULT_ERR;
+ return RTE_VHOST_MSG_RESULT_ERR;
}
}
RTE_LOG(ERR, VHOST_CONFIG,
"(%d) failed to allocate memory for dev->mem\n",
dev->vid);
- return VH_RESULT_ERR;
+ return RTE_VHOST_MSG_RESULT_ERR;
}
dev->mem->nregions = memory->nregions;
dump_guest_pages(dev);
- return VH_RESULT_OK;
+ return RTE_VHOST_MSG_RESULT_OK;
err_mmap:
free_mem_region(dev);
rte_free(dev->mem);
dev->mem = NULL;
- return VH_RESULT_ERR;
+ return RTE_VHOST_MSG_RESULT_ERR;
}
static bool
vq->callfd = file.fd;
- return VH_RESULT_OK;
+ return RTE_VHOST_MSG_RESULT_OK;
}
static int vhost_user_set_vring_err(struct virtio_net **pdev __rte_unused,
close(msg->fds[0]);
RTE_LOG(INFO, VHOST_CONFIG, "not implemented\n");
- return VH_RESULT_OK;
+ return RTE_VHOST_MSG_RESULT_OK;
}
static int
/* Interpret ring addresses only when ring is started. */
dev = translate_ring_addresses(dev, file.index);
if (!dev)
- return VH_RESULT_ERR;
+ return RTE_VHOST_MSG_RESULT_ERR;
*pdev = dev;
* the ring starts already enabled. Otherwise, it is enabled via
* the SET_VRING_ENABLE message.
*/
- if (!(dev->features & (1ULL << VHOST_USER_F_PROTOCOL_FEATURES)))
+ if (!(dev->features & (1ULL << VHOST_USER_F_PROTOCOL_FEATURES))) {
vq->enabled = 1;
+ if (dev->notify_ops->vring_state_changed)
+ dev->notify_ops->vring_state_changed(
+ dev->vid, file.index, 1);
+ }
if (vq->kickfd >= 0)
close(vq->kickfd);
vq->kickfd = file.fd;
- return VH_RESULT_OK;
+ return RTE_VHOST_MSG_RESULT_OK;
}
static void
free_zmbufs(struct vhost_virtqueue *vq)
{
- struct zcopy_mbuf *zmbuf, *next;
-
- for (zmbuf = TAILQ_FIRST(&vq->zmbuf_list);
- zmbuf != NULL; zmbuf = next) {
- next = TAILQ_NEXT(zmbuf, next);
-
- rte_pktmbuf_free(zmbuf->mbuf);
- TAILQ_REMOVE(&vq->zmbuf_list, zmbuf, next);
- }
+ drain_zmbuf_list(vq);
rte_free(vq->zmbufs);
}
vq->callfd = VIRTIO_UNINITIALIZED_EVENTFD;
+ vq->signalled_used_valid = false;
+
if (dev->dequeue_zero_copy)
free_zmbufs(vq);
if (vq_is_packed(dev)) {
msg->size = sizeof(msg->payload.state);
msg->fd_num = 0;
- return VH_RESULT_REPLY;
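+ /* Invalidate the ring mappings; they must be re-translated before reuse */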
+ vring_invalidate(dev, vq);
+
+ return RTE_VHOST_MSG_RESULT_REPLY;
}
/*
dev->notify_ops->vring_state_changed(dev->vid,
index, enable);
+ /* On disable, the rings have to stop being processed. */
+ if (!enable && dev->dequeue_zero_copy)
+ drain_zmbuf_list(dev->virtqueue[index]);
+
dev->virtqueue[index]->enabled = enable;
- return VH_RESULT_OK;
+ return RTE_VHOST_MSG_RESULT_OK;
}
static int
msg->size = sizeof(msg->payload.u64);
msg->fd_num = 0;
- return VH_RESULT_REPLY;
+ return RTE_VHOST_MSG_RESULT_REPLY;
}
static int
RTE_LOG(ERR, VHOST_CONFIG,
"(%d) received invalid protocol features.\n",
dev->vid);
- return VH_RESULT_ERR;
+ return RTE_VHOST_MSG_RESULT_ERR;
}
dev->protocol_features = protocol_features;
+ RTE_LOG(INFO, VHOST_CONFIG,
+ "negotiated Vhost-user protocol features: 0x%" PRIx64 "\n",
+ dev->protocol_features);
- return VH_RESULT_OK;
+ return RTE_VHOST_MSG_RESULT_OK;
}
static int
if (fd < 0) {
RTE_LOG(ERR, VHOST_CONFIG, "invalid log fd: %d\n", fd);
- return VH_RESULT_ERR;
+ return RTE_VHOST_MSG_RESULT_ERR;
}
if (msg->size != sizeof(VhostUserLog)) {
RTE_LOG(ERR, VHOST_CONFIG,
"invalid log base msg size: %"PRId32" != %d\n",
msg->size, (int)sizeof(VhostUserLog));
- return VH_RESULT_ERR;
+ return RTE_VHOST_MSG_RESULT_ERR;
}
size = msg->payload.log.mmap_size;
RTE_LOG(ERR, VHOST_CONFIG,
"log offset %#"PRIx64" exceeds log size %#"PRIx64"\n",
off, size);
- return VH_RESULT_ERR;
+ return RTE_VHOST_MSG_RESULT_ERR;
}
RTE_LOG(INFO, VHOST_CONFIG,
close(fd);
if (addr == MAP_FAILED) {
RTE_LOG(ERR, VHOST_CONFIG, "mmap log base failed!\n");
- return VH_RESULT_ERR;
+ return RTE_VHOST_MSG_RESULT_ERR;
}
/*
msg->size = 0;
msg->fd_num = 0;
- return VH_RESULT_REPLY;
+ return RTE_VHOST_MSG_RESULT_REPLY;
}
static int vhost_user_set_log_fd(struct virtio_net **pdev __rte_unused,
close(msg->fds[0]);
RTE_LOG(INFO, VHOST_CONFIG, "not implemented.\n");
- return VH_RESULT_OK;
+ return RTE_VHOST_MSG_RESULT_OK;
}
/*
if (vdpa_dev && vdpa_dev->ops->migration_done)
vdpa_dev->ops->migration_done(dev->vid);
- return VH_RESULT_OK;
+ return RTE_VHOST_MSG_RESULT_OK;
}
static int
RTE_LOG(ERR, VHOST_CONFIG, "Invalid MTU size (%"PRIu64")\n",
msg->payload.u64);
- return VH_RESULT_ERR;
+ return RTE_VHOST_MSG_RESULT_ERR;
}
dev->mtu = msg->payload.u64;
- return VH_RESULT_OK;
+ return RTE_VHOST_MSG_RESULT_OK;
}
static int
RTE_LOG(ERR, VHOST_CONFIG,
"Invalid file descriptor for slave channel (%d)\n",
fd);
- return VH_RESULT_ERR;
+ return RTE_VHOST_MSG_RESULT_ERR;
}
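+ /* Close any previously configured slave channel fd before replacing it */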
+ if (dev->slave_req_fd >= 0)
+ close(dev->slave_req_fd);
+
dev->slave_req_fd = fd;
- return VH_RESULT_OK;
+ return RTE_VHOST_MSG_RESULT_OK;
}
static int
-is_vring_iotlb_update(struct vhost_virtqueue *vq, struct vhost_iotlb_msg *imsg)
+is_vring_iotlb_split(struct vhost_virtqueue *vq, struct vhost_iotlb_msg *imsg)
{
struct vhost_vring_addr *ra;
- uint64_t start, end;
+ uint64_t start, end, len;
start = imsg->iova;
end = start + imsg->size;
ra = &vq->ring_addrs;
- if (ra->desc_user_addr >= start && ra->desc_user_addr < end)
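+ /*
+ * Two ranges overlap iff each one starts before the other ends:
+ * check [start, end) against [addr, addr + len).
+ */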
+ len = sizeof(struct vring_desc) * vq->size;
+ if (ra->desc_user_addr < end && (ra->desc_user_addr + len) > start)
return 1;
- if (ra->avail_user_addr >= start && ra->avail_user_addr < end)
+
+ len = sizeof(struct vring_avail) + sizeof(uint16_t) * vq->size;
+ if (ra->avail_user_addr < end && (ra->avail_user_addr + len) > start)
return 1;
- if (ra->used_user_addr >= start && ra->used_user_addr < end)
+
+ len = sizeof(struct vring_used) +
+ sizeof(struct vring_used_elem) * vq->size;
+ if (ra->used_user_addr < end && (ra->used_user_addr + len) > start)
return 1;
return 0;
}
static int
-is_vring_iotlb_invalidate(struct vhost_virtqueue *vq,
- struct vhost_iotlb_msg *imsg)
+is_vring_iotlb_packed(struct vhost_virtqueue *vq, struct vhost_iotlb_msg *imsg)
{
- uint64_t istart, iend, vstart, vend;
+ struct vhost_vring_addr *ra;
+ uint64_t start, end, len;
- istart = imsg->iova;
- iend = istart + imsg->size - 1;
+ start = imsg->iova;
+ end = start + imsg->size;
- vstart = (uintptr_t)vq->desc;
- vend = vstart + sizeof(struct vring_desc) * vq->size - 1;
- if (vstart <= iend && istart <= vend)
+ ra = &vq->ring_addrs;
+ len = sizeof(struct vring_packed_desc) * vq->size;
+ if (ra->desc_user_addr < end && (ra->desc_user_addr + len) > start)
return 1;
- vstart = (uintptr_t)vq->avail;
- vend = vstart + sizeof(struct vring_avail);
- vend += sizeof(uint16_t) * vq->size - 1;
- if (vstart <= iend && istart <= vend)
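+ /*
+ * In packed rings, the avail and used addresses point to the
+ * driver and device event suppression structures.
+ */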
+ len = sizeof(struct vring_packed_desc_event);
+ if (ra->avail_user_addr < end && (ra->avail_user_addr + len) > start)
return 1;
- vstart = (uintptr_t)vq->used;
- vend = vstart + sizeof(struct vring_used);
- vend += sizeof(struct vring_used_elem) * vq->size - 1;
- if (vstart <= iend && istart <= vend)
+ len = sizeof(struct vring_packed_desc_event);
+ if (ra->used_user_addr < end && (ra->used_user_addr + len) > start)
return 1;
return 0;
}
+static int
+is_vring_iotlb(struct virtio_net *dev,
+ struct vhost_virtqueue *vq,
+ struct vhost_iotlb_msg *imsg)
+{
+ if (vq_is_packed(dev))
+ return is_vring_iotlb_packed(vq, imsg);
+ else
+ return is_vring_iotlb_split(vq, imsg);
+}
+
static int
vhost_user_iotlb_msg(struct virtio_net **pdev, struct VhostUserMsg *msg,
int main_fd __rte_unused)
len = imsg->size;
vva = qva_to_vva(dev, imsg->uaddr, &len);
if (!vva)
- return VH_RESULT_ERR;
+ return RTE_VHOST_MSG_RESULT_ERR;
for (i = 0; i < dev->nr_vring; i++) {
struct vhost_virtqueue *vq = dev->virtqueue[i];
vhost_user_iotlb_cache_insert(vq, imsg->iova, vva,
len, imsg->perm);
- if (is_vring_iotlb_update(vq, imsg))
+ if (is_vring_iotlb(dev, vq, imsg))
*pdev = dev = translate_ring_addresses(dev, i);
}
break;
vhost_user_iotlb_cache_remove(vq, imsg->iova,
imsg->size);
- if (is_vring_iotlb_invalidate(vq, imsg))
+ if (is_vring_iotlb(dev, vq, imsg))
vring_invalidate(dev, vq);
}
break;
default:
RTE_LOG(ERR, VHOST_CONFIG, "Invalid IOTLB message type (%d)\n",
imsg->type);
- return VH_RESULT_ERR;
+ return RTE_VHOST_MSG_RESULT_ERR;
}
- return VH_RESULT_OK;
+ return RTE_VHOST_MSG_RESULT_OK;
}
static int
if (dev->postcopy_ufd == -1) {
RTE_LOG(ERR, VHOST_CONFIG, "Userfaultfd not available: %s\n",
strerror(errno));
- return VH_RESULT_ERR;
+ return RTE_VHOST_MSG_RESULT_ERR;
}
api_struct.api = UFFD_API;
api_struct.features = 0;
strerror(errno));
close(dev->postcopy_ufd);
dev->postcopy_ufd = -1;
- return VH_RESULT_ERR;
+ return RTE_VHOST_MSG_RESULT_ERR;
}
msg->fds[0] = dev->postcopy_ufd;
msg->fd_num = 1;
- return VH_RESULT_REPLY;
+ return RTE_VHOST_MSG_RESULT_REPLY;
#else
dev->postcopy_ufd = -1;
msg->fd_num = 0;
- return VH_RESULT_ERR;
+ return RTE_VHOST_MSG_RESULT_ERR;
#endif
}
if (dev->mem && dev->mem->nregions) {
RTE_LOG(ERR, VHOST_CONFIG,
"Regions already registered at postcopy-listen\n");
- return VH_RESULT_ERR;
+ return RTE_VHOST_MSG_RESULT_ERR;
}
dev->postcopy_listening = 1;
- return VH_RESULT_OK;
+ return RTE_VHOST_MSG_RESULT_OK;
}
static int
msg->size = sizeof(msg->payload.u64);
msg->fd_num = 0;
- return VH_RESULT_REPLY;
+ return RTE_VHOST_MSG_RESULT_REPLY;
}
typedef int (*vhost_message_handler_t)(struct virtio_net **pdev,
int did = -1;
int ret;
int unlock_required = 0;
- uint32_t skip_master = 0;
+ bool handled;
int request;
dev = get_device(vid);
}
ret = read_vhost_message(fd, &msg);
- if (ret <= 0 || msg.request.master >= VHOST_USER_MAX) {
+ if (ret <= 0) {
if (ret < 0)
RTE_LOG(ERR, VHOST_CONFIG,
"vhost read message failed\n");
- else if (ret == 0)
+ else
RTE_LOG(INFO, VHOST_CONFIG,
"vhost peer closed\n");
- else
- RTE_LOG(ERR, VHOST_CONFIG,
- "vhost read incorrect message\n");
return -1;
}
ret = 0;
- if (msg.request.master != VHOST_USER_IOTLB_MSG)
- RTE_LOG(INFO, VHOST_CONFIG, "read message %s\n",
- vhost_message_str[msg.request.master]);
- else
- RTE_LOG(DEBUG, VHOST_CONFIG, "read message %s\n",
- vhost_message_str[msg.request.master]);
+ request = msg.request.master;
+ if (request > VHOST_USER_NONE && request < VHOST_USER_MAX &&
+ vhost_message_str[request]) {
+ if (request != VHOST_USER_IOTLB_MSG)
+ RTE_LOG(INFO, VHOST_CONFIG, "read message %s\n",
+ vhost_message_str[request]);
+ else
+ RTE_LOG(DEBUG, VHOST_CONFIG, "read message %s\n",
+ vhost_message_str[request]);
+ } else {
+ RTE_LOG(DEBUG, VHOST_CONFIG, "External request %d\n", request);
+ }
ret = vhost_user_check_and_alloc_queue_pair(dev, &msg);
if (ret < 0) {
* inactive, so it is safe. Otherwise taking the access_lock
* would cause a deadlock.
*/
- switch (msg.request.master) {
+ switch (request) {
case VHOST_USER_SET_FEATURES:
case VHOST_USER_SET_PROTOCOL_FEATURES:
case VHOST_USER_SET_OWNER:
}
+ handled = false;
if (dev->extern_ops.pre_msg_handle) {
ret = (*dev->extern_ops.pre_msg_handle)(dev->vid,
- (void *)&msg, &skip_master);
- if (ret == VH_RESULT_ERR)
- goto skip_to_reply;
- else if (ret == VH_RESULT_REPLY)
+ (void *)&msg);
+ switch (ret) {
+ case RTE_VHOST_MSG_RESULT_REPLY:
send_vhost_reply(fd, &msg);
-
- if (skip_master)
+ /* Fall-through */
+ case RTE_VHOST_MSG_RESULT_ERR:
+ case RTE_VHOST_MSG_RESULT_OK:
+ handled = true;
goto skip_to_post_handle;
+ case RTE_VHOST_MSG_RESULT_NOT_HANDLED:
+ default:
+ break;
+ }
}
- request = msg.request.master;
if (request > VHOST_USER_NONE && request < VHOST_USER_MAX) {
if (!vhost_message_handlers[request])
goto skip_to_post_handle;
ret = vhost_message_handlers[request](&dev, &msg, fd);
switch (ret) {
- case VH_RESULT_ERR:
+ case RTE_VHOST_MSG_RESULT_ERR:
RTE_LOG(ERR, VHOST_CONFIG,
"Processing %s failed.\n",
vhost_message_str[request]);
+ handled = true;
break;
- case VH_RESULT_OK:
+ case RTE_VHOST_MSG_RESULT_OK:
RTE_LOG(DEBUG, VHOST_CONFIG,
"Processing %s succeeded.\n",
vhost_message_str[request]);
+ handled = true;
break;
- case VH_RESULT_REPLY:
+ case RTE_VHOST_MSG_RESULT_REPLY:
RTE_LOG(DEBUG, VHOST_CONFIG,
"Processing %s succeeded and needs reply.\n",
vhost_message_str[request]);
send_vhost_reply(fd, &msg);
+ handled = true;
+ break;
+ default:
break;
}
- } else {
- RTE_LOG(ERR, VHOST_CONFIG,
- "Requested invalid message type %d.\n", request);
- ret = VH_RESULT_ERR;
}
skip_to_post_handle:
- if (ret != VH_RESULT_ERR && dev->extern_ops.post_msg_handle) {
- ret = (*dev->extern_ops.post_msg_handle)(
- dev->vid, (void *)&msg);
- if (ret == VH_RESULT_ERR)
- goto skip_to_reply;
- else if (ret == VH_RESULT_REPLY)
+ if (ret != RTE_VHOST_MSG_RESULT_ERR &&
+ dev->extern_ops.post_msg_handle) {
+ ret = (*dev->extern_ops.post_msg_handle)(dev->vid,
+ (void *)&msg);
+ switch (ret) {
+ case RTE_VHOST_MSG_RESULT_REPLY:
send_vhost_reply(fd, &msg);
+ /* Fall-through */
+ case RTE_VHOST_MSG_RESULT_ERR:
+ case RTE_VHOST_MSG_RESULT_OK:
+ handled = true;
+ break;
+ case RTE_VHOST_MSG_RESULT_NOT_HANDLED:
+ default:
+ break;
+ }
}
-skip_to_reply:
if (unlock_required)
vhost_user_unlock_all_queue_pairs(dev);
+ /* If the message was not handled at this stage, treat it as an error. */
+ if (!handled) {
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "vhost message (req: %d) was not handled.\n", request);
+ ret = RTE_VHOST_MSG_RESULT_ERR;
+ }
+
/*
* If the request required a reply that was already sent,
* this optional reply-ack won't be sent as the
* VHOST_USER_NEED_REPLY was cleared in send_vhost_reply().
*/
if (msg.flags & VHOST_USER_NEED_REPLY) {
- msg.payload.u64 = ret == VH_RESULT_ERR;
+ msg.payload.u64 = ret == RTE_VHOST_MSG_RESULT_ERR;
msg.size = sizeof(msg.payload.u64);
msg.fd_num = 0;
send_vhost_reply(fd, &msg);
- } else if (ret == VH_RESULT_ERR) {
+ } else if (ret == RTE_VHOST_MSG_RESULT_ERR) {
RTE_LOG(ERR, VHOST_CONFIG,
"vhost message handling failed.\n");
return -1;
vdpa_dev = rte_vdpa_get_device(did);
if (vdpa_dev && virtio_is_ready(dev) &&
!(dev->flags & VIRTIO_DEV_VDPA_CONFIGURED) &&
- msg.request.master == VHOST_USER_SET_VRING_ENABLE) {
+ msg.request.master == VHOST_USER_SET_VRING_CALL) {
if (vdpa_dev->ops->dev_conf)
vdpa_dev->ops->dev_conf(dev->vid);
dev->flags |= VIRTIO_DEV_VDPA_CONFIGURED;
- if (vhost_user_host_notifier_ctrl(dev->vid, true) != 0) {
- RTE_LOG(INFO, VHOST_CONFIG,
- "(%d) software relay is used for vDPA, performance may be low.\n",
- dev->vid);
- }
}
return 0;
return process_slave_message_reply(dev, &msg);
}
-int vhost_user_host_notifier_ctrl(int vid, bool enable)
+int rte_vhost_host_notifier_ctrl(int vid, bool enable)
{
struct virtio_net *dev;
struct rte_vdpa_device *vdpa_dev;