+ dev->log_addr = (uint64_t)(uintptr_t)addr;
+ dev->log_base = dev->log_addr + off;
+ dev->log_size = size;
+
+ /*
+ * The spec is not clear about it (yet), but QEMU doesn't expect
+ * any payload in the reply.
+ */
+ msg->size = 0;
+ msg->fd_num = 0;
+
+ return RTE_VHOST_MSG_RESULT_REPLY;
+
+close_msg_fds:
+ close_msg_fds(msg);
+ return RTE_VHOST_MSG_RESULT_ERR;
+}
+
+static int vhost_user_set_log_fd(struct virtio_net **pdev __rte_unused,
+ struct VhostUserMsg *msg,
+ int main_fd __rte_unused)
+{
+ if (validate_msg_fds(msg, 1) != 0)
+ return RTE_VHOST_MSG_RESULT_ERR;
+
+ close(msg->fds[0]);
+ VHOST_LOG_CONFIG(INFO, "not implemented.\n");
+
+ return RTE_VHOST_MSG_RESULT_OK;
+}
+
+/*
+ * An rarp packet is constructed and broadcasted to notify switches about
+ * the new location of the migrated VM, so that packets from outside will
+ * not be lost after migration.
+ *
+ * However, we don't actually "send" a rarp packet here, instead, we set
+ * a flag 'broadcast_rarp' to let rte_vhost_dequeue_burst() inject it.
+ */
+static int
+vhost_user_send_rarp(struct virtio_net **pdev, struct VhostUserMsg *msg,
+ int main_fd __rte_unused)
+{
+ struct virtio_net *dev = *pdev;
+ uint8_t *mac = (uint8_t *)&msg->payload.u64;
+ struct rte_vdpa_device *vdpa_dev;
+
+ if (validate_msg_fds(msg, 0) != 0)
+ return RTE_VHOST_MSG_RESULT_ERR;
+
+ VHOST_LOG_CONFIG(DEBUG,
+ ":: mac: %02x:%02x:%02x:%02x:%02x:%02x\n",
+ mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
+ memcpy(dev->mac.addr_bytes, mac, 6);
+
+ /*
+ * Set the flag to inject a RARP broadcast packet at
+ * rte_vhost_dequeue_burst().
+ *
+ * __ATOMIC_RELEASE ordering is for making sure the mac is
+ * copied before the flag is set.
+ */
+ __atomic_store_n(&dev->broadcast_rarp, 1, __ATOMIC_RELEASE);
+ vdpa_dev = dev->vdpa_dev;
+ if (vdpa_dev && vdpa_dev->ops->migration_done)
+ vdpa_dev->ops->migration_done(dev->vid);
+
+ return RTE_VHOST_MSG_RESULT_OK;
+}
+
+static int
+vhost_user_net_set_mtu(struct virtio_net **pdev, struct VhostUserMsg *msg,
+ int main_fd __rte_unused)
+{
+ struct virtio_net *dev = *pdev;
+
+ if (validate_msg_fds(msg, 0) != 0)
+ return RTE_VHOST_MSG_RESULT_ERR;
+
+ if (msg->payload.u64 < VIRTIO_MIN_MTU ||
+ msg->payload.u64 > VIRTIO_MAX_MTU) {
+ VHOST_LOG_CONFIG(ERR, "Invalid MTU size (%"PRIu64")\n",
+ msg->payload.u64);
+
+ return RTE_VHOST_MSG_RESULT_ERR;
+ }
+
+ dev->mtu = msg->payload.u64;
+
+ return RTE_VHOST_MSG_RESULT_OK;
+}
+
+static int
+vhost_user_set_req_fd(struct virtio_net **pdev, struct VhostUserMsg *msg,
+ int main_fd __rte_unused)
+{
+ struct virtio_net *dev = *pdev;
+ int fd = msg->fds[0];
+
+ if (validate_msg_fds(msg, 1) != 0)
+ return RTE_VHOST_MSG_RESULT_ERR;
+
+ if (fd < 0) {
+ VHOST_LOG_CONFIG(ERR,
+ "Invalid file descriptor for slave channel (%d)\n",
+ fd);
+ return RTE_VHOST_MSG_RESULT_ERR;
+ }
+
+ if (dev->slave_req_fd >= 0)
+ close(dev->slave_req_fd);
+
+ dev->slave_req_fd = fd;
+
+ return RTE_VHOST_MSG_RESULT_OK;
+}
+
+static int
+is_vring_iotlb_split(struct vhost_virtqueue *vq, struct vhost_iotlb_msg *imsg)
+{
+ struct vhost_vring_addr *ra;
+ uint64_t start, end, len;
+
+ start = imsg->iova;
+ end = start + imsg->size;
+
+ ra = &vq->ring_addrs;
+ len = sizeof(struct vring_desc) * vq->size;
+ if (ra->desc_user_addr < end && (ra->desc_user_addr + len) > start)
+ return 1;
+
+ len = sizeof(struct vring_avail) + sizeof(uint16_t) * vq->size;
+ if (ra->avail_user_addr < end && (ra->avail_user_addr + len) > start)
+ return 1;
+
+ len = sizeof(struct vring_used) +
+ sizeof(struct vring_used_elem) * vq->size;
+ if (ra->used_user_addr < end && (ra->used_user_addr + len) > start)
+ return 1;
+
+ if (ra->flags & (1 << VHOST_VRING_F_LOG)) {
+ len = sizeof(uint64_t);
+ if (ra->log_guest_addr < end &&
+ (ra->log_guest_addr + len) > start)
+ return 1;
+ }
+
+ return 0;
+}
+
+static int
+is_vring_iotlb_packed(struct vhost_virtqueue *vq, struct vhost_iotlb_msg *imsg)
+{
+ struct vhost_vring_addr *ra;
+ uint64_t start, end, len;
+
+ start = imsg->iova;
+ end = start + imsg->size;
+
+ ra = &vq->ring_addrs;
+ len = sizeof(struct vring_packed_desc) * vq->size;
+ if (ra->desc_user_addr < end && (ra->desc_user_addr + len) > start)
+ return 1;
+
+ len = sizeof(struct vring_packed_desc_event);
+ if (ra->avail_user_addr < end && (ra->avail_user_addr + len) > start)
+ return 1;
+
+ len = sizeof(struct vring_packed_desc_event);
+ if (ra->used_user_addr < end && (ra->used_user_addr + len) > start)
+ return 1;
+
+ if (ra->flags & (1 << VHOST_VRING_F_LOG)) {
+ len = sizeof(uint64_t);
+ if (ra->log_guest_addr < end &&
+ (ra->log_guest_addr + len) > start)
+ return 1;
+ }
+
+ return 0;
+}
+
/* Dispatch the ring-overlap check according to the ring layout in use. */
static int is_vring_iotlb(struct virtio_net *dev,
			struct vhost_virtqueue *vq,
			struct vhost_iotlb_msg *imsg)
{
	return vq_is_packed(dev) ?
		is_vring_iotlb_packed(vq, imsg) :
		is_vring_iotlb_split(vq, imsg);
}
+
/*
 * Handle VHOST_USER_IOTLB_MSG: insert or remove IOTLB entries for every
 * virtqueue, and (re)translate or invalidate ring addresses whenever the
 * affected IOVA range overlaps a ring region.
 */
static int
vhost_user_iotlb_msg(struct virtio_net **pdev, struct VhostUserMsg *msg,
		int main_fd __rte_unused)
{
	struct virtio_net *dev = *pdev;
	struct vhost_iotlb_msg *imsg = &msg->payload.iotlb;
	uint16_t i;
	uint64_t vva, len;

	if (validate_msg_fds(msg, 0) != 0)
		return RTE_VHOST_MSG_RESULT_ERR;

	switch (imsg->type) {
	case VHOST_IOTLB_UPDATE:
		/*
		 * len is passed by reference; presumably clamped by
		 * qva_to_vva() to the contiguous mapped size — confirm
		 * against its definition.
		 */
		len = imsg->size;
		vva = qva_to_vva(dev, imsg->uaddr, &len);
		if (!vva)
			return RTE_VHOST_MSG_RESULT_ERR;

		for (i = 0; i < dev->nr_vring; i++) {
			struct vhost_virtqueue *vq = dev->virtqueue[i];

			if (!vq)
				continue;

			vhost_user_iotlb_cache_insert(vq, imsg->iova, vva,
					len, imsg->perm);

			/*
			 * If the new mapping covers ring addresses, the ring
			 * can now be translated. translate_ring_addresses()
			 * may return a different device object, so both the
			 * local dev and the caller's *pdev are refreshed.
			 */
			if (is_vring_iotlb(dev, vq, imsg))
				*pdev = dev = translate_ring_addresses(dev, i);
		}
		break;
	case VHOST_IOTLB_INVALIDATE:
		for (i = 0; i < dev->nr_vring; i++) {
			struct vhost_virtqueue *vq = dev->virtqueue[i];

			if (!vq)
				continue;

			vhost_user_iotlb_cache_remove(vq, imsg->iova,
					imsg->size);

			/* Drop stale ring translations overlapping the range. */
			if (is_vring_iotlb(dev, vq, imsg))
				vring_invalidate(dev, vq);
		}
		break;
	default:
		VHOST_LOG_CONFIG(ERR, "Invalid IOTLB message type (%d)\n",
				imsg->type);
		return RTE_VHOST_MSG_RESULT_ERR;
	}

	return RTE_VHOST_MSG_RESULT_OK;
}
+
+static int
+vhost_user_set_postcopy_advise(struct virtio_net **pdev,
+ struct VhostUserMsg *msg,
+ int main_fd __rte_unused)
+{
+ struct virtio_net *dev = *pdev;
+#ifdef RTE_LIBRTE_VHOST_POSTCOPY
+ struct uffdio_api api_struct;
+
+ if (validate_msg_fds(msg, 0) != 0)
+ return RTE_VHOST_MSG_RESULT_ERR;
+
+ dev->postcopy_ufd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
+
+ if (dev->postcopy_ufd == -1) {
+ VHOST_LOG_CONFIG(ERR, "Userfaultfd not available: %s\n",
+ strerror(errno));
+ return RTE_VHOST_MSG_RESULT_ERR;
+ }
+ api_struct.api = UFFD_API;
+ api_struct.features = 0;
+ if (ioctl(dev->postcopy_ufd, UFFDIO_API, &api_struct)) {
+ VHOST_LOG_CONFIG(ERR, "UFFDIO_API ioctl failure: %s\n",
+ strerror(errno));
+ close(dev->postcopy_ufd);
+ dev->postcopy_ufd = -1;
+ return RTE_VHOST_MSG_RESULT_ERR;
+ }
+ msg->fds[0] = dev->postcopy_ufd;
+ msg->fd_num = 1;
+
+ return RTE_VHOST_MSG_RESULT_REPLY;
+#else
+ dev->postcopy_ufd = -1;
+ msg->fd_num = 0;
+
+ return RTE_VHOST_MSG_RESULT_ERR;
+#endif
+}
+
+static int
+vhost_user_set_postcopy_listen(struct virtio_net **pdev,
+ struct VhostUserMsg *msg __rte_unused,
+ int main_fd __rte_unused)
+{
+ struct virtio_net *dev = *pdev;
+
+ if (validate_msg_fds(msg, 0) != 0)
+ return RTE_VHOST_MSG_RESULT_ERR;
+
+ if (dev->mem && dev->mem->nregions) {
+ VHOST_LOG_CONFIG(ERR,
+ "Regions already registered at postcopy-listen\n");
+ return RTE_VHOST_MSG_RESULT_ERR;
+ }
+ dev->postcopy_listening = 1;
+
+ return RTE_VHOST_MSG_RESULT_OK;
+}
+
+static int
+vhost_user_postcopy_end(struct virtio_net **pdev, struct VhostUserMsg *msg,
+ int main_fd __rte_unused)
+{
+ struct virtio_net *dev = *pdev;
+
+ if (validate_msg_fds(msg, 0) != 0)
+ return RTE_VHOST_MSG_RESULT_ERR;
+
+ dev->postcopy_listening = 0;
+ if (dev->postcopy_ufd >= 0) {
+ close(dev->postcopy_ufd);
+ dev->postcopy_ufd = -1;
+ }