+ /*
+ * Refuse to enable the ring while the async data path still has
+ * packets in flight: the application must complete them first.
+ */
+ if (enable && dev->virtqueue[index]->async_registered) {
+ if (dev->virtqueue[index]->async_pkts_inflight_n) {
+ VHOST_LOG_CONFIG(ERR, "failed to enable vring. "
+ "async inflight packets must be completed first\n");
+ return RTE_VHOST_MSG_RESULT_ERR;
+ }
+ }
+
+ dev->virtqueue[index]->enabled = enable;
+
+ return RTE_VHOST_MSG_RESULT_OK;
+}
+
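+/*
+ * VHOST_USER_GET_PROTOCOL_FEATURES: report the set of vhost-user
+ * protocol features this backend supports, so that the master can
+ * negotiate a subset of them via VHOST_USER_SET_PROTOCOL_FEATURES.
+ */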
+static int
+vhost_user_get_protocol_features(struct virtio_net **pdev,
+ struct VhostUserMsg *msg,
+ int main_fd __rte_unused)
+{
+ struct virtio_net *dev = *pdev;
+ uint64_t protocol_features;
+
+ if (validate_msg_fds(msg, 0) != 0)
+ return RTE_VHOST_MSG_RESULT_ERR;
+
+ rte_vhost_driver_get_protocol_features(dev->ifname, &protocol_features);
+
+ msg->payload.u64 = protocol_features;
+ msg->size = sizeof(msg->payload.u64);
+ msg->fd_num = 0;
+
+ return RTE_VHOST_MSG_RESULT_REPLY;
+}
+
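+/*
+ * VHOST_USER_SET_PROTOCOL_FEATURES: record the protocol features the
+ * master selected; anything outside what we advertised is rejected.
+ */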
+static int
+vhost_user_set_protocol_features(struct virtio_net **pdev,
+ struct VhostUserMsg *msg,
+ int main_fd __rte_unused)
+{
+ struct virtio_net *dev = *pdev;
+ uint64_t protocol_features = msg->payload.u64;
+ uint64_t slave_protocol_features = 0;
+
+ if (validate_msg_fds(msg, 0) != 0)
+ return RTE_VHOST_MSG_RESULT_ERR;
+
+ rte_vhost_driver_get_protocol_features(dev->ifname,
+ &slave_protocol_features);
+ if (protocol_features & ~slave_protocol_features) {
+ VHOST_LOG_CONFIG(ERR,
+ "(%d) received invalid protocol features.\n",
+ dev->vid);
+ return RTE_VHOST_MSG_RESULT_ERR;
+ }
+
+ dev->protocol_features = protocol_features;
+ VHOST_LOG_CONFIG(INFO,
+ "negotiated Vhost-user protocol features: 0x%" PRIx64 "\n",
+ dev->protocol_features);
+
+ return RTE_VHOST_MSG_RESULT_OK;
+}
+
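+/*
+ * VHOST_USER_SET_LOG_BASE: map the shared memory region used for
+ * dirty-page logging during live migration.
+ */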
+static int
+vhost_user_set_log_base(struct virtio_net **pdev, struct VhostUserMsg *msg,
+ int main_fd __rte_unused)
+{
+ struct virtio_net *dev = *pdev;
+ int fd = msg->fds[0];
+ uint64_t size, off;
+ void *addr;
+
+ if (validate_msg_fds(msg, 1) != 0)
+ return RTE_VHOST_MSG_RESULT_ERR;
+
+ if (fd < 0) {
+ VHOST_LOG_CONFIG(ERR, "invalid log fd: %d\n", fd);
+ return RTE_VHOST_MSG_RESULT_ERR;
+ }
+
+ if (msg->size != sizeof(VhostUserLog)) {
+ VHOST_LOG_CONFIG(ERR,
+ "invalid log base msg size: %"PRId32" != %d\n",
+ msg->size, (int)sizeof(VhostUserLog));
+ goto close_msg_fds;
+ }
+
+ size = msg->payload.log.mmap_size;
+ off = msg->payload.log.mmap_offset;
+
+ /*
+ * Check for mmap size and offset overflow: with unsigned arithmetic,
+ * -size equals 2^64 - size, so this rejects any offset/size pair for
+ * which off + size would overflow 64 bits (including size == 0).
+ */
+ if (off >= -size) {
+ VHOST_LOG_CONFIG(ERR,
+ "log offset %#"PRIx64" and log size %#"PRIx64" overflow\n",
+ off, size);
+ goto close_msg_fds;
+ }
+
+ VHOST_LOG_CONFIG(INFO,
+ "log mmap size: %"PRId64", offset: %"PRId64"\n",
+ size, off);
+
+ /*
+ * mmap() from offset 0 to work around a hugepage mmap bug: mmap()
+ * fails when the offset is not page-size aligned. The requested
+ * offset is applied below when computing log_base.
+ */
+ addr = mmap(0, size + off, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+ close(fd);
+ if (addr == MAP_FAILED) {
+ VHOST_LOG_CONFIG(ERR, "mmap log base failed!\n");
+ return RTE_VHOST_MSG_RESULT_ERR;
+ }
+
+ /*
+ * Free any previously mapped log memory: VHOST_USER_SET_LOG_BASE
+ * may occasionally be received more than once.
+ */
+ if (dev->log_addr)
+ munmap((void *)(uintptr_t)dev->log_addr, dev->log_size);
+ dev->log_addr = (uint64_t)(uintptr_t)addr;
+ dev->log_base = dev->log_addr + off;
+ dev->log_size = size;
+
+ /*
+ * The spec is not clear on this point (yet), but QEMU doesn't
+ * expect any payload in the reply.
+ */
+ msg->size = 0;
+ msg->fd_num = 0;
+
+ return RTE_VHOST_MSG_RESULT_REPLY;
+
+close_msg_fds:
+ close_msg_fds(msg);
+ return RTE_VHOST_MSG_RESULT_ERR;
+}
+
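+/*
+ * VHOST_USER_SET_LOG_FD is not supported; just take ownership of the
+ * descriptor and close it so it does not leak.
+ */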
+static int vhost_user_set_log_fd(struct virtio_net **pdev __rte_unused,
+ struct VhostUserMsg *msg,
+ int main_fd __rte_unused)
+{
+ if (validate_msg_fds(msg, 1) != 0)
+ return RTE_VHOST_MSG_RESULT_ERR;
+
+ close(msg->fds[0]);
+ VHOST_LOG_CONFIG(INFO, "not implemented.\n");
+
+ return RTE_VHOST_MSG_RESULT_OK;
+}
+
+/*
+ * A RARP packet is constructed and broadcast to notify switches of the
+ * new location of the migrated VM, so that packets from outside will
+ * not be lost after migration.
+ *
+ * However, we don't actually "send" a RARP packet here; instead, we
+ * set the 'broadcast_rarp' flag so that rte_vhost_dequeue_burst()
+ * injects one.
+ */
+static int
+vhost_user_send_rarp(struct virtio_net **pdev, struct VhostUserMsg *msg,
+ int main_fd __rte_unused)
+{
+ struct virtio_net *dev = *pdev;
+ uint8_t *mac = (uint8_t *)&msg->payload.u64;
+ struct rte_vdpa_device *vdpa_dev;
+
+ if (validate_msg_fds(msg, 0) != 0)
+ return RTE_VHOST_MSG_RESULT_ERR;
+
+ VHOST_LOG_CONFIG(DEBUG,
+ ":: mac: %02x:%02x:%02x:%02x:%02x:%02x\n",
+ mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
+ memcpy(dev->mac.addr_bytes, mac, RTE_ETHER_ADDR_LEN);
+
+ /*
+ * Set the flag to inject a RARP broadcast packet at
+ * rte_vhost_dequeue_burst().
+ *
+ * __ATOMIC_RELEASE ordering is for making sure the mac is
+ * copied before the flag is set.
+ */
+ __atomic_store_n(&dev->broadcast_rarp, 1, __ATOMIC_RELEASE);
+ vdpa_dev = dev->vdpa_dev;
+ if (vdpa_dev && vdpa_dev->ops->migration_done)
+ vdpa_dev->ops->migration_done(dev->vid);
+
+ return RTE_VHOST_MSG_RESULT_OK;
+}
+
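+/*
+ * VHOST_USER_NET_SET_MTU: store the MTU advertised by the master,
+ * after checking it lies within the virtio-net bounds.
+ */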
+static int
+vhost_user_net_set_mtu(struct virtio_net **pdev, struct VhostUserMsg *msg,
+ int main_fd __rte_unused)
+{
+ struct virtio_net *dev = *pdev;
+
+ if (validate_msg_fds(msg, 0) != 0)
+ return RTE_VHOST_MSG_RESULT_ERR;
+
+ if (msg->payload.u64 < VIRTIO_MIN_MTU ||
+ msg->payload.u64 > VIRTIO_MAX_MTU) {
+ VHOST_LOG_CONFIG(ERR, "Invalid MTU size (%"PRIu64")\n",
+ msg->payload.u64);
+
+ return RTE_VHOST_MSG_RESULT_ERR;
+ }
+
+ dev->mtu = msg->payload.u64;
+
+ return RTE_VHOST_MSG_RESULT_OK;
+}
+
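+/*
+ * VHOST_USER_SET_SLAVE_REQ_FD: keep the file descriptor of the
+ * slave-initiated request channel, replacing any previous one.
+ */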
+static int
+vhost_user_set_req_fd(struct virtio_net **pdev, struct VhostUserMsg *msg,
+ int main_fd __rte_unused)
+{
+ struct virtio_net *dev = *pdev;
+ int fd = msg->fds[0];
+
+ if (validate_msg_fds(msg, 1) != 0)
+ return RTE_VHOST_MSG_RESULT_ERR;
+
+ if (fd < 0) {
+ VHOST_LOG_CONFIG(ERR,
+ "Invalid file descriptor for slave channel (%d)\n",
+ fd);
+ return RTE_VHOST_MSG_RESULT_ERR;
+ }
+
+ if (dev->slave_req_fd >= 0)
+ close(dev->slave_req_fd);
+
+ dev->slave_req_fd = fd;
+
+ return RTE_VHOST_MSG_RESULT_OK;
+}
+
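+/*
+ * Return 1 if the IOTLB range [iova, iova + size) overlaps any of the
+ * split virtqueue's ring areas (descriptor table, available ring, used
+ * ring, or the used ring's log address), 0 otherwise.
+ */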
+static int
+is_vring_iotlb_split(struct vhost_virtqueue *vq, struct vhost_iotlb_msg *imsg)
+{
+ struct vhost_vring_addr *ra;
+ uint64_t start, end, len;
+
+ start = imsg->iova;
+ end = start + imsg->size;
+
+ ra = &vq->ring_addrs;
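+ /* Descriptor table: vq->size entries of struct vring_desc. */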
+ len = sizeof(struct vring_desc) * vq->size;
+ if (ra->desc_user_addr < end && (ra->desc_user_addr + len) > start)
+ return 1;
+
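+ /* Available ring: flags and idx header plus vq->size ring entries. */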
+ len = sizeof(struct vring_avail) + sizeof(uint16_t) * vq->size;
+ if (ra->avail_user_addr < end && (ra->avail_user_addr + len) > start)
+ return 1;
+
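+ /* Used ring: flags and idx header plus vq->size used elements. */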
+ len = sizeof(struct vring_used) +
+ sizeof(struct vring_used_elem) * vq->size;
+ if (ra->used_user_addr < end && (ra->used_user_addr + len) > start)
+ return 1;
+
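+ /* When VHOST_VRING_F_LOG is set, check the used ring's log address too. */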
+ if (ra->flags & (1 << VHOST_VRING_F_LOG)) {
+ len = sizeof(uint64_t);
+ if (ra->log_guest_addr < end &&
+ (ra->log_guest_addr + len) > start)
+ return 1;
+ }
+
+ return 0;
+}