return -1;
}
- if ((dev->flags & VIRTIO_DEV_RUNNING) && dev->features != features) {
+ if (dev->flags & VIRTIO_DEV_RUNNING) {
+ if (dev->features == features)
+ return 0;
+
+ /*
+ * Error out if master tries to change features while device is
+ * in running state. The only exception is VHOST_F_LOG_ALL, which
+ * is enabled when live migration starts.
+ */
+ if ((dev->features ^ features) & ~(1ULL << VHOST_F_LOG_ALL)) {
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "(%d) features changed while device is running.\n",
+ dev->vid);
+ return -1;
+ }
+
if (dev->notify_ops->features_changed)
dev->notify_ops->features_changed(dev->vid, features);
}
(dev->features & (1 << VIRTIO_NET_F_MRG_RXBUF)) ? "on" : "off",
(dev->features & (1ULL << VIRTIO_F_VERSION_1)) ? "on" : "off");
+ if (!(dev->features & (1ULL << VIRTIO_NET_F_MQ))) {
+ /*
+ * Remove all but first queue pair if MQ hasn't been
+ * negotiated. This is safe because the device is not
+ * running at this stage.
+ */
+ while (dev->nr_vring > 2) {
+ struct vhost_virtqueue *vq;
+
+ vq = dev->virtqueue[--dev->nr_vring];
+ if (!vq)
+ continue;
+
+ dev->virtqueue[dev->nr_vring] = NULL;
+ cleanup_vq(vq, 1);
+ free_vq(vq);
+ }
+ }
+
return 0;
}
"zero copy is force disabled\n");
dev->dequeue_zero_copy = 0;
}
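+ /* track outstanding zero-copy mbufs until the app frees them */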
+ TAILQ_INIT(&vq->zmbuf_list);
}
vq->shadow_used_ring = rte_malloc(NULL,
int oldnode, newnode;
struct virtio_net *old_dev;
struct vhost_virtqueue *old_vq, *vq;
+ struct zcopy_mbuf *new_zmbuf;
+ struct vring_used_elem *new_shadow_used_ring;
+ struct batch_copy_elem *new_batch_copy_elems;
int ret;
old_dev = dev;
return dev;
memcpy(vq, old_vq, sizeof(*vq));
+ TAILQ_INIT(&vq->zmbuf_list);
+
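+ /*
+ * Re-allocate the per-virtqueue metadata (zero-copy mbuf array,
+ * shadow used ring, batch copy array) on the new NUMA node. If an
+ * allocation fails, keep the old one; it still works, only with
+ * remote memory accesses.
+ */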
+ new_zmbuf = rte_malloc_socket(NULL, vq->zmbuf_size *
+ sizeof(struct zcopy_mbuf), 0, newnode);
+ if (new_zmbuf) {
+ rte_free(vq->zmbufs);
+ vq->zmbufs = new_zmbuf;
+ }
+
+ new_shadow_used_ring = rte_malloc_socket(NULL,
+ vq->size * sizeof(struct vring_used_elem),
+ RTE_CACHE_LINE_SIZE,
+ newnode);
+ if (new_shadow_used_ring) {
+ rte_free(vq->shadow_used_ring);
+ vq->shadow_used_ring = new_shadow_used_ring;
+ }
+
+ new_batch_copy_elems = rte_malloc_socket(NULL,
+ vq->size * sizeof(struct batch_copy_elem),
+ RTE_CACHE_LINE_SIZE,
+ newnode);
+ if (new_batch_copy_elems) {
+ rte_free(vq->batch_copy_elems);
+ vq->batch_copy_elems = new_batch_copy_elems;
+ }
+
rte_free(old_vq);
}
return alloc_vring_queue(dev, vring_idx);
}
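+/*
+ * Take the access_lock of every allocated virtqueue, so that the
+ * datapath cannot process rings while a master message changes them.
+ * Released by vhost_user_unlock_all_queue_pairs().
+ */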
+static void
+vhost_user_lock_all_queue_pairs(struct virtio_net *dev)
+{
+ unsigned int i = 0;
+ unsigned int vq_num = 0;
+
+ while (vq_num < dev->nr_vring) {
+ struct vhost_virtqueue *vq = dev->virtqueue[i];
+
+ if (vq) {
+ rte_spinlock_lock(&vq->access_lock);
+ vq_num++;
+ }
+ i++;
+ }
+}
+
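+/* Release the locks taken by vhost_user_lock_all_queue_pairs() */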
+static void
+vhost_user_unlock_all_queue_pairs(struct virtio_net *dev)
+{
+ unsigned int i = 0;
+ unsigned int vq_num = 0;
+
+ while (vq_num < dev->nr_vring) {
+ struct vhost_virtqueue *vq = dev->virtqueue[i];
+
+ if (vq) {
+ rte_spinlock_unlock(&vq->access_lock);
+ vq_num++;
+ }
+ i++;
+ }
+}
+
int
vhost_user_msg_handler(int vid, int fd)
{
struct virtio_net *dev;
struct VhostUserMsg msg;
int ret;
+ int unlock_required = 0;
dev = get_device(vid);
if (dev == NULL)
return -1;
}
+ /*
+ * Note: we don't lock all queues on VHOST_USER_GET_VRING_BASE,
+ * since it is sent when virtio stops and device is destroyed.
+ * destroy_device waits for queues to be inactive, so it is safe.
+ * Otherwise taking the access_lock would cause a deadlock.
+ */
+ switch (msg.request.master) {
+ case VHOST_USER_SET_FEATURES:
+ case VHOST_USER_SET_PROTOCOL_FEATURES:
+ case VHOST_USER_SET_OWNER:
+ case VHOST_USER_RESET_OWNER:
+ case VHOST_USER_SET_MEM_TABLE:
+ case VHOST_USER_SET_LOG_BASE:
+ case VHOST_USER_SET_LOG_FD:
+ case VHOST_USER_SET_VRING_NUM:
+ case VHOST_USER_SET_VRING_ADDR:
+ case VHOST_USER_SET_VRING_BASE:
+ case VHOST_USER_SET_VRING_KICK:
+ case VHOST_USER_SET_VRING_CALL:
+ case VHOST_USER_SET_VRING_ERR:
+ case VHOST_USER_SET_VRING_ENABLE:
+ case VHOST_USER_SEND_RARP:
+ case VHOST_USER_NET_SET_MTU:
+ case VHOST_USER_SET_SLAVE_REQ_FD:
+ vhost_user_lock_all_queue_pairs(dev);
+ unlock_required = 1;
+ break;
+ default:
+ break;
+ }
+
switch (msg.request.master) {
case VHOST_USER_GET_FEATURES:
msg.payload.u64 = vhost_user_get_features(dev);
send_vhost_reply(fd, &msg);
break;
case VHOST_USER_SET_FEATURES:
- vhost_user_set_features(dev, msg.payload.u64);
+ ret = vhost_user_set_features(dev, msg.payload.u64);
+ if (ret) {
+ /* release the queue locks taken above before erroring out */
+ if (unlock_required)
+ vhost_user_unlock_all_queue_pairs(dev);
+ return -1;
+ }
break;
case VHOST_USER_GET_PROTOCOL_FEATURES:
}
+ if (unlock_required)
+ vhost_user_unlock_all_queue_pairs(dev);
+
if (msg.flags & VHOST_USER_NEED_REPLY) {
msg.payload.u64 = !!ret;
msg.size = sizeof(msg.payload.u64);