+ __atomic_add_fetch(&vring->used->idx, 1, __ATOMIC_RELAXED);
+ }
+}
+
+/*
+ * Push a new virtio device status byte to the backend.
+ *
+ * The cached copy in dev->status is updated under dev->mutex before the
+ * backend call so readers always see the value we attempted to set.
+ * Returns the backend's result; -ENOTSUP (backend has no set_status op
+ * semantics) is passed through without logging an error.
+ */
+int
+virtio_user_dev_set_status(struct virtio_user_dev *dev, uint8_t status)
+{
+	int ret;
+
+	pthread_mutex_lock(&dev->mutex);
+
+	dev->status = status;
+	ret = dev->ops->set_status(dev, status);
+	if (ret != 0 && ret != -ENOTSUP)
+		PMD_INIT_LOG(ERR, "(%s) Failed to set backend status", dev->path);
+
+	pthread_mutex_unlock(&dev->mutex);
+
+	return ret;
+}
+
+/*
+ * Refresh the cached device status from the backend.
+ *
+ * On success dev->status is updated under dev->mutex and every status
+ * bit is dumped at DEBUG level. A backend without get_status support
+ * (-ENOTSUP) is tolerated silently; any other failure is logged.
+ * Returns the backend's result code.
+ */
+int
+virtio_user_dev_update_status(struct virtio_user_dev *dev)
+{
+	uint8_t device_status;
+	int ret;
+
+	pthread_mutex_lock(&dev->mutex);
+
+	ret = dev->ops->get_status(dev, &device_status);
+	if (ret) {
+		if (ret != -ENOTSUP)
+			PMD_INIT_LOG(ERR, "(%s) Failed to get backend status", dev->path);
+		goto unlock;
+	}
+
+	dev->status = device_status;
+	PMD_INIT_LOG(DEBUG, "Updated Device Status(0x%08x):\n"
+			"\t-RESET: %u\n"
+			"\t-ACKNOWLEDGE: %u\n"
+			"\t-DRIVER: %u\n"
+			"\t-DRIVER_OK: %u\n"
+			"\t-FEATURES_OK: %u\n"
+			"\t-DEVICE_NEED_RESET: %u\n"
+			"\t-FAILED: %u",
+			dev->status,
+			(dev->status == VIRTIO_CONFIG_STATUS_RESET),
+			!!(dev->status & VIRTIO_CONFIG_STATUS_ACK),
+			!!(dev->status & VIRTIO_CONFIG_STATUS_DRIVER),
+			!!(dev->status & VIRTIO_CONFIG_STATUS_DRIVER_OK),
+			!!(dev->status & VIRTIO_CONFIG_STATUS_FEATURES_OK),
+			!!(dev->status & VIRTIO_CONFIG_STATUS_DEV_NEED_RESET),
+			!!(dev->status & VIRTIO_CONFIG_STATUS_FAILED));
+
+unlock:
+	pthread_mutex_unlock(&dev->mutex);
+
+	return ret;
+}
+
+/*
+ * Ask the backend to refresh link state, if it implements the op.
+ * Backends without an update_link_state callback report success.
+ */
+int
+virtio_user_dev_update_link_state(struct virtio_user_dev *dev)
+{
+	if (dev->ops->update_link_state == NULL)
+		return 0;
+
+	return dev->ops->update_link_state(dev);
+}
+
+/*
+ * Reset all packed-ring virtqueues of the port.
+ *
+ * The datapath is stopped (hw->started = 0 under hw->state_lock) and
+ * given a short grace period to drain before the rings are reset, then
+ * restarted. Rx queues additionally have their setup re-finished so
+ * descriptors are refilled.
+ */
+static void
+virtio_user_dev_reset_queues_packed(struct rte_eth_dev *eth_dev)
+{
+	struct virtio_user_dev *dev = eth_dev->data->dev_private;
+	struct virtio_hw *hw = &dev->hw;
+	uint16_t qidx;
+
+	/* Serialize with the datapath while the rings are being reset. */
+	rte_spinlock_lock(&hw->state_lock);
+	hw->started = 0;
+
+	/*
+	 * Give any in-flight Tx/Rx function time to observe
+	 * hw->started == 0 and finish; 1 ms is enough.
+	 */
+	rte_delay_ms(1);
+
+	/* Reset each Rx vring and refill it. */
+	for (qidx = 0; qidx < eth_dev->data->nb_rx_queues; qidx++) {
+		struct virtnet_rx *rxq = eth_dev->data->rx_queues[qidx];
+
+		virtqueue_rxvq_reset_packed(virtnet_rxq_to_vq(rxq));
+		virtio_dev_rx_queue_setup_finish(eth_dev, qidx);
+	}
+
+	/* Reset each Tx vring. */
+	for (qidx = 0; qidx < eth_dev->data->nb_tx_queues; qidx++) {
+		struct virtnet_tx *txq = eth_dev->data->tx_queues[qidx];
+
+		virtqueue_txvq_reset_packed(virtnet_txq_to_vq(txq));
+	}
+
+	hw->started = 1;
+	rte_spinlock_unlock(&hw->state_lock);
+}
+
+/*
+ * Delayed (alarm-context) handler for a backend disconnect.
+ *
+ * Disables the port interrupt and unregisters the interrupt callback
+ * bound to the now-stale fd. In server mode it additionally tears down
+ * the backend connection, adopts the backend's current fd as the new
+ * interrupt fd, and re-registers/re-enables the interrupt so a client
+ * reconnect can be detected. Errors are logged; the function bails out
+ * early if the interrupt cannot be disabled or re-enabled.
+ */
+void
+virtio_user_dev_delayed_disconnect_handler(void *param)
+{
+ struct virtio_user_dev *dev = param;
+ struct rte_eth_dev *eth_dev = &rte_eth_devices[dev->hw.port_id];
+
+ /* Stop interrupt delivery before touching the callback list. */
+ if (rte_intr_disable(eth_dev->intr_handle) < 0) {
+ PMD_DRV_LOG(ERR, "interrupt disable failed");
+ return;
+ }
+ PMD_DRV_LOG(DEBUG, "Unregistering intr fd: %d",
+ eth_dev->intr_handle->fd);
+ /* The old fd's callback must be fully gone before the fd is swapped. */
+ if (rte_intr_callback_unregister(eth_dev->intr_handle,
+ virtio_interrupt_handler,
+ eth_dev) != 1)
+ PMD_DRV_LOG(ERR, "interrupt unregister failed");
+
+ if (dev->is_server) {
+ /* server_disconnect is an optional backend op. */
+ if (dev->ops->server_disconnect)
+ dev->ops->server_disconnect(dev);
+
+ /* After disconnect the backend exposes a new fd to poll on. */
+ eth_dev->intr_handle->fd = dev->ops->get_intr_fd(dev);
+
+ PMD_DRV_LOG(DEBUG, "Registering intr fd: %d",
+ eth_dev->intr_handle->fd);
+
+ if (rte_intr_callback_register(eth_dev->intr_handle,
+ virtio_interrupt_handler,
+ eth_dev))
+ PMD_DRV_LOG(ERR, "interrupt register failed");
+
+ if (rte_intr_enable(eth_dev->intr_handle) < 0) {
+ PMD_DRV_LOG(ERR, "interrupt enable failed");
+ return;
+ }
+ }
+}
+
+/*
+ * Delayed (alarm-context) handler that re-plumbs the interrupt fd after
+ * the backend connection changed.
+ *
+ * Ordering is mandatory: the callback bound to the stale fd is
+ * unregistered first, then the backend's current fd is installed in the
+ * intr handle, and only then is the callback registered and the
+ * interrupt enabled again. Failures at each step are logged but do not
+ * stop the remaining steps (unlike the disconnect handler, there is no
+ * early return before enable).
+ */
+static void
+virtio_user_dev_delayed_intr_reconfig_handler(void *param)
+{
+ struct virtio_user_dev *dev = param;
+ struct rte_eth_dev *eth_dev = &rte_eth_devices[dev->hw.port_id];
+
+ PMD_DRV_LOG(DEBUG, "Unregistering intr fd: %d",
+ eth_dev->intr_handle->fd);
+
+ /* Drop the callback tied to the stale fd before swapping fds. */
+ if (rte_intr_callback_unregister(eth_dev->intr_handle,
+ virtio_interrupt_handler,
+ eth_dev) != 1)
+ PMD_DRV_LOG(ERR, "interrupt unregister failed");
+
+ /* Adopt the backend's current fd as the interrupt source. */
+ eth_dev->intr_handle->fd = dev->ops->get_intr_fd(dev);
+
+ PMD_DRV_LOG(DEBUG, "Registering intr fd: %d", eth_dev->intr_handle->fd);
+
+ if (rte_intr_callback_register(eth_dev->intr_handle,
+ virtio_interrupt_handler, eth_dev))
+ PMD_DRV_LOG(ERR, "interrupt register failed");
+
+ if (rte_intr_enable(eth_dev->intr_handle) < 0)
+ PMD_DRV_LOG(ERR, "interrupt enable failed");
+}
+
+int
+virtio_user_dev_server_reconnect(struct virtio_user_dev *dev)
+{
+ int ret, old_status;
+ struct rte_eth_dev *eth_dev = &rte_eth_devices[dev->hw.port_id];
+ struct virtio_hw *hw = &dev->hw;
+
+ if (!dev->ops->server_reconnect) {
+ PMD_DRV_LOG(ERR, "(%s) Missing server reconnect callback", dev->path);
+ return -1;
+ }
+
+ if (dev->ops->server_reconnect(dev)) {
+ PMD_DRV_LOG(ERR, "(%s) Reconnect callback call failed", dev->path);
+ return -1;
+ }
+
+ old_status = dev->status;
+
+ virtio_reset(hw);
+
+ virtio_set_status(hw, VIRTIO_CONFIG_STATUS_ACK);
+
+ virtio_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER);
+
+ if (dev->ops->get_features(dev, &dev->device_features) < 0) {
+ PMD_INIT_LOG(ERR, "get_features failed: %s",
+ strerror(errno));
+ return -1;
+ }
+
+ /* unmask vhost-user unsupported features */
+ dev->device_features &= ~(dev->unsupported_features);
+
+ dev->features &= (dev->device_features | dev->frontend_features);
+
+ /* For packed ring, resetting queues is required in reconnection. */
+ if (virtio_with_packed_queue(hw) &&
+ (old_status & VIRTIO_CONFIG_STATUS_DRIVER_OK)) {
+ PMD_INIT_LOG(NOTICE, "Packets on the fly will be dropped"
+ " when packed ring reconnecting.");
+ virtio_user_dev_reset_queues_packed(eth_dev);