+ rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL);
+}
+
+/*
+ * Re-sync the Rx interrupt state for one vring after a vring state change.
+ *
+ * @vid:      vhost device id the vring belongs to.
+ * @eth_dev:  ethdev whose per-queue interrupt handle is updated.
+ * @vring_id: vhost vring index; odd indexes are host-Rx queues.
+ *
+ * Returns 0 on success (or when no update is needed), a negative value
+ * on failure.
+ */
+static int
+vring_conf_update(int vid, struct rte_eth_dev *eth_dev, uint16_t vring_id)
+{
+ struct rte_eth_conf *dev_conf = &eth_dev->data->dev_conf;
+ struct pmd_internal *internal = eth_dev->data->dev_private;
+ struct vhost_queue *vq;
+ struct rte_vhost_vring vring;
+ /* Odd vring ids map to Rx queue (id - 1) / 2; even ids are not Rx. */
+ int rx_idx = vring_id % 2 ? (vring_id - 1) >> 1 : -1;
+ int ret = 0;
+
+ /*
+  * The vring kickfd may be changed after the new device notification.
+  * Update it when the vring state is updated. Only act when the port
+  * is attached, started, and configured for Rx interrupts.
+  */
+ if (rx_idx >= 0 && rx_idx < eth_dev->data->nb_rx_queues &&
+ rte_atomic32_read(&internal->dev_attached) &&
+ rte_atomic32_read(&internal->started) &&
+ dev_conf->intr_conf.rxq) {
+ ret = rte_vhost_get_vhost_vring(vid, vring_id, &vring);
+ if (ret) {
+ VHOST_LOG(ERR, "Failed to get vring %d information.\n",
+ vring_id);
+ return ret;
+ }
+
+ /* Store the (possibly new) kickfd as this Rx queue's event fd. */
+ if (rte_intr_efds_index_set(eth_dev->intr_handle, rx_idx,
+ vring.kickfd))
+ return -rte_errno;
+
+ vq = eth_dev->data->rx_queues[rx_idx];
+ if (!vq) {
+ VHOST_LOG(ERR, "rxq%d is not setup yet\n", rx_idx);
+ return -1;
+ }
+
+ /*
+  * Re-arm the interrupt under the lock so the enable flag and the
+  * registered fd cannot race with eth_rx_queue_intr_enable/disable.
+  */
+ rte_spinlock_lock(&vq->intr_lock);
+ if (vq->intr_enable)
+ ret = eth_vhost_update_intr(eth_dev, rx_idx);
+ rte_spinlock_unlock(&vq->intr_lock);
+ }
+
+ return ret;