+static __rte_always_inline void
+drain_vhost(struct vhost_dev *vdev)
+{
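+	/*
+	 * Flush this lcore's buffered mbufs for the given vhost device
+	 * into its RX virtqueue (VIRTIO_RXQ).
+	 */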
+	uint16_t ret;
+	uint32_t buff_idx = rte_lcore_id() * RTE_MAX_VHOST_DEVICE + vdev->vid;
+	uint16_t nr_xmit = vhost_txbuff[buff_idx]->len;
+	struct rte_mbuf **m = vhost_txbuff[buff_idx]->m_table;
+
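+	/*
+	 * Three enqueue paths: the builtin vhost-net driver, async enqueue
+	 * offloaded to a DMA device, or the standard synchronous enqueue.
+	 */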
+	if (builtin_net_driver) {
+		ret = vs_enqueue_pkts(vdev, VIRTIO_RXQ, m, nr_xmit);
+	} else if (dma_bind[vdev->vid].dmas[VIRTIO_RXQ].async_enabled) {
+		uint16_t enqueue_fail = 0;
+		int16_t dma_id = dma_bind[vdev->vid].dmas[VIRTIO_RXQ].dev_id;
+
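+		/* Reclaim mbufs whose DMA copies have already completed. */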
+		complete_async_pkts(vdev);
+		ret = rte_vhost_submit_enqueue_burst(vdev->vid, VIRTIO_RXQ,
+						m, nr_xmit, dma_id, 0);
+		__atomic_add_fetch(&vdev->pkts_inflight, ret, __ATOMIC_SEQ_CST);
+
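+		/*
+		 * Packets rejected by the async enqueue are freed here;
+		 * accepted packets are freed once their DMA copies complete.
+		 */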
+		enqueue_fail = nr_xmit - ret;
+		if (enqueue_fail)
+			free_pkts(&m[ret], enqueue_fail);
+	} else {
+		ret = rte_vhost_enqueue_burst(vdev->vid, VIRTIO_RXQ,
+						m, nr_xmit);
+	}
+
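+	/*
+	 * rx_total_atomic counts packets handed to the enqueue path,
+	 * rx_atomic only those it actually accepted.
+	 */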
+	if (enable_stats) {
+		__atomic_add_fetch(&vdev->stats.rx_total_atomic, nr_xmit,
+				__ATOMIC_SEQ_CST);
+		__atomic_add_fetch(&vdev->stats.rx_atomic, ret,
+				__ATOMIC_SEQ_CST);
+	}
+
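+	/*
+	 * The sync paths copy packets into guest memory, so the mbufs can be
+	 * freed right away; async mbufs are freed by complete_async_pkts().
+	 */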
+	if (!dma_bind[vdev->vid].dmas[VIRTIO_RXQ].async_enabled)
+		free_pkts(m, nr_xmit);
+}
+
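+/*
+ * Flush any per-lcore vhost TX buffer that has not been drained within
+ * MBUF_TABLE_DRAIN_TSC cycles.
+ */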
+static __rte_always_inline void
+drain_vhost_table(void)
+{
+	uint16_t lcore_id = rte_lcore_id();
+	struct vhost_bufftable *vhost_txq;
+	struct vhost_dev *vdev;
+	uint64_t cur_tsc;
+
+	TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
+		if (unlikely(vdev->remove == 1))
+			continue;
+
+		vhost_txq = vhost_txbuff[lcore_id * RTE_MAX_VHOST_DEVICE
+						+ vdev->vid];
+
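+		/* Drain only buffers pending longer than the drain period. */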
+		cur_tsc = rte_rdtsc();
+		if (unlikely(cur_tsc - vhost_txq->pre_tsc
+				> MBUF_TABLE_DRAIN_TSC)) {
+			RTE_LOG_DP(DEBUG, VHOST_DATA,
+				"Vhost TX queue drained after timeout with burst size %u\n",
+				vhost_txq->len);
+			drain_vhost(vdev);
+			vhost_txq->len = 0;
+			vhost_txq->pre_tsc = cur_tsc;
+		}
+	}
+}
+