+/* Return a burst of @n mbufs to their mempool. */
+static inline void
+free_pkts(struct rte_mbuf **pkts, uint16_t n)
+{
+	uint16_t i;
+
+	for (i = 0; i < n; i++)
+		rte_pktmbuf_free(pkts[i]);
+}
+
+/*
+ * Poll the bound DMA device for finished async enqueues on VIRTIO_RXQ,
+ * free the completed mbufs and decrement the device's in-flight count.
+ */
+static __rte_always_inline void
+complete_async_pkts(struct vhost_dev *vdev)
+{
+	struct rte_mbuf *cpl_pkts[MAX_PKT_BURST];
+	int16_t dma_id = dma_bind[vdev->vid].dmas[VIRTIO_RXQ].dev_id;
+	uint16_t n_done;
+
+	n_done = rte_vhost_poll_enqueue_completed(vdev->vid, VIRTIO_RXQ,
+			cpl_pkts, MAX_PKT_BURST, dma_id, 0);
+	if (n_done == 0)
+		return;
+
+	free_pkts(cpl_pkts, n_done);
+	__atomic_sub_fetch(&vdev->pkts_inflight, n_done, __ATOMIC_SEQ_CST);
+}
+
+/*
+ * Synchronously enqueue a single mbuf from @src_vdev into @dst_vdev's
+ * RX queue (VM-to-VM forwarding), then account it in both devices'
+ * statistics when stats are enabled.
+ */
+static __rte_always_inline void
+sync_virtio_xmit(struct vhost_dev *dst_vdev, struct vhost_dev *src_vdev,
+	    struct rte_mbuf *m)
+{
+	uint16_t enqueued;
+
+	enqueued = builtin_net_driver ?
+		vs_enqueue_pkts(dst_vdev, VIRTIO_RXQ, &m, 1) :
+		rte_vhost_enqueue_burst(dst_vdev->vid, VIRTIO_RXQ, &m, 1);
+
+	if (!enable_stats)
+		return;
+
+	/* RX counters use atomics; TX counters are plain increments. */
+	__atomic_add_fetch(&dst_vdev->stats.rx_total_atomic, 1,
+		__ATOMIC_SEQ_CST);
+	__atomic_add_fetch(&dst_vdev->stats.rx_atomic, enqueued,
+		__ATOMIC_SEQ_CST);
+	src_vdev->stats.tx_total++;
+	src_vdev->stats.tx += enqueued;
+}
+
+/*
+ * Flush this lcore's buffered TX packets for @vdev into its VIRTIO_RXQ.
+ *
+ * Three enqueue paths are possible:
+ *  - builtin net driver: vs_enqueue_pkts();
+ *  - async (DMA) path: poll prior completions, then submit the burst and
+ *    bump the in-flight counter; only the packets the submit call rejected
+ *    are freed here, accepted ones are freed by complete_async_pkts();
+ *  - plain sync path: rte_vhost_enqueue_burst().
+ * In the non-async paths all mbufs are freed at the end, whether or not
+ * the enqueue accepted them.
+ */
+static __rte_always_inline void
+drain_vhost(struct vhost_dev *vdev)
+{
+	uint16_t ret;
+	uint32_t buff_idx = rte_lcore_id() * RTE_MAX_VHOST_DEVICE + vdev->vid;
+	uint16_t nr_xmit = vhost_txbuff[buff_idx]->len;
+	struct rte_mbuf **m = vhost_txbuff[buff_idx]->m_table;
+
+	if (builtin_net_driver) {
+		ret = vs_enqueue_pkts(vdev, VIRTIO_RXQ, m, nr_xmit);
+	} else if (dma_bind[vdev->vid].dmas[VIRTIO_RXQ].async_enabled) {
+		uint16_t enqueue_fail = 0;
+		int16_t dma_id = dma_bind[vdev->vid].dmas[VIRTIO_RXQ].dev_id;
+
+		complete_async_pkts(vdev);
+		ret = rte_vhost_submit_enqueue_burst(vdev->vid, VIRTIO_RXQ, m, nr_xmit, dma_id, 0);
+		__atomic_add_fetch(&vdev->pkts_inflight, ret, __ATOMIC_SEQ_CST);
+
+		/* Free the tail of the burst the submit call did not accept
+		 * (use enqueue_fail instead of recomputing nr_xmit - ret). */
+		enqueue_fail = nr_xmit - ret;
+		if (enqueue_fail)
+			free_pkts(&m[ret], enqueue_fail);
+	} else {
+		ret = rte_vhost_enqueue_burst(vdev->vid, VIRTIO_RXQ,
+						m, nr_xmit);
+	}
+
+	if (enable_stats) {
+		__atomic_add_fetch(&vdev->stats.rx_total_atomic, nr_xmit,
+				__ATOMIC_SEQ_CST);
+		__atomic_add_fetch(&vdev->stats.rx_atomic, ret,
+				__ATOMIC_SEQ_CST);
+	}
+
+	if (!dma_bind[vdev->vid].dmas[VIRTIO_RXQ].async_enabled)
+		free_pkts(m, nr_xmit);
+}
+
+/*
+ * Walk all registered vhost devices and flush this lcore's pending TX
+ * buffer for any device whose buffer has aged past MBUF_TABLE_DRAIN_TSC.
+ */
+static __rte_always_inline void
+drain_vhost_table(void)
+{
+	struct vhost_bufftable *txq;
+	struct vhost_dev *vdev;
+	uint16_t lcore_id = rte_lcore_id();
+	uint64_t now;
+
+	TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
+		if (unlikely(vdev->remove == 1))
+			continue;
+
+		txq = vhost_txbuff[lcore_id * RTE_MAX_VHOST_DEVICE + vdev->vid];
+
+		/* Skip buffers that have not yet hit the drain timeout. */
+		now = rte_rdtsc();
+		if (likely(now - txq->pre_tsc <= MBUF_TABLE_DRAIN_TSC))
+			continue;
+
+		RTE_LOG_DP(DEBUG, VHOST_DATA,
+			"Vhost TX queue drained after timeout with burst size %u\n",
+			txq->len);
+		drain_vhost(vdev);
+		txq->len = 0;
+		txq->pre_tsc = now;
+	}
+}
+