#define MAX_VHOST_DEVICE 1024
#define IOAT_RING_SIZE 4096
-#define MAX_ENQUEUED_SIZE 512
+#define MAX_ENQUEUED_SIZE 4096
struct dma_info {
    struct rte_pci_addr addr;
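
The enqueue cap is raised from 512 to 4096, which matches IOAT_RING_SIZE above. A minimal sketch, assuming the intent is that MAX_ENQUEUED_SIZE should never exceed the IOAT ring depth, of a compile-time guard one could add (hypothetical, not part of this diff):

    /* Hypothetical guard, not in the original change: MAX_ENQUEUED_SIZE
     * now equals IOAT_RING_SIZE, so this assumption still holds. */
    _Static_assert(MAX_ENQUEUED_SIZE <= IOAT_RING_SIZE,
            "MAX_ENQUEUED_SIZE must not exceed the IOAT ring size");
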
    complete_count = rte_vhost_poll_enqueue_completed(vdev->vid,
                VIRTIO_RXQ, p_cpl, MAX_PKT_BURST);
-   if (complete_count) {
-       __atomic_sub_fetch(&vdev->nr_async_pkts, complete_count,
-               __ATOMIC_SEQ_CST);
+   if (complete_count)
        free_pkts(p_cpl, complete_count);
-   }
}

static __rte_always_inline void
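
For context, a sketch of what complete_async_pkts() looks like after this change; the signature and the local declarations are assumptions, since the hunk above only shows the body. Completed packets are taken straight from rte_vhost_poll_enqueue_completed() and freed, with no per-device in-flight counter left to decrement:

    /* Sketch only: the signature and locals are assumed from context,
     * they are not part of the hunk above. */
    static __rte_always_inline void
    complete_async_pkts(struct vhost_dev *vdev)
    {
        struct rte_mbuf *p_cpl[MAX_PKT_BURST];
        uint16_t complete_count;

        /* Ask the vhost async layer which enqueues have finished ... */
        complete_count = rte_vhost_poll_enqueue_completed(vdev->vid,
                    VIRTIO_RXQ, p_cpl, MAX_PKT_BURST);
        /* ... and release those mbufs back to their pool. */
        if (complete_count)
            free_pkts(p_cpl, complete_count);
    }
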
    complete_async_pkts(vdev);
    ret = rte_vhost_submit_enqueue_burst(vdev->vid, VIRTIO_RXQ,
                m, nr_xmit, m_cpu_cpl, &cpu_cpl_nr);
-   __atomic_add_fetch(&vdev->nr_async_pkts, ret - cpu_cpl_nr,
-           __ATOMIC_SEQ_CST);
    if (cpu_cpl_nr)
        free_pkts(m_cpu_cpl, cpu_cpl_nr);

    enqueue_count = rte_vhost_submit_enqueue_burst(vdev->vid,
                VIRTIO_RXQ, pkts, rx_count,
                m_cpu_cpl, &cpu_cpl_nr);
-   __atomic_add_fetch(&vdev->nr_async_pkts,
-           enqueue_count - cpu_cpl_nr,
-           __ATOMIC_SEQ_CST);
    if (cpu_cpl_nr)
        free_pkts(m_cpu_cpl, cpu_cpl_nr);
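
Both submit paths now follow the same pattern: rte_vhost_submit_enqueue_burst() reports in cpu_cpl_nr how many packets the CPU copy path completed synchronously, those mbufs are freed on the spot, and the remaining packets stay in flight until complete_async_pkts() reclaims them. The per-device nr_async_pkts atomic counter is dropped; presumably the completion count returned by rte_vhost_poll_enqueue_completed() already gives the sample what it needs, so it no longer keeps bookkeeping of its own. A hypothetical helper (not in the patch; the name and the local types are assumptions) that captures the shared pattern:

    /* Hypothetical refactor sketch, not part of this change: submit one
     * burst to the async path and free whatever the CPU copy path has
     * already completed. */
    static __rte_always_inline uint16_t
    async_enqueue_and_free(struct vhost_dev *vdev, struct rte_mbuf **pkts,
            uint16_t count)
    {
        struct rte_mbuf *m_cpu_cpl[MAX_PKT_BURST];
        uint32_t cpu_cpl_nr = 0;
        uint16_t enqueued;

        /* Reclaim finished enqueues before submitting new ones. */
        complete_async_pkts(vdev);
        enqueued = rte_vhost_submit_enqueue_burst(vdev->vid, VIRTIO_RXQ,
                    pkts, count, m_cpu_cpl, &cpu_cpl_nr);
        /* Packets copied by the CPU are done immediately; free them. */
        if (cpu_cpl_nr)
            free_pkts(m_cpu_cpl, cpu_cpl_nr);

        return enqueued;
    }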