	struct vhost_queue queues[MAX_QUEUE_PAIRS * 2];
} __rte_cache_aligned;
+typedef uint16_t (*vhost_enqueue_burst_t)(struct vhost_dev *dev,
+		uint16_t queue_id, struct rte_mbuf **pkts,
+		uint32_t count);
+
+typedef uint16_t (*vhost_dequeue_burst_t)(struct vhost_dev *dev,
+		uint16_t queue_id, struct rte_mempool *mbuf_pool,
+		struct rte_mbuf **pkts, uint16_t count);
+
+struct vhost_queue_ops {
+	vhost_enqueue_burst_t enqueue_pkt_burst;
+	vhost_dequeue_burst_t dequeue_pkt_burst;
+};
+
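
The two callback types above mirror the enqueue/dequeue prototypes declared later in this header, so a device can carry a single ops table and the forwarding loop can dispatch without branching on which datapath is in use. A minimal sketch of such a dispatch, assuming a hypothetical queue_ops member in struct vhost_dev (this hunk defines only the types):

/* Sketch only: the "queue_ops" member is an illustrative assumption,
 * not part of this patch. */
static inline uint16_t
vdev_enqueue(struct vhost_dev *vdev, uint16_t queue_id,
		struct rte_mbuf **pkts, uint32_t count)
{
	return vdev->queue_ops.enqueue_pkt_burst(vdev, queue_id, pkts, count);
}

static inline uint16_t
vdev_dequeue(struct vhost_dev *vdev, uint16_t queue_id,
		struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts,
		uint16_t count)
{
	return vdev->queue_ops.dequeue_pkt_burst(vdev, queue_id,
			mbuf_pool, pkts, count);
}
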
TAILQ_HEAD(vhost_dev_tailq_list, vhost_dev);
struct dma_for_vhost {
	struct dma_info dmas[RTE_MAX_QUEUES_PER_PORT * 2];
+	uint32_t async_flag;
};
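
The new async_flag records, per vhost port, which directions of its datapath run over the DMA-accelerated asynchronous path. How the flag is encoded and tested lies outside this hunk; one plausible shape, with illustrative bit values:

/* Illustrative only: the bit layout of async_flag is an assumption;
 * this hunk adds just the field. */
#include <stdbool.h>

#define ASYNC_ENQUEUE_VHOST	(1U << 0)
#define ASYNC_DEQUEUE_VHOST	(1U << 1)

static inline bool
dma_async_enqueue_enabled(const struct dma_for_vhost *dma)
{
	return (dma->async_flag & ASYNC_ENQUEUE_VHOST) != 0;
}
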
/* we implement non-extra virtio net features */
uint16_t vs_enqueue_pkts(struct vhost_dev *dev, uint16_t queue_id,
		struct rte_mbuf **pkts, uint32_t count);
-uint16_t vs_dequeue_pkts(struct vhost_dev *dev, uint16_t queue_id,
-		struct rte_mempool *mbuf_pool,
-		struct rte_mbuf **pkts, uint16_t count);
+uint16_t builtin_enqueue_pkts(struct vhost_dev *dev, uint16_t queue_id,
+		struct rte_mbuf **pkts, uint32_t count);
+uint16_t builtin_dequeue_pkts(struct vhost_dev *dev, uint16_t queue_id,
+		struct rte_mempool *mbuf_pool,
+		struct rte_mbuf **pkts, uint16_t count);
+uint16_t sync_enqueue_pkts(struct vhost_dev *dev, uint16_t queue_id,
+		struct rte_mbuf **pkts, uint32_t count);
+uint16_t sync_dequeue_pkts(struct vhost_dev *dev, uint16_t queue_id,
+		struct rte_mempool *mbuf_pool,
+		struct rte_mbuf **pkts, uint16_t count);
+uint16_t async_enqueue_pkts(struct vhost_dev *dev, uint16_t queue_id,
+		struct rte_mbuf **pkts, uint32_t count);
+uint16_t async_dequeue_pkts(struct vhost_dev *dev, uint16_t queue_id,
+		struct rte_mempool *mbuf_pool,
+		struct rte_mbuf **pkts, uint16_t count);
#endif /* _MAIN_H_ */
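
With three enqueue/dequeue pairs exported (builtin, sync, async), the natural consumer of struct vhost_queue_ops is device setup code that binds one pair per device. A hedged sketch of that selection; the first parameter mirrors the sample app's existing --builtin-net-driver option, and the decision order here is an assumption, not taken from the patch:

/* Sketch: choosing a datapath when a device comes up. Only the
 * prototypes above come from the patch; the policy is illustrative. */
static void
select_queue_ops(struct vhost_queue_ops *ops, int builtin_net_driver,
		uint32_t async_flag)
{
	if (builtin_net_driver) {
		ops->enqueue_pkt_burst = builtin_enqueue_pkts;
		ops->dequeue_pkt_burst = builtin_dequeue_pkts;
	} else if (async_flag != 0) {
		ops->enqueue_pkt_burst = async_enqueue_pkts;
		ops->dequeue_pkt_burst = async_dequeue_pkts;
	} else {
		ops->enqueue_pkt_burst = sync_enqueue_pkts;
		ops->dequeue_pkt_burst = sync_dequeue_pkts;
	}
}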