1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2017 Intel Corporation
10 #include <rte_ether.h>
/*
 * Per-subsystem log types for RTE_LOG(), mapped onto the three
 * application-reserved USER log types: configuration path, packet
 * datapath, and port/device management respectively.
 */
#define RTE_LOGTYPE_VHOST_CONFIG RTE_LOGTYPE_USER1
#define RTE_LOGTYPE_VHOST_DATA RTE_LOGTYPE_USER2
#define RTE_LOGTYPE_VHOST_PORT RTE_LOGTYPE_USER3
/*
 * Virtqueue indices within one guest queue pair: RX is vring 0, TX is
 * vring 1, and VIRTIO_QNUM is the number of vrings per pair.  The
 * values are made explicit because they mirror the virtio-net vring
 * numbering and must not change if enumerators are reordered.
 */
enum {VIRTIO_RXQ = 0, VIRTIO_TXQ = 1, VIRTIO_QNUM = 2};
#define MAX_PKT_BURST 32 /* Max burst size for RX/TX (mbufs per burst call) */
/*
 * Per-device packet counters.
 *
 * NOTE(review): this view of the header is fragmentary -- lines are
 * missing between many of the members below, including at least one
 * struct boundary: device_statistics never visibly closes before the
 * vring-related members begin, and the TAILQ_ENTRY(vhost_dev) members
 * show that the struct closed at the bottom is the per-device struct.
 * Verify every layout assumption against the complete header.
 */
struct device_statistics {
	uint64_t rx_total_atomic;	/* presumably an atomically updated RX counter -- confirm */
	/* -- missing lines: remaining counters / struct close / next struct open -- */
	struct rte_vhost_vring vr;	/* vring handle obtained from the vhost library */
	uint16_t last_avail_idx;	/* NOTE(review): look like split-ring index shadows -- confirm */
	uint16_t last_used_idx;
	/* -- missing lines -- */
	/**< Number of memory regions for gpa to hpa translation. */
	uint32_t nregions_hpa;
	/**< Device MAC address (Obtained on first TX packet). */
	struct rte_ether_addr mac_address;
	/**< RX VMDQ queue number. */
	/* -- missing line: the member documented above -- */
	/**< Vlan tag assigned to the pool */
	/* -- missing line: the member documented above -- */
	/**< Data core that the device is added to. */
	/* -- missing line: the member documented above -- */
	/**< A device is set as ready if the MAC address has been set. */
	volatile uint8_t ready;
	/**< Device is marked for removal from the data core. */
	volatile uint8_t remove;
	/* -- missing lines -- */
	struct rte_vhost_memory *mem;	/* guest memory region table */
	struct device_statistics stats;	/* counters declared above */
	TAILQ_ENTRY(vhost_dev) global_vdev_entry;	/* linkage in the global device list */
	TAILQ_ENTRY(vhost_dev) lcore_vdev_entry;	/* linkage in a data core's device list */
	/* -- missing lines -- */
#define MAX_QUEUE_PAIRS 4
	/* one RX and one TX vring per queue pair, hence the factor of 2 */
	struct vhost_queue queues[MAX_QUEUE_PAIRS * 2];
} __rte_cache_aligned;
/* Burst enqueue entry point (host-to-guest direction, presumably --
 * confirm against the implementations).  Returns the number of packets
 * accepted.  NOTE(review): the tail of this declaration (final count
 * parameter and closing parenthesis) falls in lines missing from this
 * view of the file. */
typedef uint16_t (*vhost_enqueue_burst_t)(struct vhost_dev *dev,
		uint16_t queue_id, struct rte_mbuf **pkts,
/* Burst dequeue entry point; mbufs are allocated from mbuf_pool and up
 * to count packets are returned in pkts. */
typedef uint16_t (*vhost_dequeue_burst_t)(struct vhost_dev *dev,
		uint16_t queue_id, struct rte_mempool *mbuf_pool,
		struct rte_mbuf **pkts, uint16_t count);
/* Table of datapath entry points; the builtin_/sync_/async_ prototypes
 * below match these signatures, so the active implementation is
 * presumably selected at runtime -- confirm in the .c file.
 * NOTE(review): the closing brace of this struct falls in lines
 * missing from this view of the file. */
struct vhost_queue_ops {
	vhost_enqueue_burst_t enqueue_pkt_burst;
	vhost_dequeue_burst_t dequeue_pkt_burst;
/* Head type for TAILQ lists of vhost_dev (see the TAILQ_ENTRY members
 * on the device struct above). */
TAILQ_HEAD(vhost_dev_tailq_list, vhost_dev);
/* Handshake values for the device-removal flag declared below
 * ("Flag to synchronize device removal"): presumably the config core
 * requests removal and the data core acknowledges -- confirm against
 * the .c file. */
#define REQUEST_DEV_REMOVAL 1
#define ACK_DEV_REMOVAL 0
/*
 * Structure containing data core specific information.
 * NOTE(review): the opening of this struct (and its name) falls in
 * lines missing from this view of the file.
 */
/* Flag to synchronize device removal. */
volatile uint8_t dev_removal_flag;	/* holds REQUEST_DEV_REMOVAL / ACK_DEV_REMOVAL -- presumably */
struct vhost_dev_tailq_list vdev_list;	/* devices assigned to this data core */
/* NOTE(review): the member below belongs to a different struct whose
 * opening falls in the missing lines (likely per-DMA-channel info,
 * given the PCI address type and the dma_* struct that follows) --
 * confirm against the complete header. */
struct rte_pci_addr addr;
/* Per-port DMA bookkeeping: one dma_info slot per vring (RX and TX for
 * every queue, hence the factor of 2).  Presumably used by the async_*
 * datapath below -- confirm.  NOTE(review): the remainder of this
 * struct falls in lines missing from this view of the file. */
struct dma_for_vhost {
	struct dma_info dmas[RTE_MAX_QUEUES_PER_PORT * 2];
/* We advertise no extra virtio-net feature bits (feature mask is 0). */
#define VIRTIO_NET_FEATURES 0
/* Set up / tear down the per-device virtio-net state for one vhost device. */
void vs_vhost_net_setup(struct vhost_dev *dev);
void vs_vhost_net_remove(struct vhost_dev *dev);
/* Enqueue up to count mbufs to queue_id of dev; returns the number of
 * packets actually enqueued -- presumably, confirm in the .c file. */
uint16_t vs_enqueue_pkts(struct vhost_dev *dev, uint16_t queue_id,
		struct rte_mbuf **pkts, uint32_t count);
/* Datapath variants below share the vhost_queue_ops signatures
 * (enqueue takes uint32_t count, dequeue uint16_t count). */
/* "builtin" implementation. */
uint16_t builtin_enqueue_pkts(struct vhost_dev *dev, uint16_t queue_id,
		struct rte_mbuf **pkts, uint32_t count);
uint16_t builtin_dequeue_pkts(struct vhost_dev *dev, uint16_t queue_id,
		struct rte_mempool *mbuf_pool,
		struct rte_mbuf **pkts, uint16_t count);
/* Synchronous (CPU-copy) implementation. */
uint16_t sync_enqueue_pkts(struct vhost_dev *dev, uint16_t queue_id,
		struct rte_mbuf **pkts, uint32_t count);
uint16_t sync_dequeue_pkts(struct vhost_dev *dev, uint16_t queue_id,
		struct rte_mempool *mbuf_pool,
		struct rte_mbuf **pkts, uint16_t count);
/* Asynchronous implementation -- NOTE(review): presumably DMA-offloaded
 * (see struct dma_for_vhost above); confirm against the .c file. */
uint16_t async_enqueue_pkts(struct vhost_dev *dev, uint16_t queue_id,
		struct rte_mbuf **pkts, uint32_t count);
uint16_t async_dequeue_pkts(struct vhost_dev *dev, uint16_t queue_id,
		struct rte_mempool *mbuf_pool,
		struct rte_mbuf **pkts, uint16_t count);
129 #endif /* _MAIN_H_ */