/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef _VIRTQUEUE_H_
#define _VIRTQUEUE_H_

#include <stdint.h>
#include <errno.h>

#include <linux/virtio_ring.h>
#include <linux/virtio_net.h>

#include <rte_atomic.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_ethdev.h>

#include "virtio_logs.h"
/* The alignment to use between the consumer and producer parts of the vring. */
#define VIRTIO_PCI_VRING_ALIGN 4096
/*
 * Address translation here is gva <-> hva,
 * rather than gpa <-> hva as in the virtio spec.
 */
#define RTE_MBUF_DATA_DMA_ADDR(mb) \
	rte_pktmbuf_mtod(mb, uint64_t)
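
/*
 * Illustrative sketch (hypothetical helper, not part of the driver): with
 * the Xen backend sharing the guest's address space, a descriptor carries
 * the mbuf's guest-virtual data address directly, whereas a PCI virtio PMD
 * would post a guest-physical address instead.
 */
static inline uint64_t
vq_example_desc_addr(struct rte_mbuf *mb)
{
	/* Guest VA of the packet data, usable as-is by the backend. */
	return RTE_MBUF_DATA_DMA_ADDR(mb);
}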
enum { VTNET_RQ = 0, VTNET_TQ = 1, VTNET_CQ = 2 };
/*
 * The maximum virtqueue size is 2^15. Use that value as the descriptor
 * chain terminator, since it will never be a valid index into the
 * descriptor table. This is used to verify that we are correctly
 * handling vq_free_cnt.
 */
#define VQ_RING_DESC_CHAIN_END 32768
#define VIRTQUEUE_MAX_NAME_SZ 32
struct pmd_internals {
	struct rte_eth_stats eth_stats;
};
struct virtqueue {
	char vq_name[VIRTQUEUE_MAX_NAME_SZ];
	struct rte_mempool *mpool; /**< mempool for mbuf allocation */
	uint16_t queue_id;         /**< DPDK queue index. */
	uint16_t vq_queue_index;   /**< PCI queue index */
	uint8_t port_id;           /**< Device port identifier. */

	void *vq_ring_virt_mem;    /**< virtual address of the vring */

	struct vring vq_ring;      /**< vring keeping desc, used and avail */
	struct pmd_internals *internals; /**< virtio device internal info. */
	uint16_t vq_nentries;      /**< number of vring descriptors */
	uint16_t vq_desc_head_idx; /**< head of the free descriptor chain */
	uint16_t vq_free_cnt;      /**< number of free descriptors */
	uint16_t vq_used_cons_idx; /**< last consumed used-ring entry; trails vq_ring.used->idx */

	struct vq_desc_extra {
		void *cookie;      /**< mbuf attached to this descriptor chain */
		uint16_t ndescs;   /**< number of descriptors in the chain */
	} vq_descx[0] __rte_cache_aligned;
};
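
/*
 * Sizing sketch (illustrative, not the driver's init path): vq_descx[0] is
 * a flexible-array-style member, so one allocation holds the queue header
 * plus one extra-info slot per descriptor.
 */
static inline size_t
vq_example_alloc_size(uint16_t nentries)
{
	return sizeof(struct virtqueue) +
		(size_t)nentries * sizeof(struct vq_desc_extra);
}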
#ifdef RTE_LIBRTE_XENVIRT_DEBUG_DUMP
#define VIRTQUEUE_DUMP(vq) do { \
	uint16_t used_idx, nused; \
	used_idx = (vq)->vq_ring.used->idx; \
	nused = (uint16_t)(used_idx - (vq)->vq_used_cons_idx); \
	PMD_INIT_LOG(DEBUG, \
		"VQ: %s - size=%d; free=%d; used=%d; desc_head_idx=%d;" \
		" avail.idx=%d; used_cons_idx=%d; used.idx=%d;" \
		" avail.flags=0x%x; used.flags=0x%x\n", \
		(vq)->vq_name, (vq)->vq_nentries, (vq)->vq_free_cnt, nused, \
		(vq)->vq_desc_head_idx, (vq)->vq_ring.avail->idx, \
		(vq)->vq_used_cons_idx, (vq)->vq_ring.used->idx, \
		(vq)->vq_ring.avail->flags, (vq)->vq_ring.used->flags); \
} while (0)
#else
#define VIRTQUEUE_DUMP(vq) do { } while (0)
#endif
/**
 * Dump virtqueue internal structures, for debug purposes only.
 */
void virtqueue_dump(struct virtqueue *vq);
/**
 * Get all mbufs still attached to the virtqueue, to be freed by the caller.
 */
struct rte_mbuf *virtqueue_detatch_unused(struct virtqueue *vq);
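
/*
 * Usage sketch (hypothetical teardown path), assuming the function returns
 * one leftover mbuf per call and NULL once the queue is drained:
 */
static inline void
vq_example_drain(struct virtqueue *vq)
{
	struct rte_mbuf *m;

	while ((m = virtqueue_detatch_unused(vq)) != NULL)
		rte_pktmbuf_free(m);
}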
static inline int __attribute__((always_inline))
virtqueue_full(const struct virtqueue *vq)
{
	return vq->vq_free_cnt == 0;
}
#define VIRTQUEUE_NUSED(vq) \
	((uint16_t)((vq)->vq_ring.used->idx - (vq)->vq_used_cons_idx))
static inline void __attribute__((always_inline))
vq_ring_update_avail(struct virtqueue *vq, uint16_t desc_idx)
{
	uint16_t avail_idx;

	/*
	 * Place the head of the descriptor chain into the next slot and make
	 * it usable to the host. The chain is made available now rather than
	 * deferring to virtqueue_notify() in the hopes that if the host is
	 * currently running on another CPU, we can keep it processing the new
	 * descriptors.
	 */
	avail_idx = (uint16_t)(vq->vq_ring.avail->idx & (vq->vq_nentries - 1));
	vq->vq_ring.avail->ring[avail_idx] = desc_idx;
	rte_wmb(); /* publish the ring entry before updating the index */
	vq->vq_ring.avail->idx++;
}
static inline void __attribute__((always_inline))
vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
{
	struct vring_desc *dp;
	struct vq_desc_extra *dxp;

	dp = &vq->vq_ring.desc[desc_idx];
	dxp = &vq->vq_descx[desc_idx];
	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt + dxp->ndescs);
	/* Walk to the tail of the chain being freed. */
	while (dp->flags & VRING_DESC_F_NEXT) {
		dp = &vq->vq_ring.desc[dp->next];
	}

	/*
	 * We must append the existing free chain, if any, to the end of the
	 * newly freed chain. If the virtqueue was completely used, then
	 * head would be VQ_RING_DESC_CHAIN_END (ASSERTed above).
	 */
	dp->next = vq->vq_desc_head_idx;
	vq->vq_desc_head_idx = desc_idx;
}
static inline int __attribute__((always_inline))
virtqueue_enqueue_recv_refill(struct virtqueue *rxvq, struct rte_mbuf *cookie)
{
	const uint16_t needed = 1;
	const uint16_t head_idx = rxvq->vq_desc_head_idx;
	struct vring_desc *start_dp = rxvq->vq_ring.desc;
	struct vq_desc_extra *dxp;

	if (unlikely(rxvq->vq_free_cnt == 0))
		return -ENOSPC;
	if (unlikely(rxvq->vq_free_cnt < needed))
		return -EMSGSIZE;
	if (unlikely(head_idx >= rxvq->vq_nentries))
		return -EFAULT;

	dxp = &rxvq->vq_descx[head_idx];
	dxp->cookie = (void *)cookie;
	dxp->ndescs = needed;

	/* One descriptor covers the virtio_net header plus the packet buffer. */
	start_dp[head_idx].addr =
		(uint64_t)((uintptr_t)cookie->buf_addr +
			RTE_PKTMBUF_HEADROOM - sizeof(struct virtio_net_hdr));
	start_dp[head_idx].len =
		cookie->buf_len - RTE_PKTMBUF_HEADROOM + sizeof(struct virtio_net_hdr);
	start_dp[head_idx].flags = VRING_DESC_F_WRITE;
	rxvq->vq_desc_head_idx = start_dp[head_idx].next;
	rxvq->vq_free_cnt = (uint16_t)(rxvq->vq_free_cnt - needed);
	vq_ring_update_avail(rxvq, head_idx);

	return 0;
}
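
/*
 * Usage sketch (hypothetical RX replenish loop, not the driver's actual
 * refill path): allocate mbufs from the queue's mempool and post them
 * until the ring is full or the pool runs dry.
 */
static inline void
vq_example_rx_refill(struct virtqueue *rxvq)
{
	struct rte_mbuf *m;

	while (!virtqueue_full(rxvq)) {
		m = rte_pktmbuf_alloc(rxvq->mpool);
		if (m == NULL)
			break;
		if (virtqueue_enqueue_recv_refill(rxvq, m) != 0) {
			rte_pktmbuf_free(m);
			break;
		}
	}
}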
static inline int __attribute__((always_inline))
virtqueue_enqueue_xmit(struct virtqueue *txvq, struct rte_mbuf *cookie)
{
	const uint16_t needed = 2;
	struct vring_desc *start_dp = txvq->vq_ring.desc;
	uint16_t head_idx = txvq->vq_desc_head_idx;
	uint16_t idx = head_idx;
	struct vq_desc_extra *dxp;

	if (unlikely(txvq->vq_free_cnt == 0))
		return -ENOSPC;
	if (unlikely(txvq->vq_free_cnt < needed))
		return -EMSGSIZE;
	if (unlikely(head_idx >= txvq->vq_nentries))
		return -EFAULT;

	dxp = &txvq->vq_descx[idx];
	dxp->cookie = (void *)cookie;
	dxp->ndescs = needed;

	/*
	 * First descriptor: the (empty) virtio_net header.
	 * TODO: save one desc here?
	 */
	start_dp[idx].addr = (uintptr_t)NULL;
	start_dp[idx].len = sizeof(struct virtio_net_hdr);
	start_dp[idx].flags = VRING_DESC_F_NEXT;
	idx = start_dp[idx].next;

	/* Second descriptor: the packet data. */
	start_dp[idx].addr = RTE_MBUF_DATA_DMA_ADDR(cookie);
	start_dp[idx].len = cookie->data_len;
	start_dp[idx].flags = 0;
	idx = start_dp[idx].next;

	txvq->vq_desc_head_idx = idx;
	txvq->vq_free_cnt = (uint16_t)(txvq->vq_free_cnt - needed);
	vq_ring_update_avail(txvq, head_idx);

	return 0;
}
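
/*
 * Usage sketch (hypothetical single-packet TX): each packet consumes two
 * descriptors (header + data), which is what the needed == 2 check above
 * enforces; the real transmit path also notifies the backend afterwards.
 */
static inline int
vq_example_tx_one(struct virtqueue *txvq, struct rte_mbuf *m)
{
	int ret = virtqueue_enqueue_xmit(txvq, m);

	if (ret == 0)
		VIRTQUEUE_DUMP(txvq); /* no-op unless debug dump is enabled */
	return ret;
}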
static inline uint16_t __attribute__((always_inline))
virtqueue_dequeue_burst(struct virtqueue *vq, struct rte_mbuf **rx_pkts,
		uint32_t *len, uint16_t num)
{
	struct vring_used_elem *uep;
	struct rte_mbuf *cookie;
	uint16_t used_idx, desc_idx;
	uint16_t i;

	/* Caller does the check that num does not exceed VIRTQUEUE_NUSED(vq). */
	for (i = 0; i < num; i++) {
		used_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
		uep = &vq->vq_ring.used->ring[used_idx];
		desc_idx = (uint16_t)uep->id;
		cookie = (struct rte_mbuf *)vq->vq_descx[desc_idx].cookie;
		if (unlikely(cookie == NULL)) {
			PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u\n",
				vq->vq_used_cons_idx);
			RTE_LOG(ERR, PMD, "%s: inconsistent (%u, %u)\n",
				__func__, used_idx, desc_idx);
			break;
		}
		len[i] = uep->len;
		rx_pkts[i] = cookie;
		vq->vq_used_cons_idx++;
		vq_ring_free_chain(vq, desc_idx);
		vq->vq_descx[desc_idx].cookie = NULL;
	}

	return i;
}
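
/*
 * Usage sketch (hypothetical RX poll): bound the burst by the number of
 * used entries first, since virtqueue_dequeue_burst() itself relies on the
 * caller having performed that check.
 */
static inline uint16_t
vq_example_rx_poll(struct virtqueue *rxvq, struct rte_mbuf **pkts,
		uint32_t *len, uint16_t max_pkts)
{
	uint16_t nb_used = VIRTQUEUE_NUSED(rxvq);

	if (nb_used > max_pkts)
		nb_used = max_pkts;
	if (nb_used == 0)
		return 0;
	return virtqueue_dequeue_burst(rxvq, pkts, len, nb_used);
}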
#endif /* _VIRTQUEUE_H_ */