/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef _VIRTQUEUE_H_
#define _VIRTQUEUE_H_

#include <stdint.h>
#include <errno.h>

#include <linux/virtio_ring.h>
#include <linux/virtio_net.h>
#include <rte_atomic.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_ethdev.h>

#include "virtio_logs.h"
/* The alignment to use between consumer and producer parts of vring. */
#define VIRTIO_PCI_VRING_ALIGN 4096
/*
 * Address translation is between gva<->hva,
 * rather than gpa<->hva as in the virtio spec.
 */
#define RTE_MBUF_DATA_DMA_ADDR(mb) \
	rte_pktmbuf_mtod(mb, uint64_t)
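
/*
 * Usage sketch: the value of RTE_MBUF_DATA_DMA_ADDR() is written straight
 * into a vring descriptor, e.g. in virtqueue_enqueue_xmit() below:
 *
 *     start_dp[idx].addr = RTE_MBUF_DATA_DMA_ADDR(cookie);
 *
 * For comparison (illustrative, not part of this file): in the PCI virtio
 * PMD the equivalent macro resolves to a guest-physical address, roughly
 * buf_physaddr + data_off. Here the Xen backend maps the guest's address
 * space, so the guest virtual address from rte_pktmbuf_mtod() can be
 * handed to the host directly.
 */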
enum { VTNET_RQ = 0, VTNET_TQ = 1, VTNET_CQ = 2 };
/**
 * The maximum virtqueue size is 2^15. Use that value as the end of
 * descriptor chain terminator, since it will never be a valid index
 * in the descriptor table. This is used to verify we are correctly
 * handling vq_free_cnt. (See the free-list init sketch after
 * struct virtqueue below.)
 */
#define VQ_RING_DESC_CHAIN_END 32768

#define VIRTQUEUE_MAX_NAME_SZ 32
struct pmd_internals {
	struct rte_eth_stats eth_stats;
};

struct virtqueue {
	char vq_name[VIRTQUEUE_MAX_NAME_SZ];
	struct rte_mempool *mpool;       /**< mempool for mbuf allocation */
	uint16_t queue_id;               /**< DPDK queue index. */
	uint16_t vq_queue_index;         /**< PCI queue index */
	uint8_t port_id;                 /**< Device port identifier. */

	void *vq_ring_virt_mem;          /**< virtual address of the vring */

	struct vring vq_ring;            /**< vring keeping desc, used and avail */
	struct pmd_internals *internals; /**< virtio device internal info. */
	uint16_t vq_nentries;            /**< number of vring descriptors */
	uint16_t vq_desc_head_idx;       /**< head of the free descriptor chain */
	uint16_t vq_free_cnt;            /**< number of free descriptors */
	uint16_t vq_used_cons_idx;       /**< last consumed used-ring index; trails vq_ring.used->idx */
	struct vq_desc_extra {
		void     *cookie;        /**< mbuf attached to this descriptor chain */
		uint16_t  ndescs;        /**< number of descriptors in the chain */
	} vq_descx[0] __rte_cache_aligned;
};
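
/*
 * Illustrative sketch (hypothetical helper, not part of this file's API):
 * how the descriptor free list is typically stitched together at queue
 * setup time. The last descriptor's `next` is left pointing at the
 * VQ_RING_DESC_CHAIN_END sentinel, so an exhausted ring is detectable:
 * once every descriptor is in flight, vq_desc_head_idx becomes the
 * sentinel, which can never collide with a real index (0..32767).
 */
static inline void
vq_ring_init_free_list_sketch(struct virtqueue *vq)
{
	uint16_t i;

	vq->vq_desc_head_idx = 0;
	vq->vq_free_cnt = vq->vq_nentries;
	for (i = 0; i < (uint16_t)(vq->vq_nentries - 1); i++)
		vq->vq_ring.desc[i].next = (uint16_t)(i + 1);
	vq->vq_ring.desc[vq->vq_nentries - 1].next =
		(uint16_t)VQ_RING_DESC_CHAIN_END;
}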

#ifdef RTE_LIBRTE_XENVIRT_DEBUG_DUMP
#define VIRTQUEUE_DUMP(vq) do { \
	uint16_t used_idx, nused; \
	used_idx = (vq)->vq_ring.used->idx; \
	nused = (uint16_t)(used_idx - (vq)->vq_used_cons_idx); \
	PMD_INIT_LOG(DEBUG, \
		"VQ: %s - size=%d; free=%d; used=%d; desc_head_idx=%d;" \
		" avail.idx=%d; used_cons_idx=%d; used.idx=%d;" \
		" avail.flags=0x%x; used.flags=0x%x\n", \
		(vq)->vq_name, (vq)->vq_nentries, (vq)->vq_free_cnt, nused, \
		(vq)->vq_desc_head_idx, (vq)->vq_ring.avail->idx, \
		(vq)->vq_used_cons_idx, (vq)->vq_ring.used->idx, \
		(vq)->vq_ring.avail->flags, (vq)->vq_ring.used->flags); \
} while (0)
#else
#define VIRTQUEUE_DUMP(vq) do { } while (0)
#endif

/**
 * Dump virtqueue internal structures, for debug purposes only.
 */
void virtqueue_dump(struct virtqueue *vq);

/**
 * Get all mbufs to be freed.
 */
struct rte_mbuf *virtqueue_detatch_unused(struct virtqueue *vq);

static inline int __attribute__((always_inline))
virtqueue_full(const struct virtqueue *vq)
{
	return vq->vq_free_cnt == 0;
}

#define VIRTQUEUE_NUSED(vq) \
	((uint16_t)((vq)->vq_ring.used->idx - (vq)->vq_used_cons_idx))

static inline void __attribute__((always_inline))
vq_ring_update_avail(struct virtqueue *vq, uint16_t desc_idx)
{
	uint16_t avail_idx;

	/*
	 * Place the head of the descriptor chain into the next slot and make
	 * it usable to the host. The chain is made available now rather than
	 * deferring to virtqueue_notify() in the hopes that if the host is
	 * currently running on another CPU, we can keep it processing the new
	 * descriptor.
	 */
	avail_idx = (uint16_t)(vq->vq_ring.avail->idx & (vq->vq_nentries - 1));
	vq->vq_ring.avail->ring[avail_idx] = desc_idx;
	rte_compiler_barrier(); /* wmb; a compiler barrier suffices on the IA memory model */
	vq->vq_ring.avail->idx++;
}
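
/*
 * Pairing sketch (hypothetical, for illustration only): because chains are
 * made available eagerly here, callers can batch several enqueues and then
 * kick the host once; the virtqueue_notify()-style hook referenced above
 * lives outside this header:
 *
 *     while (have_packets)
 *             virtqueue_enqueue_xmit(txvq, m);   // updates the avail ring
 *     notify_host(txvq);                         // hypothetical single kick
 */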

static inline void __attribute__((always_inline))
vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
{
	struct vring_desc *dp;
	struct vq_desc_extra *dxp;

	dp = &vq->vq_ring.desc[desc_idx];
	dxp = &vq->vq_descx[desc_idx];
	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt + dxp->ndescs);
	/* Walk to the tail of the chain being freed. */
	while (dp->flags & VRING_DESC_F_NEXT)
		dp = &vq->vq_ring.desc[dp->next];
	dxp->ndescs = 0;

	/*
	 * We must append the existing free chain, if any, to the end of
	 * the newly freed chain. If the virtqueue was completely used, then
	 * head would be VQ_RING_DESC_CHAIN_END (asserted above).
	 */
	dp->next = vq->vq_desc_head_idx;
	vq->vq_desc_head_idx = desc_idx;
}

static inline int __attribute__((always_inline))
virtqueue_enqueue_recv_refill(struct virtqueue *rxvq, struct rte_mbuf *cookie)
{
	const uint16_t needed = 1;
	const uint16_t head_idx = rxvq->vq_desc_head_idx;
	struct vring_desc *start_dp = rxvq->vq_ring.desc;
	struct vq_desc_extra *dxp;

	if (unlikely(rxvq->vq_free_cnt == 0))
		return -ENOSPC;
	if (unlikely(rxvq->vq_free_cnt < needed))
		return -EMSGSIZE;
	if (unlikely(head_idx >= rxvq->vq_nentries))
		return -EFAULT;

	dxp = &rxvq->vq_descx[head_idx];
	dxp->cookie = (void *)cookie;
	dxp->ndescs = needed;

	/* Reserve room in front of the packet data for the virtio_net_hdr. */
	start_dp[head_idx].addr =
		(uint64_t)((uintptr_t)cookie->buf_addr +
			RTE_PKTMBUF_HEADROOM - sizeof(struct virtio_net_hdr));
	start_dp[head_idx].len = cookie->buf_len -
		RTE_PKTMBUF_HEADROOM + sizeof(struct virtio_net_hdr);
	start_dp[head_idx].flags = VRING_DESC_F_WRITE;
	rxvq->vq_desc_head_idx = start_dp[head_idx].next;
	rxvq->vq_free_cnt = (uint16_t)(rxvq->vq_free_cnt - needed);
	vq_ring_update_avail(rxvq, head_idx);

	return 0;
}
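
/*
 * Illustrative sketch (hypothetical helper, not part of this file's API):
 * topping up the RX virtqueue from its mempool until the ring is full, as
 * a queue-setup or RX path would do. Returns how many mbufs were posted.
 */
static inline int
virtqueue_rx_refill_sketch(struct virtqueue *rxvq)
{
	int filled = 0;

	while (!virtqueue_full(rxvq)) {
		struct rte_mbuf *m = rte_pktmbuf_alloc(rxvq->mpool);

		if (m == NULL)
			break;	/* mempool exhausted */
		if (virtqueue_enqueue_recv_refill(rxvq, m) != 0) {
			rte_pktmbuf_free(m);
			break;
		}
		filled++;
	}
	return filled;
}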

static inline int __attribute__((always_inline))
virtqueue_enqueue_xmit(struct virtqueue *txvq, struct rte_mbuf *cookie)
{
	const uint16_t needed = 2;
	struct vring_desc *start_dp = txvq->vq_ring.desc;
	uint16_t head_idx = txvq->vq_desc_head_idx;
	uint16_t idx = head_idx;
	struct vq_desc_extra *dxp;

	if (unlikely(txvq->vq_free_cnt == 0))
		return -ENOSPC;
	if (unlikely(txvq->vq_free_cnt < needed))
		return -EMSGSIZE;
	if (unlikely(head_idx >= txvq->vq_nentries))
		return -EFAULT;

	dxp = &txvq->vq_descx[idx];
	dxp->cookie = (void *)cookie;
	dxp->ndescs = needed;

	/*
	 * First descriptor: an empty virtio_net_hdr.
	 * TODO: save one desc here?
	 */
	start_dp[idx].addr = (uintptr_t)NULL;
	start_dp[idx].len = sizeof(struct virtio_net_hdr);
	start_dp[idx].flags = VRING_DESC_F_NEXT;
	idx = start_dp[idx].next;

	/* Second descriptor: the packet data. */
	start_dp[idx].addr = RTE_MBUF_DATA_DMA_ADDR(cookie);
	start_dp[idx].len = cookie->data_len;
	start_dp[idx].flags = 0;
	idx = start_dp[idx].next;

	txvq->vq_desc_head_idx = idx;
	txvq->vq_free_cnt = (uint16_t)(txvq->vq_free_cnt - needed);
	vq_ring_update_avail(txvq, head_idx);

	return 0;
}
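
/*
 * Illustrative sketch (hypothetical helper, not part of this file's API):
 * a minimal TX burst loop over virtqueue_enqueue_xmit(). Each packet
 * consumes two descriptors (header + data), so the loop stops at the
 * first failure, typically when the ring fills up.
 */
static inline uint16_t
virtqueue_xmit_burst_sketch(struct virtqueue *txvq,
		struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	uint16_t nb_tx;

	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
		if (virtqueue_enqueue_xmit(txvq, tx_pkts[nb_tx]) != 0)
			break;	/* ring full; caller retries later */
	}
	return nb_tx;
}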

static inline uint16_t __attribute__((always_inline))
virtqueue_dequeue_burst(struct virtqueue *vq, struct rte_mbuf **rx_pkts,
		uint32_t *len, uint16_t num)
{
	struct vring_used_elem *uep;
	struct rte_mbuf *cookie;
	uint16_t used_idx, desc_idx;
	uint16_t i;

	/* Caller does the bounds check on num. */
	for (i = 0; i < num; i++) {
		used_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
		uep = &vq->vq_ring.used->ring[used_idx];
		desc_idx = (uint16_t)uep->id;
		cookie = (struct rte_mbuf *)vq->vq_descx[desc_idx].cookie;
		if (unlikely(cookie == NULL)) {
			PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u\n",
				vq->vq_used_cons_idx);
			RTE_LOG(ERR, PMD, "%s: inconsistent (%u, %u)\n",
				__func__, used_idx, desc_idx);
			break;
		}

		len[i] = uep->len;
		rx_pkts[i] = cookie;
		vq->vq_used_cons_idx++;
		vq_ring_free_chain(vq, desc_idx);
		vq->vq_descx[desc_idx].cookie = NULL;
	}

	return i;
}
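
/*
 * Illustrative sketch (hypothetical helper, not part of this file's API):
 * a polling RX path bounds its burst by VIRTQUEUE_NUSED(), i.e. by how
 * many chains the host has already returned through the used ring. This
 * is the "check" that virtqueue_dequeue_burst() expects its caller to do.
 */
static inline uint16_t
virtqueue_rx_poll_sketch(struct virtqueue *vq, struct rte_mbuf **rx_pkts,
		uint32_t *lens, uint16_t nb_pkts)
{
	uint16_t nb_used = VIRTQUEUE_NUSED(vq);

	if (nb_used == 0)
		return 0;
	if (nb_pkts > nb_used)
		nb_pkts = nb_used;
	return virtqueue_dequeue_burst(vq, rx_pkts, lens, nb_pkts);
}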

#endif /* _VIRTQUEUE_H_ */