/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef _VIRTIO_RING_H_
#define _VIRTIO_RING_H_

#include <stdint.h>

#include <rte_common.h>
/* Flag bits for vring_desc.flags. */

/* This marks a buffer as continuing via the next field. */
#define VRING_DESC_F_NEXT 1
/* This marks a buffer as write-only (otherwise read-only). */
#define VRING_DESC_F_WRITE 2
/* This means the buffer contains a list of buffer descriptors. */
#define VRING_DESC_F_INDIRECT 4

/* The Host uses this in used->flags to advise the Guest: don't kick me
 * when you add a buffer. It's unreliable, so it's simply an
 * optimization. Guest will still kick if it's out of buffers. */
#define VRING_USED_F_NO_NOTIFY 1
/* The Guest uses this in avail->flags to advise the Host: don't
 * interrupt me when you consume a buffer. It's unreliable, so it's
 * simply an optimization. */
#define VRING_AVAIL_F_NO_INTERRUPT 1
/* VirtIO ring descriptors: 16 bytes.
 * These can chain together via "next". */
struct vring_desc {
	uint64_t addr;  /* Address (guest-physical). */
	uint32_t len;   /* Length. */
	uint16_t flags; /* The flags as indicated above. */
	uint16_t next;  /* We chain unused descriptors via this. */
};

/* Ring of descriptor heads the Guest offers to the Host. */
struct vring_avail {
	uint16_t flags;
	uint16_t idx;
	uint16_t ring[0];
};

/* id is a 16bit index. uint32_t is used here for ids for padding reasons. */
struct vring_used_elem {
	/* Index of start of used descriptor chain. */
	uint32_t id;
	/* Total length of the descriptor chain which was written to. */
	uint32_t len;
};

/* Ring of descriptor heads the Host has consumed.
 * idx is written by the Host, hence volatile on the Guest side. */
struct vring_used {
	uint16_t flags;
	volatile uint16_t idx;
	struct vring_used_elem ring[0];
};

/* Pointers into one contiguous chunk of ring memory (see layout below). */
struct vring {
	unsigned int num;          /* Number of descriptors; assumed power of 2. */
	struct vring_desc  *desc;
	struct vring_avail *avail;
	struct vring_used  *used;
};
/* The standard layout for the ring is a continuous chunk of memory which
 * looks like this. We assume num is a power of 2.
 *
 * struct vring {
 *      // The actual descriptors (16 bytes each)
 *      struct vring_desc desc[num];
 *
 *      // A ring of available descriptor heads with free-running index.
 *      __u16 avail_flags;
 *      __u16 avail_idx;
 *      __u16 available[num];
 *      __u16 used_event_idx;
 *
 *      // Padding to the next align boundary.
 *      char pad[];
 *
 *      // A ring of used descriptor heads with free-running index.
 *      __u16 used_flags;
 *      __u16 used_idx;
 *      struct vring_used_elem used[num];
 *      __u16 avail_event_idx;
 * };
 *
 * NOTE: for VirtIO PCI, align is 4096.
 */

/*
 * We publish the used event index at the end of the available ring, and vice
 * versa. They are at the end for backwards compatibility.
 */
/* Used event index: the extra uint16_t slot after avail->ring[num]. */
#define vring_used_event(vr) ((vr)->avail->ring[(vr)->num])
/* Avail event index: the extra uint16_t slot after used->ring[num]. */
#define vring_avail_event(vr) (*(uint16_t *)&(vr)->used->ring[(vr)->num])
127 vring_size(unsigned int num, unsigned long align)
131 size = num * sizeof(struct vring_desc);
132 size += sizeof(struct vring_avail) + (num * sizeof(uint16_t));
133 size = RTE_ALIGN_CEIL(size, align);
134 size += sizeof(struct vring_used) +
135 (num * sizeof(struct vring_used_elem));
140 vring_init(struct vring *vr, unsigned int num, uint8_t *p,
144 vr->desc = (struct vring_desc *) p;
145 vr->avail = (struct vring_avail *) (p +
146 num * sizeof(struct vring_desc));
148 RTE_ALIGN_CEIL((uintptr_t)(&vr->avail->ring[num]), align);
/*
 * The following is used with VIRTIO_RING_F_EVENT_IDX.
 * Assuming a given event_idx value from the other side, if we have
 * just incremented index from old to new_idx, should we trigger an
 * event? All arithmetic is mod-2^16 by design (free-running indices).
 */
static inline int
vring_need_event(uint16_t event_idx, uint16_t new_idx, uint16_t old)
{
	/* True iff event_idx lies in the half-open window (old, new_idx]. */
	return (uint16_t)(new_idx - event_idx - 1) < (uint16_t)(new_idx - old);
}
163 #endif /* _VIRTIO_RING_H_ */