/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#ifndef _VIRTIO_RING_H_
#define _VIRTIO_RING_H_

#include <stdint.h>

#include <rte_common.h>

/* This marks a buffer as continuing via the next field. */
#define VRING_DESC_F_NEXT 1
/* This marks a buffer as write-only (otherwise read-only). */
#define VRING_DESC_F_WRITE 2
/* This means the buffer contains a list of buffer descriptors. */
#define VRING_DESC_F_INDIRECT 4
/* This flag means the descriptor was made available by the driver */
#define VRING_DESC_F_AVAIL(b) ((uint16_t)(b) << 7)
/* This flag means the descriptor was used by the device */
#define VRING_DESC_F_USED(b) ((uint16_t)(b) << 15)
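
/*
 * Illustrative sketch (hypothetical helper, not part of the original file):
 * with the packed ring format, a descriptor slot counts as available to the
 * consumer when its AVAIL bit matches the consumer's ring wrap counter and
 * its USED bit does not.
 */
static inline int
vring_packed_desc_is_avail_example(uint16_t flags, uint16_t wrap_counter)
{
        uint16_t avail = !!(flags & VRING_DESC_F_AVAIL(1));
        uint16_t used = !!(flags & VRING_DESC_F_USED(1));

        return avail == wrap_counter && used != wrap_counter;
}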

/* The Host uses this in used->flags to advise the Guest: don't kick me
 * when you add a buffer. It's unreliable, so it's simply an
 * optimization. Guest will still kick if it's out of buffers. */
#define VRING_USED_F_NO_NOTIFY 1
/* The Guest uses this in avail->flags to advise the Host: don't
 * interrupt me when you consume a buffer. It's unreliable, so it's
 * simply an optimization. */
#define VRING_AVAIL_F_NO_INTERRUPT 1
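
/*
 * Illustrative sketch (hypothetical helper): a driver that did not negotiate
 * VIRTIO_RING_F_EVENT_IDX simply honours the flag above before notifying the
 * device.
 */
static inline int
vring_device_wants_kick_example(uint16_t used_flags)
{
        return !(used_flags & VRING_USED_F_NO_NOTIFY);
}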

/* VirtIO ring descriptors: 16 bytes.
 * These can chain together via "next". */
struct vring_desc {
        uint64_t addr;  /* Address (guest-physical). */
        uint32_t len;   /* Length. */
        uint16_t flags; /* The flags as indicated above. */
        uint16_t next;  /* We chain unused descriptors via this. */
};
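
/*
 * Illustrative sketch (hypothetical helper): descriptors chain through the
 * "next" field for as long as VRING_DESC_F_NEXT is set, so the length of a
 * chain can be counted like this.
 */
static inline uint16_t
vring_desc_chain_len_example(const struct vring_desc *desc, uint16_t head)
{
        uint16_t len = 1;
        uint16_t idx = head;

        while (desc[idx].flags & VRING_DESC_F_NEXT) {
                idx = desc[idx].next;
                len++;
        }
        return len;
}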

struct vring_avail {
        uint16_t flags;
        uint16_t idx;
        uint16_t ring[0];
};

/* id is a 16bit index. uint32_t is used here for ids for padding reasons. */
struct vring_used_elem {
        /* Index of start of used descriptor chain. */
        uint32_t id;
        /* Total length of the descriptor chain which was written to. */
        uint32_t len;
};

struct vring_used {
        uint16_t flags;
        volatile uint16_t idx;
        struct vring_used_elem ring[0];
};

/* For support of packed virtqueues in Virtio 1.1 the format of descriptors
 * looks like this.
 */
struct vring_packed_desc {
        uint64_t addr;
        uint32_t len;
        uint16_t id;
        uint16_t flags;
};

#define RING_EVENT_FLAGS_ENABLE 0x0
#define RING_EVENT_FLAGS_DISABLE 0x1
#define RING_EVENT_FLAGS_DESC 0x2
struct vring_packed_desc_event {
        uint16_t desc_event_off_wrap;
        uint16_t desc_event_flags;
};
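
/*
 * Illustrative sketch (hypothetical helper): when desc_event_flags is set to
 * RING_EVENT_FLAGS_DESC, the low 15 bits of desc_event_off_wrap hold the
 * descriptor offset to be notified about and bit 15 holds the wrap counter.
 */
static inline void
vring_packed_set_desc_event_example(struct vring_packed_desc_event *e,
                uint16_t off, uint16_t wrap_counter)
{
        e->desc_event_off_wrap = (uint16_t)(off | (wrap_counter << 15));
        e->desc_event_flags = RING_EVENT_FLAGS_DESC;
}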

struct vring_packed {
        unsigned int num;
        struct vring_packed_desc *desc_packed;
        struct vring_packed_desc_event *driver_event;
        struct vring_packed_desc_event *device_event;
};

struct vring {
        unsigned int num;
        struct vring_desc *desc;
        struct vring_avail *avail;
        struct vring_used *used;
};

/* The standard layout for the ring is a continuous chunk of memory which
 * looks like this. We assume num is a power of 2.
 *
 * struct vring {
 *      // The actual descriptors (16 bytes each)
 *      struct vring_desc desc[num];
 *
 *      // A ring of available descriptor heads with free-running index.
 *      __u16 avail_flags;
 *      __u16 avail_idx;
 *      __u16 available[num];
 *      __u16 used_event_idx;
 *
 *      // Padding to the next align boundary.
 *      char pad[];
 *
 *      // A ring of used descriptor heads with free-running index.
 *      __u16 used_flags;
 *      __u16 used_idx;
 *      struct vring_used_elem used[num];
 *      __u16 avail_event_idx;
 * };
 *
 * NOTE: for VirtIO PCI, align is 4096.
 */

/*
 * We publish the used event index at the end of the available ring, and vice
 * versa. They are at the end for backwards compatibility.
 */
#define vring_used_event(vr) ((vr)->avail->ring[(vr)->num])
#define vring_avail_event(vr) (*(uint16_t *)&(vr)->used->ring[(vr)->num])
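
/*
 * Illustrative sketch (hypothetical helper): the driver asks for an interrupt
 * once the device's used index reaches used_idx by publishing that value in
 * the used event slot at the end of the available ring.
 */
static inline void
vring_set_used_event_example(struct vring *vr, uint16_t used_idx)
{
        vring_used_event(vr) = used_idx;
}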

static inline size_t
vring_size(struct virtio_hw *hw, unsigned int num, unsigned long align)
{
        size_t size;

        if (vtpci_packed_queue(hw)) {
                size = num * sizeof(struct vring_packed_desc);
                size += sizeof(struct vring_packed_desc_event);
                size = RTE_ALIGN_CEIL(size, align);
                size += sizeof(struct vring_packed_desc_event);
                return size;
        }

        size = num * sizeof(struct vring_desc);
        size += sizeof(struct vring_avail) + (num * sizeof(uint16_t));
        size = RTE_ALIGN_CEIL(size, align);
        size += sizeof(struct vring_used) +
                (num * sizeof(struct vring_used_elem));
        return size;
}

static inline void
vring_init_split(struct vring *vr, uint8_t *p, unsigned long align,
                unsigned int num)
{
        vr->num = num;
        vr->desc = (struct vring_desc *)p;
        vr->avail = (struct vring_avail *)(p +
                num * sizeof(struct vring_desc));
        vr->used = (void *)
                RTE_ALIGN_CEIL((uintptr_t)(&vr->avail->ring[num]), align);
}
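
/*
 * Illustrative sketch (hypothetical helper, not part of the original file):
 * lay out a split ring in a caller-provided, zeroed buffer of at least
 * vring_size() bytes and link the descriptors into an initial free chain.
 */
static inline void
vring_setup_split_example(struct vring *vr, uint8_t *ring_mem,
                unsigned long align, unsigned int num)
{
        unsigned int i;

        vring_init_split(vr, ring_mem, align, num);
        /* Chain each free descriptor to the following one via "next". */
        for (i = 0; i < num - 1; i++)
                vr->desc[i].next = (uint16_t)(i + 1);
}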

static inline void
vring_init_packed(struct vring_packed *vr, uint8_t *p, unsigned long align,
                unsigned int num)
{
        vr->num = num;
        vr->desc_packed = (struct vring_packed_desc *)p;
        vr->driver_event = (struct vring_packed_desc_event *)(p +
                        vr->num * sizeof(struct vring_packed_desc));
        vr->device_event = (struct vring_packed_desc_event *)
                RTE_ALIGN_CEIL(((uintptr_t)vr->driver_event +
                        sizeof(struct vring_packed_desc_event)), align);
}

/*
 * The following is used with VIRTIO_RING_F_EVENT_IDX.
 * Assuming a given event_idx value from the other side, if we have
 * just incremented index from old to new_idx, should we trigger an
 * event?
 */
static inline int
vring_need_event(uint16_t event_idx, uint16_t new_idx, uint16_t old)
{
        return (uint16_t)(new_idx - event_idx - 1) < (uint16_t)(new_idx - old);
}
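
/*
 * Illustrative sketch (hypothetical helper): with VIRTIO_RING_F_EVENT_IDX
 * negotiated, the driver kicks the device only when the device's published
 * available-event index falls between the previously exposed avail index and
 * the new one.
 */
static inline int
vring_driver_should_kick_example(struct vring *vr, uint16_t old_avail_idx)
{
        return vring_need_event(vring_avail_event(vr),
                        vr->avail->idx, old_avail_idx);
}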

#endif /* _VIRTIO_RING_H_ */