/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation
 */

/**
 * @file
 *
 * Device specific vhost lib
 */

#include <stdbool.h>

#include <rte_malloc.h>
#include "rte_vdpa.h"
#include "vhost.h"

static struct rte_vdpa_device *vdpa_devices[MAX_VHOST_DEVICE];
static uint32_t vdpa_device_num;
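/* Return true if the two vDPA device addresses refer to the same device;
 * only PCI addresses are currently compared, field by field.
 */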
static bool
is_same_vdpa_device(struct rte_vdpa_dev_addr *a,
		struct rte_vdpa_dev_addr *b)
{
	bool ret = true;

	if (a->type != b->type)
		return false;

	switch (a->type) {
	case PCI_ADDR:
		if (a->pci_addr.domain != b->pci_addr.domain ||
				a->pci_addr.bus != b->pci_addr.bus ||
				a->pci_addr.devid != b->pci_addr.devid ||
				a->pci_addr.function != b->pci_addr.function)
			ret = false;
		break;
	default:
		break;
	}

	return ret;
}
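/* Register a vDPA device with its address and driver callbacks.
 * Returns the allocated device id on success, -1 if the device table is
 * full, the address is already registered, or allocation fails.
 */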
int
rte_vdpa_register_device(struct rte_vdpa_dev_addr *addr,
		struct rte_vdpa_dev_ops *ops)
{
	struct rte_vdpa_device *dev;
	char device_name[MAX_VDPA_NAME_LEN];
	int i;

	if (vdpa_device_num >= MAX_VHOST_DEVICE)
		return -1;

	for (i = 0; i < MAX_VHOST_DEVICE; i++) {
		dev = vdpa_devices[i];
		if (dev && is_same_vdpa_device(&dev->addr, addr))
			return -1;
	}

	for (i = 0; i < MAX_VHOST_DEVICE; i++) {
		if (vdpa_devices[i] == NULL)
			break;
	}

	if (i == MAX_VHOST_DEVICE)
		return -1;

	sprintf(device_name, "vdpa-dev-%d", i);
	dev = rte_zmalloc(device_name, sizeof(struct rte_vdpa_device),
			RTE_CACHE_LINE_SIZE);
	if (dev == NULL)
		return -1;

	memcpy(&dev->addr, addr, sizeof(struct rte_vdpa_dev_addr));
	dev->ops = ops;
	vdpa_devices[i] = dev;
	vdpa_device_num++;

	return i;
}
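/* Typical driver usage, as an illustrative sketch only: the ops table
 * "my_vdpa_ops" and the probed PCI device pointer "pci_dev" below are
 * hypothetical names, not part of this library.
 *
 *	struct rte_vdpa_dev_addr addr = { .type = PCI_ADDR };
 *	int did;
 *
 *	addr.pci_addr = pci_dev->addr;
 *	did = rte_vdpa_register_device(&addr, &my_vdpa_ops);
 *	if (did < 0)
 *		return -1;
 *	...
 *	rte_vdpa_unregister_device(did);
 */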
int
rte_vdpa_unregister_device(int did)
{
	if (did < 0 || did >= MAX_VHOST_DEVICE || vdpa_devices[did] == NULL)
		return -1;

	rte_free(vdpa_devices[did]);
	vdpa_devices[did] = NULL;
	vdpa_device_num--;

	return did;
}
int
rte_vdpa_find_device_id(struct rte_vdpa_dev_addr *addr)
{
	struct rte_vdpa_device *dev;
	int i;

	for (i = 0; i < MAX_VHOST_DEVICE; ++i) {
		dev = vdpa_devices[i];
		if (dev && is_same_vdpa_device(&dev->addr, addr))
			return i;
	}

	return -1;
}
struct rte_vdpa_device *
rte_vdpa_get_device(int did)
{
	if (did < 0 || did >= MAX_VHOST_DEVICE)
		return NULL;

	return vdpa_devices[did];
}
int
rte_vdpa_get_device_num(void)
{
	return vdpa_device_num;
}
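/* Check that the guest IOVA range [desc_iova, desc_iova + desc_len) can be
 * translated chunk by chunk with the requested access permission; return
 * true (invalid) as soon as one chunk fails to translate.
 */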
static bool
invalid_desc_check(struct virtio_net *dev, struct vhost_virtqueue *vq,
		uint64_t desc_iova, uint64_t desc_len, uint8_t perm)
{
	uint64_t desc_addr, desc_chunck_len;

	while (desc_len) {
		desc_chunck_len = desc_len;
		desc_addr = vhost_iova_to_vva(dev, vq,
				desc_iova,
				&desc_chunck_len,
				perm);

		if (!desc_addr)
			return true;

		desc_len -= desc_chunck_len;
		desc_iova += desc_chunck_len;
	}

	return false;
}
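/* Relay newly made available descriptors from the guest avail ring to the
 * mediated vring given to the vDPA device, after checking that every
 * buffer in the chain (including indirect tables) lies within accessible
 * guest memory. Returns the number of entries relayed, or -1 on error.
 */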
int __rte_experimental
rte_vdpa_relay_vring_avail(int vid, uint16_t qid, void *vring_m)
{
	struct virtio_net *dev = get_device(vid);
	uint16_t idx, idx_m, desc_id;
	struct vring_desc desc;
	struct vhost_virtqueue *vq;
	struct vring_desc *desc_ring;
	struct vring_desc *idesc = NULL;
	struct vring *s_vring;
	uint64_t dlen;
	int ret;
	uint8_t perm;

	if (!dev || !vring_m)
		return -1;

	if (qid >= dev->nr_vring)
		return -1;

	if (vq_is_packed(dev))
		return -1;

	s_vring = (struct vring *)vring_m;
	vq = dev->virtqueue[qid];
	idx = vq->avail->idx;
	idx_m = s_vring->avail->idx;
	ret = (uint16_t)(idx - idx_m);

	while (idx_m != idx) {
		/* avail entry copy */
		desc_id = vq->avail->ring[idx_m & (vq->size - 1)];
		s_vring->avail->ring[idx_m & (vq->size - 1)] = desc_id;
		desc_ring = vq->desc;

		if (vq->desc[desc_id].flags & VRING_DESC_F_INDIRECT) {
			dlen = vq->desc[desc_id].len;
			desc_ring = (struct vring_desc *)(uintptr_t)
				vhost_iova_to_vva(dev, vq,
						vq->desc[desc_id].addr, &dlen,
						VHOST_ACCESS_RO);
			if (unlikely(!desc_ring))
				return -1;

			if (unlikely(dlen < vq->desc[desc_id].len)) {
				/* the indirect table is not contiguous in
				 * host VA space, work on a full copy of it
				 */
				idesc = alloc_copy_ind_table(dev, vq,
						vq->desc[desc_id].addr,
						vq->desc[desc_id].len);
				if (unlikely(!idesc))
					return -1;

				desc_ring = idesc;
			}

			desc_id = 0;
		}

		/* check if the buf addr is within the guest memory */
		do {
			desc = desc_ring[desc_id];
			perm = desc.flags & VRING_DESC_F_WRITE ?
				VHOST_ACCESS_WO : VHOST_ACCESS_RO;
			if (invalid_desc_check(dev, vq, desc.addr, desc.len,
						perm)) {
				if (unlikely(idesc))
					free_ind_table(idesc);
				return -1;
			}
			desc_id = desc.next;
		} while (desc.flags & VRING_DESC_F_NEXT);

		if (unlikely(idesc)) {
			free_ind_table(idesc);
			idesc = NULL;
		}

		idx_m++;
	}

	rte_smp_wmb();
	s_vring->avail->idx = idx;

	if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))
		vhost_avail_event(vq) = idx;

	return ret;
}
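/* Relay used descriptors written by the vDPA device in the mediated used
 * ring back to the guest used ring, logging the pages of DMA-writable
 * buffers as dirty so that live migration captures the device writes.
 * Returns the number of entries relayed, or -1 on error.
 */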
int __rte_experimental
rte_vdpa_relay_vring_used(int vid, uint16_t qid, void *vring_m)
{
	struct virtio_net *dev = get_device(vid);
	uint16_t idx, idx_m, desc_id;
	struct vhost_virtqueue *vq;
	struct vring_desc desc;
	struct vring_desc *desc_ring;
	struct vring_desc *idesc = NULL;
	struct vring *s_vring;
	uint64_t dlen;
	int ret;

	if (!dev || !vring_m)
		return -1;

	if (qid >= dev->nr_vring)
		return -1;

	if (vq_is_packed(dev))
		return -1;

	s_vring = (struct vring *)vring_m;
	vq = dev->virtqueue[qid];
	idx = vq->used->idx;
	idx_m = s_vring->used->idx;
	ret = (uint16_t)(idx_m - idx);

	while (idx != idx_m) {
		/* copy used entry, used ring logging is not covered here */
		vq->used->ring[idx & (vq->size - 1)] =
			s_vring->used->ring[idx & (vq->size - 1)];

		desc_id = vq->used->ring[idx & (vq->size - 1)].id;
		desc_ring = vq->desc;

		if (vq->desc[desc_id].flags & VRING_DESC_F_INDIRECT) {
			dlen = vq->desc[desc_id].len;
			desc_ring = (struct vring_desc *)(uintptr_t)
				vhost_iova_to_vva(dev, vq,
						vq->desc[desc_id].addr, &dlen,
						VHOST_ACCESS_RO);
			if (unlikely(!desc_ring))
				return -1;

			if (unlikely(dlen < vq->desc[desc_id].len)) {
				idesc = alloc_copy_ind_table(dev, vq,
						vq->desc[desc_id].addr,
						vq->desc[desc_id].len);
				if (unlikely(!idesc))
					return -1;

				desc_ring = idesc;
			}

			desc_id = 0;
		}

		/* dirty page logging for DMA writeable buffer */
		do {
			desc = desc_ring[desc_id];
			if (desc.flags & VRING_DESC_F_WRITE)
				vhost_log_write(dev, desc.addr, desc.len);
			desc_id = desc.next;
		} while (desc.flags & VRING_DESC_F_NEXT);

		if (unlikely(idesc)) {
			free_ind_table(idesc);
			idesc = NULL;
		}

		idx++;
	}

	rte_smp_wmb();
	vq->used->idx = idx_m;

	if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))
		vring_used_event(s_vring) = idx_m;

	return ret;
}