/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation
 */

/**
 * @file
 *
 * Device specific vhost lib
 */

#include <stdbool.h>

#include <rte_malloc.h>
#include "rte_vdpa.h"
#include "vhost.h"
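
/* Registry of registered vDPA devices, indexed by device id (did). */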
static struct rte_vdpa_device *vdpa_devices[MAX_VHOST_DEVICE];
static uint32_t vdpa_device_num;
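
/* Check whether two vDPA device addresses refer to the same device. */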
static bool
is_same_vdpa_device(struct rte_vdpa_dev_addr *a,
		struct rte_vdpa_dev_addr *b)
{
	bool ret = true;

	if (a->type != b->type)
		return false;

	switch (a->type) {
	case PCI_ADDR:
		if (a->pci_addr.domain != b->pci_addr.domain ||
				a->pci_addr.bus != b->pci_addr.bus ||
				a->pci_addr.devid != b->pci_addr.devid ||
				a->pci_addr.function != b->pci_addr.function)
			ret = false;
		break;
	default:
		break;
	}

	return ret;
}
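
/* Register a vDPA device: reject duplicate addresses, take the first free
 * slot and return it as the device id, or -1 on failure.
 */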
int
rte_vdpa_register_device(struct rte_vdpa_dev_addr *addr,
		struct rte_vdpa_dev_ops *ops)
{
	struct rte_vdpa_device *dev;
	char device_name[MAX_VDPA_NAME_LEN];
	int i;

	if (vdpa_device_num >= MAX_VHOST_DEVICE)
		return -1;

	for (i = 0; i < MAX_VHOST_DEVICE; i++) {
		dev = vdpa_devices[i];
		if (dev && is_same_vdpa_device(&dev->addr, addr))
			return -1;
	}

	for (i = 0; i < MAX_VHOST_DEVICE; i++) {
		if (vdpa_devices[i] == NULL)
			break;
	}

	if (i == MAX_VHOST_DEVICE)
		return -1;

	sprintf(device_name, "vdpa-dev-%d", i);
	dev = rte_zmalloc(device_name, sizeof(struct rte_vdpa_device),
			RTE_CACHE_LINE_SIZE);
	if (!dev)
		return -1;

	memcpy(&dev->addr, addr, sizeof(struct rte_vdpa_dev_addr));
	dev->ops = ops;
	vdpa_devices[i] = dev;
	vdpa_device_num++;

	return i;
}
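
/* Remove a previously registered device and free its descriptor. */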
int
rte_vdpa_unregister_device(int did)
{
	if (did < 0 || did >= MAX_VHOST_DEVICE || vdpa_devices[did] == NULL)
		return -1;

	rte_free(vdpa_devices[did]);
	vdpa_devices[did] = NULL;
	vdpa_device_num--;

	return did;
}
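
/* Translate a device address into the device id assigned at registration. */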
int
rte_vdpa_find_device_id(struct rte_vdpa_dev_addr *addr)
{
	struct rte_vdpa_device *dev;
	int i;

	for (i = 0; i < MAX_VHOST_DEVICE; ++i) {
		dev = vdpa_devices[i];
		if (dev && is_same_vdpa_device(&dev->addr, addr))
			return i;
	}

	return -1;
}

struct rte_vdpa_device *
rte_vdpa_get_device(int did)
{
	if (did < 0 || did >= MAX_VHOST_DEVICE)
		return NULL;

	return vdpa_devices[did];
}

int
rte_vdpa_get_device_num(void)
{
	return vdpa_device_num;
}
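
/* Validate a guest buffer by translating it chunk by chunk; returns true if
 * any part of [desc_iova, desc_iova + desc_len) cannot be translated with
 * the requested permission.
 */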
static bool
invalid_desc_check(struct virtio_net *dev, struct vhost_virtqueue *vq,
		uint64_t desc_iova, uint64_t desc_len, uint8_t perm)
{
	uint64_t desc_addr, desc_chunck_len;

	while (desc_len) {
		desc_chunck_len = desc_len;
		desc_addr = vhost_iova_to_vva(dev, vq,
				desc_iova,
				&desc_chunck_len,
				perm);

		if (!desc_addr)
			return true;

		desc_len -= desc_chunck_len;
		desc_iova += desc_chunck_len;
	}

	return false;
}
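
/* Relay new available ring entries from the guest vring to the mediated
 * vring, validating each descriptor chain (including indirect tables)
 * before exposing it.
 */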
int __rte_experimental
rte_vdpa_relay_vring_avail(int vid, uint16_t qid, void *vring_m)
{
	struct virtio_net *dev = get_device(vid);
	uint16_t idx, idx_m, desc_id;
	struct vring_desc desc;
	struct vhost_virtqueue *vq;
	struct vring_desc *desc_ring;
	struct vring_desc *idesc = NULL;
	struct vring *s_vring;
	uint64_t dlen;
	uint32_t nr_descs;
	int ret;
	uint8_t perm;

	if (!dev || !vring_m)
		return -1;

	if (qid >= dev->nr_vring)
		return -1;

	if (vq_is_packed(dev))
		return -1;

	s_vring = (struct vring *)vring_m;
	vq = dev->virtqueue[qid];
	idx = vq->avail->idx;
	idx_m = s_vring->avail->idx;
	ret = (uint16_t)(idx - idx_m);

	while (idx_m != idx) {
		/* avail entry copy */
		desc_id = vq->avail->ring[idx_m & (vq->size - 1)];
		if (unlikely(desc_id >= vq->size))
			goto fail;

		s_vring->avail->ring[idx_m & (vq->size - 1)] = desc_id;
		desc_ring = vq->desc;
		nr_descs = vq->size;

		if (vq->desc[desc_id].flags & VRING_DESC_F_INDIRECT) {
			dlen = vq->desc[desc_id].len;
			nr_descs = dlen / sizeof(struct vring_desc);
			if (unlikely(nr_descs > vq->size))
				goto fail;

			desc_ring = (struct vring_desc *)(uintptr_t)
				vhost_iova_to_vva(dev, vq,
						vq->desc[desc_id].addr, &dlen,
						VHOST_ACCESS_RO);
			if (unlikely(!desc_ring))
				goto fail;

			/* indirect table not contiguously mapped: work on a copy */
			if (unlikely(dlen < vq->desc[desc_id].len)) {
				idesc = alloc_copy_ind_table(dev, vq,
						vq->desc[desc_id].addr,
						vq->desc[desc_id].len);
				if (unlikely(!idesc))
					goto fail;

				desc_ring = idesc;
			}

			desc_id = 0;
		}

		/* check if the buf addr is within the guest memory */
		do {
			if (unlikely(desc_id >= vq->size))
				goto fail;
			if (unlikely(nr_descs-- == 0))
				goto fail;
			desc = desc_ring[desc_id];
			perm = desc.flags & VRING_DESC_F_WRITE ?
				VHOST_ACCESS_WO : VHOST_ACCESS_RO;
			if (invalid_desc_check(dev, vq, desc.addr, desc.len,
						perm))
				goto fail;
			desc_id = desc.next;
		} while (desc.flags & VRING_DESC_F_NEXT);

		if (unlikely(idesc)) {
			free_ind_table(idesc);
			idesc = NULL;
		}

		idx_m++;
	}

	/* make relayed entries visible before publishing the new avail idx */
	rte_smp_wmb();
	s_vring->avail->idx = idx;

	if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))
		vhost_avail_event(vq) = idx;

	return ret;

fail:
	if (unlikely(idesc))
		free_ind_table(idesc);
	return -1;
}
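
/* Relay used ring entries from the mediated vring back to the guest vring
 * and log dirty pages for DMA writeable buffers.
 */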
int __rte_experimental
rte_vdpa_relay_vring_used(int vid, uint16_t qid, void *vring_m)
{
	struct virtio_net *dev = get_device(vid);
	uint16_t idx, idx_m, desc_id;
	struct vhost_virtqueue *vq;
	struct vring_desc desc;
	struct vring_desc *desc_ring;
	struct vring_desc *idesc = NULL;
	struct vring *s_vring;
	uint64_t dlen;
	uint32_t nr_descs;
	int ret;

	if (!dev || !vring_m)
		return -1;

	if (qid >= dev->nr_vring)
		return -1;

	if (vq_is_packed(dev))
		return -1;

	s_vring = (struct vring *)vring_m;
	vq = dev->virtqueue[qid];
	idx = vq->used->idx;
	idx_m = s_vring->used->idx;
	ret = (uint16_t)(idx_m - idx);

	while (idx != idx_m) {
		/* copy used entry, used ring logging is not covered here */
		vq->used->ring[idx & (vq->size - 1)] =
			s_vring->used->ring[idx & (vq->size - 1)];

		desc_id = vq->used->ring[idx & (vq->size - 1)].id;
		desc_ring = vq->desc;
		nr_descs = vq->size;

		if (unlikely(desc_id >= vq->size))
			goto fail;

		if (vq->desc[desc_id].flags & VRING_DESC_F_INDIRECT) {
			dlen = vq->desc[desc_id].len;
			nr_descs = dlen / sizeof(struct vring_desc);
			if (unlikely(nr_descs > vq->size))
				goto fail;

			desc_ring = (struct vring_desc *)(uintptr_t)
				vhost_iova_to_vva(dev, vq,
						vq->desc[desc_id].addr, &dlen,
						VHOST_ACCESS_RO);
			if (unlikely(!desc_ring))
				goto fail;

			/* indirect table not contiguously mapped: work on a copy */
			if (unlikely(dlen < vq->desc[desc_id].len)) {
				idesc = alloc_copy_ind_table(dev, vq,
						vq->desc[desc_id].addr,
						vq->desc[desc_id].len);
				if (unlikely(!idesc))
					goto fail;

				desc_ring = idesc;
			}

			desc_id = 0;
		}

		/* dirty page logging for DMA writeable buffer */
		do {
			if (unlikely(desc_id >= vq->size))
				goto fail;
			if (unlikely(nr_descs-- == 0))
				goto fail;
			desc = desc_ring[desc_id];
			if (desc.flags & VRING_DESC_F_WRITE)
				vhost_log_write(dev, desc.addr, desc.len);
			desc_id = desc.next;
		} while (desc.flags & VRING_DESC_F_NEXT);

		if (unlikely(idesc)) {
			free_ind_table(idesc);
			idesc = NULL;
		}

		idx++;
	}

	/* make relayed entries visible before publishing the new used idx */
	rte_smp_wmb();
	vq->used->idx = idx_m;

	if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))
		vring_used_event(s_vring) = idx_m;

	return ret;

fail:
	if (unlikely(idesc))
		free_ind_table(idesc);
	return -1;
}