/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation
 */

/**
 * @file
 *
 * Device specific vhost lib
 */

#include <stdbool.h>
#include <errno.h>
#include <string.h>

#include <rte_class.h>
#include <rte_malloc.h>

#include "rte_vdpa.h"
#include "vhost.h"
static struct rte_vdpa_device vdpa_devices[MAX_VHOST_DEVICE];
static uint32_t vdpa_device_num;
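/*
 * Compare two vDPA device addresses for equality. PCI addresses are
 * compared field by field; other address types compare on type only.
 */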
static bool
is_same_vdpa_device(struct rte_vdpa_dev_addr *a,
		struct rte_vdpa_dev_addr *b)
{
	if (a->type != b->type)
		return false;

	switch (a->type) {
	case VDPA_ADDR_PCI:
		if (a->pci_addr.domain != b->pci_addr.domain ||
				a->pci_addr.bus != b->pci_addr.bus ||
				a->pci_addr.devid != b->pci_addr.devid ||
				a->pci_addr.function != b->pci_addr.function)
			return false;
		break;
	default:
		break;
	}

	return true;
}
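/*
 * Register a vDPA device and return its device id (its slot index in
 * vdpa_devices[]), or -1 on failure: table full, NULL argument, or an
 * address that is already registered.
 *
 * A minimal usage sketch from a hypothetical PCI driver probe function
 * (the ops structure and PCI device below are illustrative, not part of
 * this file):
 *
 *	static struct rte_vdpa_dev_ops my_vdpa_ops = { ... };
 *
 *	struct rte_vdpa_dev_addr addr = {
 *		.type = VDPA_ADDR_PCI,
 *		.pci_addr = pci_dev->addr,
 *	};
 *	int did = rte_vdpa_register_device(&addr, &my_vdpa_ops);
 *	if (did < 0)
 *		return -1;
 */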
int
rte_vdpa_register_device(struct rte_vdpa_dev_addr *addr,
		struct rte_vdpa_dev_ops *ops)
{
	struct rte_vdpa_device *dev;
	int i;

	if (vdpa_device_num >= MAX_VHOST_DEVICE || addr == NULL || ops == NULL)
		return -1;

	/* Refuse duplicate registration of the same device address. */
	for (i = 0; i < MAX_VHOST_DEVICE; i++) {
		dev = &vdpa_devices[i];
		if (dev->ops && is_same_vdpa_device(&dev->addr, addr))
			return -1;
	}

	/* Find a free slot; a NULL ops pointer marks a slot unused. */
	for (i = 0; i < MAX_VHOST_DEVICE; i++) {
		if (vdpa_devices[i].ops == NULL)
			break;
	}

	if (i == MAX_VHOST_DEVICE)
		return -1;

	dev = &vdpa_devices[i];
	memcpy(&dev->addr, addr, sizeof(struct rte_vdpa_dev_addr));
	dev->ops = ops;
	vdpa_device_num++;

	return i;
}
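/*
 * Unregister the device with id 'did'. Zeroing the slot clears its ops
 * pointer, which marks the slot free for reuse. Returns the id on
 * success, -1 on an invalid or unused id.
 */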
int
rte_vdpa_unregister_device(int did)
{
	if (did < 0 || did >= MAX_VHOST_DEVICE || vdpa_devices[did].ops == NULL)
		return -1;

	memset(&vdpa_devices[did], 0, sizeof(struct rte_vdpa_device));
	vdpa_device_num--;

	return did;
}
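/*
 * Find the device id of a registered device matching 'addr'.
 * Returns -1 when no match is found.
 */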
int
rte_vdpa_find_device_id(struct rte_vdpa_dev_addr *addr)
{
	struct rte_vdpa_device *dev;
	int i;

	if (addr == NULL)
		return -1;

	for (i = 0; i < MAX_VHOST_DEVICE; ++i) {
		dev = &vdpa_devices[i];
		if (dev->ops == NULL)
			continue;

		if (is_same_vdpa_device(&dev->addr, addr))
			return i;
	}

	return -1;
}
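/* Translate a device id into its device structure, NULL if out of range. */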
struct rte_vdpa_device *
rte_vdpa_get_device(int did)
{
	if (did < 0 || did >= MAX_VHOST_DEVICE)
		return NULL;

	return &vdpa_devices[did];
}
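/* Return the number of currently registered vDPA devices. */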
int
rte_vdpa_get_device_num(void)
{
	return vdpa_device_num;
}
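/*
 * Synchronize the used ring of a split virtqueue from the mediated ring
 * written by the vDPA device ('vring_m') to the guest's used ring, and
 * log the dirty pages of every DMA-writeable buffer on the way so that
 * live migration captures device writes. Returns the number of used
 * entries relayed, or -1 on error; packed virtqueues are not handled.
 */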
int
rte_vdpa_relay_vring_used(int vid, uint16_t qid, void *vring_m)
{
	struct virtio_net *dev = get_device(vid);
	uint16_t idx, idx_m, desc_id;
	struct vhost_virtqueue *vq;
	struct vring_desc desc;
	struct vring_desc *desc_ring;
	struct vring_desc *idesc = NULL;
	struct vring *s_vring;
	uint64_t dlen;
	uint32_t nr_descs;
	int ret;

	if (!dev || !vring_m)
		return -1;

	if (qid >= dev->nr_vring)
		return -1;

	if (vq_is_packed(dev))
		return -1;

	s_vring = (struct vring *)vring_m;
	vq = dev->virtqueue[qid];
	idx = vq->used->idx;
	idx_m = s_vring->used->idx;
	ret = (uint16_t)(idx_m - idx);

	while (idx != idx_m) {
		/* copy used entry, used ring logging is not covered here */
		vq->used->ring[idx & (vq->size - 1)] =
			s_vring->used->ring[idx & (vq->size - 1)];

		desc_id = vq->used->ring[idx & (vq->size - 1)].id;
		desc_ring = vq->desc;
		nr_descs = vq->size;

		if (unlikely(desc_id >= vq->size))
			return -1;

		if (vq->desc[desc_id].flags & VRING_DESC_F_INDIRECT) {
			dlen = vq->desc[desc_id].len;
			nr_descs = dlen / sizeof(struct vring_desc);
			if (unlikely(nr_descs > vq->size))
				return -1;

			desc_ring = (struct vring_desc *)(uintptr_t)
				vhost_iova_to_vva(dev, vq,
						vq->desc[desc_id].addr, &dlen,
						VHOST_ACCESS_RO);
			if (unlikely(!desc_ring))
				return -1;

			if (unlikely(dlen < vq->desc[desc_id].len)) {
				/*
				 * The indirect descriptor table is not
				 * contiguous in process VA space: copy it.
				 */
				idesc = vhost_alloc_copy_ind_table(dev, vq,
						vq->desc[desc_id].addr,
						vq->desc[desc_id].len);
				if (unlikely(!idesc))
					return -1;

				desc_ring = idesc;
			}

			desc_id = 0;
		}

		/* dirty page logging for DMA writeable buffer */
		do {
			if (unlikely(desc_id >= vq->size))
				goto fail;
			if (unlikely(nr_descs-- == 0))
				goto fail;
			desc = desc_ring[desc_id];
			if (desc.flags & VRING_DESC_F_WRITE)
				vhost_log_write_iova(dev, vq, desc.addr,
						desc.len);
			desc_id = desc.next;
		} while (desc.flags & VRING_DESC_F_NEXT);

		if (unlikely(idesc)) {
			free_ind_table(idesc);
			idesc = NULL;
		}

		idx++;
	}

	/* Make the relayed entries visible before updating the used index. */
	rte_smp_wmb();
	vq->used->idx = idx_m;

	if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))
		vring_used_event(s_vring) = idx_m;

	return ret;

fail:
	if (unlikely(idesc))
		free_ind_table(idesc);
	return -1;
}
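/*
 * Query the driver for the names of its statistics counters through the
 * get_stats_names op. Returns a negative errno when the device does not
 * exist or the op is not implemented.
 */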
int
rte_vdpa_get_stats_names(int did, struct rte_vdpa_stat_name *stats_names,
		unsigned int size)
{
	struct rte_vdpa_device *vdpa_dev;

	vdpa_dev = rte_vdpa_get_device(did);
	if (!vdpa_dev)
		return -ENODEV;

	RTE_FUNC_PTR_OR_ERR_RET(vdpa_dev->ops->get_stats_names, -ENOTSUP);

	return vdpa_dev->ops->get_stats_names(did, stats_names, size);
}
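/* Fetch up to 'n' statistics of queue 'qid' through the get_stats op. */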
int
rte_vdpa_get_stats(int did, uint16_t qid, struct rte_vdpa_stat *stats,
		unsigned int n)
{
	struct rte_vdpa_device *vdpa_dev;

	vdpa_dev = rte_vdpa_get_device(did);
	if (!vdpa_dev)
		return -ENODEV;

	if (!stats || !n)
		return -EINVAL;

	RTE_FUNC_PTR_OR_ERR_RET(vdpa_dev->ops->get_stats, -ENOTSUP);

	return vdpa_dev->ops->get_stats(did, qid, stats, n);
}
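/* Reset the statistics of queue 'qid' through the reset_stats op. */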
int
rte_vdpa_reset_stats(int did, uint16_t qid)
{
	struct rte_vdpa_device *vdpa_dev;

	vdpa_dev = rte_vdpa_get_device(did);
	if (!vdpa_dev)
		return -ENODEV;

	RTE_FUNC_PTR_OR_ERR_RET(vdpa_dev->ops->reset_stats, -ENOTSUP);

	return vdpa_dev->ops->reset_stats(did, qid);
}
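/*
 * Map a device pointer back to its index in vdpa_devices[]; the
 * out-of-range value MAX_VHOST_DEVICE doubles as the "invalid" result.
 */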
static uint16_t
vdpa_dev_to_id(const struct rte_vdpa_device *dev)
{
	if (dev == NULL)
		return MAX_VHOST_DEVICE;

	if (dev < &vdpa_devices[0] ||
			dev >= &vdpa_devices[MAX_VHOST_DEVICE])
		return MAX_VHOST_DEVICE;

	return (uint16_t)(dev - vdpa_devices);
}
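/*
 * Iterator match callback: a vDPA device matches an rte_device when both
 * sit on the PCI bus and their PCI addresses are equal (0 on match).
 */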
static int
vdpa_dev_match(struct rte_vdpa_device *dev,
		const struct rte_device *rte_dev)
{
	struct rte_vdpa_dev_addr addr;

	/* Only PCI bus supported for now */
	if (strcmp(rte_dev->bus->name, "pci") != 0)
		return -1;

	addr.type = VDPA_ADDR_PCI;

	if (rte_pci_addr_parse(rte_dev->name, &addr.pci_addr) != 0)
		return -1;

	if (!is_same_vdpa_device(&dev->addr, &addr))
		return -1;

	return 0;
}
/* Generic rte_vdpa_dev comparison function. */
typedef int (*rte_vdpa_cmp_t)(struct rte_vdpa_device *,
		const struct rte_device *rte_dev);
static struct rte_vdpa_device *
vdpa_find_device(const struct rte_vdpa_device *start, rte_vdpa_cmp_t cmp,
		struct rte_device *rte_dev)
{
	struct rte_vdpa_device *dev;
	uint16_t idx;

	if (start != NULL)
		idx = vdpa_dev_to_id(start) + 1;
	else
		idx = 0;
	for (; idx < MAX_VHOST_DEVICE; idx++) {
		dev = &vdpa_devices[idx];
		/*
		 * ToDo: Certainly better to introduce a state field,
		 * but rely on ops being set for now.
		 */
		if (dev->ops == NULL)
			continue;
		if (cmp(dev, rte_dev) == 0)
			return dev;
	}

	return NULL;
}
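/*
 * rte_class iterator callback: resume scanning after 'start' (or from
 * the first slot when start is NULL) and return the next device whose
 * address matches 'it->device'.
 */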
static void *
vdpa_dev_iterate(const void *start,
		const char *str,
		const struct rte_dev_iterator *it)
{
	struct rte_vdpa_device *vdpa_dev = NULL;

	RTE_SET_USED(str);

	vdpa_dev = vdpa_find_device(start, vdpa_dev_match, it->device);

	return vdpa_dev;
}
static struct rte_class rte_class_vdpa = {
	.dev_iterate = vdpa_dev_iterate,
};

RTE_REGISTER_CLASS(vdpa, rte_class_vdpa);
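/*
 * With the class registered, applications can enumerate vDPA devices
 * through the generic device iteration API; a minimal sketch,
 * illustrative only:
 *
 *	struct rte_dev_iterator it;
 *	struct rte_device *dev;
 *
 *	RTE_DEV_FOREACH(dev, "class=vdpa", &it)
 *		printf("matched vDPA device: %s\n", dev->name);
 */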