/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation
 */

/**
 * @file
 *
 * Device specific vhost lib
 */

#include <stdbool.h>
#include <errno.h>
#include <string.h>

#include <rte_class.h>
#include <rte_malloc.h>

#include "rte_vdpa.h"
#include "vhost.h"
static struct rte_vdpa_device vdpa_devices[MAX_VHOST_DEVICE];
static uint32_t vdpa_device_num;
int
rte_vdpa_find_device_id(struct rte_vdpa_device *dev)
{
	struct rte_vdpa_device *tmp_dev;
	int i;

	if (dev == NULL)
		return -1;

	for (i = 0; i < MAX_VHOST_DEVICE; ++i) {
		tmp_dev = &vdpa_devices[i];
		if (tmp_dev->ops == NULL)
			continue;

		if (tmp_dev == dev)
			return i;
	}

	return -1;
}
int
rte_vdpa_find_device_id_by_name(const char *name)
{
	struct rte_vdpa_device *dev;
	int i;

	if (name == NULL)
		return -1;

	for (i = 0; i < MAX_VHOST_DEVICE; ++i) {
		dev = &vdpa_devices[i];
		if (dev->ops == NULL)
			continue;

		if (strncmp(dev->device->name, name, RTE_DEV_NAME_MAX_LEN) == 0)
			return i;
	}

	return -1;
}
struct rte_vdpa_device *
rte_vdpa_get_device(int did)
{
	if (did < 0 || did >= MAX_VHOST_DEVICE)
		return NULL;

	return &vdpa_devices[did];
}
struct rte_vdpa_device *
rte_vdpa_register_device(struct rte_device *rte_dev,
		struct rte_vdpa_dev_ops *ops)
{
	struct rte_vdpa_device *dev;
	int i;

	if (vdpa_device_num >= MAX_VHOST_DEVICE || ops == NULL)
		return NULL;

	/* Refuse to register the same rte_device twice. */
	for (i = 0; i < MAX_VHOST_DEVICE; i++) {
		dev = &vdpa_devices[i];
		if (dev->ops == NULL)
			continue;

		if (dev->device == rte_dev)
			return NULL;
	}

	/* Find a free slot: a slot is free while its ops is unset. */
	for (i = 0; i < MAX_VHOST_DEVICE; i++) {
		if (vdpa_devices[i].ops == NULL)
			break;
	}

	if (i == MAX_VHOST_DEVICE)
		return NULL;

	dev = &vdpa_devices[i];
	dev->device = rte_dev;
	dev->ops = ops;
	vdpa_device_num++;

	return dev;
}
int
rte_vdpa_unregister_device(struct rte_vdpa_device *vdev)
{
	int did = rte_vdpa_find_device_id(vdev);

	if (did < 0 || vdpa_devices[did].ops == NULL)
		return -1;

	memset(&vdpa_devices[did], 0, sizeof(struct rte_vdpa_device));
	vdpa_device_num--;

	return did;
}
int
rte_vdpa_get_device_num(void)
{
	return vdpa_device_num;
}
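/*
 * Usage sketch (illustrative only, not part of this file): a vDPA driver
 * typically registers itself at probe time against the rte_device it was
 * probed with, then resolves its device id. The my_* names below are
 * hypothetical.
 *
 *	static struct rte_vdpa_dev_ops my_vdpa_ops = {
 *		.get_queue_num = my_get_queue_num,
 *		.get_features = my_get_features,
 *	};
 *
 *	static int
 *	my_vdpa_probe(struct rte_device *rte_dev)
 *	{
 *		struct rte_vdpa_device *vdev;
 *
 *		vdev = rte_vdpa_register_device(rte_dev, &my_vdpa_ops);
 *		if (vdev == NULL)
 *			return -1;
 *
 *		return rte_vdpa_find_device_id(vdev);
 *	}
 */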
int
rte_vdpa_relay_vring_used(int vid, uint16_t qid, void *vring_m)
{
	struct virtio_net *dev = get_device(vid);
	uint16_t idx, idx_m, desc_id;
	struct vhost_virtqueue *vq;
	struct vring_desc desc;
	struct vring_desc *desc_ring;
	struct vring_desc *idesc = NULL;
	struct vring *s_vring;
	uint64_t dlen;
	uint32_t nr_descs;
	int ret;

	if (!dev || !vring_m)
		return -1;

	if (qid >= dev->nr_vring)
		return -1;

	if (vq_is_packed(dev))
		return -1;

	s_vring = (struct vring *)vring_m;
	vq = dev->virtqueue[qid];
	idx = vq->used->idx;
	idx_m = s_vring->used->idx;
	ret = (uint16_t)(idx_m - idx);

	while (idx != idx_m) {
		/* copy used entry, used ring logging is not covered here */
		vq->used->ring[idx & (vq->size - 1)] =
			s_vring->used->ring[idx & (vq->size - 1)];

		desc_id = vq->used->ring[idx & (vq->size - 1)].id;
		desc_ring = vq->desc;
		nr_descs = vq->size;

		if (unlikely(desc_id >= vq->size))
			goto fail;

		if (vq->desc[desc_id].flags & VRING_DESC_F_INDIRECT) {
			dlen = vq->desc[desc_id].len;
			nr_descs = dlen / sizeof(struct vring_desc);
			if (unlikely(nr_descs > vq->size))
				goto fail;

			desc_ring = (struct vring_desc *)(uintptr_t)
				vhost_iova_to_vva(dev, vq,
						vq->desc[desc_id].addr, &dlen,
						VHOST_ACCESS_RO);
			if (unlikely(!desc_ring))
				goto fail;

			if (unlikely(dlen < vq->desc[desc_id].len)) {
				/*
				 * The indirect desc table is not contiguous
				 * in process VA space: copy it to a local
				 * table so it can be walked safely.
				 */
				idesc = vhost_alloc_copy_ind_table(dev, vq,
						vq->desc[desc_id].addr,
						vq->desc[desc_id].len);
				if (unlikely(!idesc))
					goto fail;

				desc_ring = idesc;
			}

			desc_id = 0;
		}

		/* dirty page logging for DMA writeable buffer */
		do {
			if (unlikely(desc_id >= vq->size))
				goto fail;
			if (unlikely(nr_descs-- == 0))
				goto fail;
			desc = desc_ring[desc_id];
			if (desc.flags & VRING_DESC_F_WRITE)
				vhost_log_write_iova(dev, vq, desc.addr,
						desc.len);
			desc_id = desc.next;
		} while (desc.flags & VRING_DESC_F_NEXT);

		if (unlikely(idesc)) {
			free_ind_table(idesc);
			idesc = NULL;
		}

		idx++;
	}

	rte_smp_wmb();
	vq->used->idx = idx_m;

	if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))
		vring_used_event(s_vring) = idx_m;

	return ret;

fail:
	if (unlikely(idesc))
		free_ind_table(idesc);
	return -1;
}
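/*
 * Note: rte_vdpa_relay_vring_used() serves the software-assisted live
 * migration path: the driver lets a mediated (shadow) vring consume the
 * descriptors, then calls this helper to copy the used entries back into
 * the guest's vring while logging every DMA-writeable buffer as a dirty
 * page. A minimal relay-loop sketch (my_handle_relay_error() is a
 * hypothetical driver callback):
 *
 *	int nr_used = rte_vdpa_relay_vring_used(vid, qid, shadow_vring);
 *	if (nr_used < 0)
 *		my_handle_relay_error(vid, qid);
 */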
int
rte_vdpa_get_stats_names(int did, struct rte_vdpa_stat_name *stats_names,
		unsigned int size)
{
	struct rte_vdpa_device *vdpa_dev;

	vdpa_dev = rte_vdpa_get_device(did);
	if (!vdpa_dev)
		return -ENODEV;

	RTE_FUNC_PTR_OR_ERR_RET(vdpa_dev->ops->get_stats_names, -ENOTSUP);

	return vdpa_dev->ops->get_stats_names(vdpa_dev, stats_names, size);
}
int
rte_vdpa_get_stats(int did, uint16_t qid, struct rte_vdpa_stat *stats,
		unsigned int n)
{
	struct rte_vdpa_device *vdpa_dev;

	vdpa_dev = rte_vdpa_get_device(did);
	if (!vdpa_dev)
		return -ENODEV;

	if (!stats || !n)
		return -EINVAL;

	RTE_FUNC_PTR_OR_ERR_RET(vdpa_dev->ops->get_stats, -ENOTSUP);

	return vdpa_dev->ops->get_stats(vdpa_dev, qid, stats, n);
}
int
rte_vdpa_reset_stats(int did, uint16_t qid)
{
	struct rte_vdpa_device *vdpa_dev;

	vdpa_dev = rte_vdpa_get_device(did);
	if (!vdpa_dev)
		return -ENODEV;

	RTE_FUNC_PTR_OR_ERR_RET(vdpa_dev->ops->reset_stats, -ENOTSUP);

	return vdpa_dev->ops->reset_stats(vdpa_dev, qid);
}
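/*
 * Usage sketch for the stats API (illustrative; assumes the driver's
 * get_stats_names() follows the common DPDK xstats convention of
 * returning the stat count when called with a NULL array; the
 * my_print_stats() helper is hypothetical):
 *
 *	int n = rte_vdpa_get_stats_names(did, NULL, 0);
 *	struct rte_vdpa_stat_name *names =
 *		rte_malloc(NULL, n * sizeof(*names), 0);
 *	struct rte_vdpa_stat *stats =
 *		rte_malloc(NULL, n * sizeof(*stats), 0);
 *
 *	if (names && stats &&
 *			rte_vdpa_get_stats_names(did, names, n) == n &&
 *			rte_vdpa_get_stats(did, qid, stats, n) == n)
 *		my_print_stats(names, stats, n);
 *
 *	rte_free(names);
 *	rte_free(stats);
 */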
static uint16_t
vdpa_dev_to_id(const struct rte_vdpa_device *dev)
{
	if (dev == NULL)
		return MAX_VHOST_DEVICE;

	if (dev < &vdpa_devices[0] ||
			dev >= &vdpa_devices[MAX_VHOST_DEVICE])
		return MAX_VHOST_DEVICE;

	return (uint16_t)(dev - vdpa_devices);
}
static int
vdpa_dev_match(struct rte_vdpa_device *dev,
		const struct rte_device *rte_dev)
{
	if (dev->device == rte_dev)
		return 0;

	return -1;
}
/* Generic rte_vdpa_dev comparison function. */
typedef int (*rte_vdpa_cmp_t)(struct rte_vdpa_device *,
		const struct rte_device *rte_dev);
static struct rte_vdpa_device *
vdpa_find_device(const struct rte_vdpa_device *start, rte_vdpa_cmp_t cmp,
		struct rte_device *rte_dev)
{
	struct rte_vdpa_device *dev;
	uint16_t idx;

	if (start != NULL)
		idx = vdpa_dev_to_id(start) + 1;
	else
		idx = 0;
	for (; idx < MAX_VHOST_DEVICE; idx++) {
		dev = &vdpa_devices[idx];
		/*
		 * ToDo: Certainly better to introduce a state field,
		 * but rely on ops being set for now.
		 */
		if (dev->ops == NULL)
			continue;
		if (cmp(dev, rte_dev) == 0)
			return dev;
	}
	return NULL;
}
static void *
vdpa_dev_iterate(const void *start,
		const char *str,
		const struct rte_dev_iterator *it)
{
	struct rte_vdpa_device *vdpa_dev = NULL;

	RTE_SET_USED(str);

	vdpa_dev = vdpa_find_device(start, vdpa_dev_match, it->device);

	return vdpa_dev;
}
static struct rte_class rte_class_vdpa = {
	.dev_iterate = vdpa_dev_iterate,
};

RTE_REGISTER_CLASS(vdpa, rte_class_vdpa);
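/*
 * With the "vdpa" class registered above, applications can enumerate the
 * registered vDPA devices through the generic device iterator. A minimal
 * sketch, assuming at least one device has been registered:
 *
 *	struct rte_dev_iterator it;
 *	struct rte_device *rte_dev;
 *
 *	RTE_DEV_FOREACH(rte_dev, "class=vdpa", &it)
 *		printf("vdpa device: %s\n", rte_dev->name);
 */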