/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation
 */

/**
 * @file
 *
 * Device specific vhost lib
 */

#include <stdbool.h>
#include <string.h>

#include <rte_class.h>
#include <rte_malloc.h>

#include "rte_vdpa.h"
#include "vhost.h"
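/*
 * All vDPA devices live in the fixed-size table below; a device id
 * ("did") is simply an index into it, and a slot counts as in use as
 * long as its ops pointer is set.
 */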
static struct rte_vdpa_device vdpa_devices[MAX_VHOST_DEVICE];
static uint32_t vdpa_device_num;
int
rte_vdpa_register_device(struct rte_device *rte_dev,
		struct rte_vdpa_dev_ops *ops)
{
	struct rte_vdpa_device *dev;
	int i;

	if (vdpa_device_num >= MAX_VHOST_DEVICE || ops == NULL)
		return -1;

	/* Refuse to register the same rte_device twice. */
	for (i = 0; i < MAX_VHOST_DEVICE; i++) {
		dev = &vdpa_devices[i];
		if (dev->ops == NULL)
			continue;

		if (dev->device == rte_dev)
			return -1;
	}

	/* Claim the first free slot. */
	for (i = 0; i < MAX_VHOST_DEVICE; i++) {
		if (vdpa_devices[i].ops == NULL)
			break;
	}

	if (i == MAX_VHOST_DEVICE)
		return -1;

	dev = &vdpa_devices[i];
	dev->device = rte_dev;
	dev->ops = ops;
	vdpa_device_num++;

	return i;
}
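/*
 * Typical driver-side usage, a minimal sketch; "my_pci_dev" and
 * "my_vdpa_ops" are placeholders, not part of this library:
 *
 *	static struct rte_vdpa_dev_ops my_vdpa_ops = { ... };
 *	int did = rte_vdpa_register_device(&my_pci_dev->device,
 *					   &my_vdpa_ops);
 *	if (did < 0)
 *		... registration failed (table full or duplicate) ...
 */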
int
rte_vdpa_unregister_device(int did)
{
	if (did < 0 || did >= MAX_VHOST_DEVICE || vdpa_devices[did].ops == NULL)
		return -1;

	memset(&vdpa_devices[did], 0, sizeof(struct rte_vdpa_device));
	vdpa_device_num--;

	return did;
}
int
rte_vdpa_find_device_id(struct rte_vdpa_device *dev)
{
	struct rte_vdpa_device *tmp_dev;
	int i;

	if (dev == NULL)
		return -1;

	for (i = 0; i < MAX_VHOST_DEVICE; ++i) {
		tmp_dev = &vdpa_devices[i];
		if (tmp_dev->ops == NULL)
			continue;

		if (tmp_dev == dev)
			return i;
	}

	return -1;
}
int
rte_vdpa_find_device_id_by_name(const char *name)
{
	struct rte_vdpa_device *dev;
	int i;

	if (name == NULL)
		return -1;

	for (i = 0; i < MAX_VHOST_DEVICE; ++i) {
		dev = &vdpa_devices[i];
		if (dev->ops == NULL)
			continue;

		if (strncmp(dev->device->name, name, RTE_DEV_NAME_MAX_LEN) == 0)
			return i;
	}

	return -1;
}
struct rte_vdpa_device *
rte_vdpa_get_device(int did)
{
	if (did < 0 || did >= MAX_VHOST_DEVICE)
		return NULL;

	return &vdpa_devices[did];
}
int
rte_vdpa_get_device_num(void)
{
	return vdpa_device_num;
}
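/*
 * Example lookup flow, a sketch assuming a device was registered under
 * the PCI name "0000:06:00.3" (the name is illustrative only):
 *
 *	int did = rte_vdpa_find_device_id_by_name("0000:06:00.3");
 *	struct rte_vdpa_device *vdev = rte_vdpa_get_device(did);
 *	if (did < 0 || vdev == NULL)
 *		... no such device ...
 */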
/*
 * Relay used descriptors from the mediated (shadow) split ring to the
 * guest's ring, logging guest-writable buffers as dirty so that live
 * migration sees the pages the device DMA'd into.
 */
int
rte_vdpa_relay_vring_used(int vid, uint16_t qid, void *vring_m)
{
	struct virtio_net *dev = get_device(vid);
	uint16_t idx, idx_m, desc_id;
	struct vhost_virtqueue *vq;
	struct vring_desc desc;
	struct vring_desc *desc_ring;
	struct vring_desc *idesc = NULL;
	struct vring *s_vring;
	uint64_t dlen;
	uint32_t nr_descs;
	int ret;

	if (!dev || !vring_m)
		return -1;

	if (qid >= dev->nr_vring)
		return -1;

	if (vq_is_packed(dev))
		return -1;

	s_vring = (struct vring *)vring_m;
	vq = dev->virtqueue[qid];
	idx = vq->used->idx;
	idx_m = s_vring->used->idx;
	ret = (uint16_t)(idx_m - idx);

	while (idx != idx_m) {
		/* copy used entry, used ring logging is not covered here */
		vq->used->ring[idx & (vq->size - 1)] =
			s_vring->used->ring[idx & (vq->size - 1)];

		desc_id = vq->used->ring[idx & (vq->size - 1)].id;
		desc_ring = vq->desc;
		nr_descs = vq->size;

		if (unlikely(desc_id >= vq->size))
			goto fail;

		if (vq->desc[desc_id].flags & VRING_DESC_F_INDIRECT) {
			dlen = vq->desc[desc_id].len;
			nr_descs = dlen / sizeof(struct vring_desc);
			if (unlikely(nr_descs > vq->size))
				goto fail;

			desc_ring = (struct vring_desc *)(uintptr_t)
				vhost_iova_to_vva(dev, vq,
						vq->desc[desc_id].addr, &dlen,
						VHOST_ACCESS_RO);
			if (unlikely(!desc_ring))
				goto fail;

			if (unlikely(dlen < vq->desc[desc_id].len)) {
				/* The indirect table is not contiguously
				 * mapped; fall back to a bounce copy.
				 */
				idesc = vhost_alloc_copy_ind_table(dev, vq,
						vq->desc[desc_id].addr,
						vq->desc[desc_id].len);
				if (unlikely(!idesc))
					goto fail;

				desc_ring = idesc;
			}

			desc_id = 0;
		}

		/* dirty page logging for DMA writeable buffer */
		do {
			if (unlikely(desc_id >= vq->size))
				goto fail;
			if (unlikely(nr_descs-- == 0))
				goto fail;
			desc = desc_ring[desc_id];
			if (desc.flags & VRING_DESC_F_WRITE)
				vhost_log_write_iova(dev, vq, desc.addr,
						desc.len);
			desc_id = desc.next;
		} while (desc.flags & VRING_DESC_F_NEXT);

		if (unlikely(idesc)) {
			free_ind_table(idesc);
			idesc = NULL;
		}

		idx++;
	}

	vq->used->idx = idx_m;

	if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))
		vring_used_event(s_vring) = idx_m;

	return ret;

fail:
	if (unlikely(idesc))
		free_ind_table(idesc);
	return -1;
}
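/*
 * A driver would typically call the relay from its live-migration
 * fallback path; a sketch, where "mediated_ring" stands in for the
 * driver's shadow ring that the hardware actually writes to:
 *
 *	int nr = rte_vdpa_relay_vring_used(vid, qid, &mediated_ring);
 *	if (nr < 0)
 *		... relay failed, abort migration ...
 */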
int
rte_vdpa_get_stats_names(int did, struct rte_vdpa_stat_name *stats_names,
		unsigned int size)
{
	struct rte_vdpa_device *vdpa_dev;

	vdpa_dev = rte_vdpa_get_device(did);
	if (!vdpa_dev)
		return -ENODEV;

	RTE_FUNC_PTR_OR_ERR_RET(vdpa_dev->ops->get_stats_names, -ENOTSUP);

	return vdpa_dev->ops->get_stats_names(did, stats_names, size);
}
int
rte_vdpa_get_stats(int did, uint16_t qid, struct rte_vdpa_stat *stats,
		unsigned int n)
{
	struct rte_vdpa_device *vdpa_dev;

	vdpa_dev = rte_vdpa_get_device(did);
	if (!vdpa_dev)
		return -ENODEV;

	if (!stats || !n)
		return -EINVAL;

	RTE_FUNC_PTR_OR_ERR_RET(vdpa_dev->ops->get_stats, -ENOTSUP);

	return vdpa_dev->ops->get_stats(did, qid, stats, n);
}
int
rte_vdpa_reset_stats(int did, uint16_t qid)
{
	struct rte_vdpa_device *vdpa_dev;

	vdpa_dev = rte_vdpa_get_device(did);
	if (!vdpa_dev)
		return -ENODEV;

	RTE_FUNC_PTR_OR_ERR_RET(vdpa_dev->ops->reset_stats, -ENOTSUP);

	return vdpa_dev->ops->reset_stats(did, qid);
}
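/*
 * Stats retrieval follows the usual two-call xstats pattern; a sketch
 * with error handling elided (whether a NULL/0 probe call is accepted
 * depends on the driver's get_stats_names implementation):
 *
 *	int num = rte_vdpa_get_stats_names(did, NULL, 0);
 *	struct rte_vdpa_stat_name *names = malloc(num * sizeof(*names));
 *	struct rte_vdpa_stat *stats = malloc(num * sizeof(*stats));
 *	rte_vdpa_get_stats_names(did, names, num);
 *	rte_vdpa_get_stats(did, qid, stats, num);
 */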
/* Translate a device pointer back to its table index; MAX_VHOST_DEVICE
 * signals "not a registered device".
 */
static uint16_t
vdpa_dev_to_id(const struct rte_vdpa_device *dev)
{
	if (dev == NULL)
		return MAX_VHOST_DEVICE;

	if (dev < &vdpa_devices[0] ||
			dev >= &vdpa_devices[MAX_VHOST_DEVICE])
		return MAX_VHOST_DEVICE;

	return (uint16_t)(dev - vdpa_devices);
}
static int
vdpa_dev_match(struct rte_vdpa_device *dev,
		const struct rte_device *rte_dev)
{
	if (dev->device == rte_dev)
		return 0;

	return -1;
}

/* Generic rte_vdpa_dev comparison function. */
typedef int (*rte_vdpa_cmp_t)(struct rte_vdpa_device *,
		const struct rte_device *rte_dev);
static struct rte_vdpa_device *
vdpa_find_device(const struct rte_vdpa_device *start, rte_vdpa_cmp_t cmp,
		struct rte_device *rte_dev)
{
	struct rte_vdpa_device *dev;
	uint16_t idx;

	if (start != NULL)
		idx = vdpa_dev_to_id(start) + 1;
	else
		idx = 0;
	for (; idx < MAX_VHOST_DEVICE; idx++) {
		dev = &vdpa_devices[idx];
		/*
		 * TODO: certainly better to introduce a state field,
		 * but rely on ops being set for now.
		 */
		if (dev->ops == NULL)
			continue;
		if (cmp(dev, rte_dev) == 0)
			return dev;
	}

	return NULL;
}
static void *
vdpa_dev_iterate(const void *start,
		const char *str,
		const struct rte_dev_iterator *it)
{
	struct rte_vdpa_device *vdpa_dev = NULL;

	RTE_SET_USED(str);

	vdpa_dev = vdpa_find_device(start, vdpa_dev_match, it->device);

	return vdpa_dev;
}

static struct rte_class rte_class_vdpa = {
	.dev_iterate = vdpa_dev_iterate,
};

RTE_REGISTER_CLASS(vdpa, rte_class_vdpa);
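/*
 * Registering the "vdpa" class makes these devices visible to the EAL
 * device iterator; a sketch of how an application could walk them:
 *
 *	struct rte_dev_iterator it;
 *	struct rte_device *dev;
 *
 *	RTE_DEV_FOREACH(dev, "class=vdpa", &it)
 *		printf("vdpa device: %s\n", dev->name);
 */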