/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation
 */

/**
 * @file
 *
 * Device specific vhost lib
 */

#include <stdbool.h>

#include <rte_class.h>
#include <rte_malloc.h>

#include "rte_vdpa.h"
#include "vhost.h"

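/*
 * Table of registered vDPA devices. A slot is in use when its ops pointer
 * is set; the slot index doubles as the device id (see vdpa_dev_to_id()).
 */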
static struct rte_vdpa_device vdpa_devices[MAX_VHOST_DEVICE];
static uint32_t vdpa_device_num;

struct rte_vdpa_device *
rte_vdpa_find_device_by_name(const char *name)
{
	struct rte_vdpa_device *dev;
	int i;

	if (name == NULL)
		return NULL;

	for (i = 0; i < MAX_VHOST_DEVICE; ++i) {
		dev = &vdpa_devices[i];
		if (dev->ops == NULL)
			continue;

		if (strncmp(dev->device->name, name, RTE_DEV_NAME_MAX_LEN) == 0)
			return dev;
	}

	return NULL;
}

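/* Return the generic rte_device a vDPA device was registered with. */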
struct rte_device *
rte_vdpa_get_rte_device(struct rte_vdpa_device *vdpa_dev)
{
	if (vdpa_dev == NULL)
		return NULL;

	return vdpa_dev->device;
}

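/*
 * Register a vDPA device: reject a second registration of the same
 * rte_device, then take the first free slot (ops == NULL) in the table.
 */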
struct rte_vdpa_device *
rte_vdpa_register_device(struct rte_device *rte_dev,
		struct rte_vdpa_dev_ops *ops)
{
	struct rte_vdpa_device *dev;
	int i;

	if (vdpa_device_num >= MAX_VHOST_DEVICE || ops == NULL)
		return NULL;

	/* A device must not be registered twice. */
	for (i = 0; i < MAX_VHOST_DEVICE; i++) {
		dev = &vdpa_devices[i];
		if (dev->ops == NULL)
			continue;

		if (dev->device == rte_dev)
			return NULL;
	}

	/* Find the first free slot. */
	for (i = 0; i < MAX_VHOST_DEVICE; i++) {
		if (vdpa_devices[i].ops == NULL)
			break;
	}

	if (i == MAX_VHOST_DEVICE)
		return NULL;

	dev = &vdpa_devices[i];
	dev->device = rte_dev;
	dev->ops = ops;
	vdpa_device_num++;

	return dev;
}

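/*
 * Usage sketch (hypothetical driver code, not part of this file): a vDPA
 * PMD would typically register from its probe callback and unregister on
 * remove, e.g. assuming a PCI driver with an ops table "my_vdpa_ops":
 *
 *	vdev = rte_vdpa_register_device(&pci_dev->device, &my_vdpa_ops);
 *	if (vdev == NULL)
 *		return -1;
 *	...
 *	rte_vdpa_unregister_device(vdev);
 */
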
int
rte_vdpa_unregister_device(struct rte_vdpa_device *vdev)
{
	int i;

	for (i = 0; i < MAX_VHOST_DEVICE; i++) {
		if (vdev != &vdpa_devices[i])
			continue;

		memset(vdev, 0, sizeof(struct rte_vdpa_device));
		vdpa_device_num--;

		return 0;
	}

	return -1;
}

int
rte_vdpa_get_device_num(void)
{
	return vdpa_device_num;
}

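/*
 * Relay the used ring of a split virtqueue from the mediated (shadow)
 * vring to the guest's vring, logging guest-writable buffers as dirty
 * along the way. This supports software-assisted live migration of
 * vDPA devices. Returns the number of relayed entries, or -1 on error.
 */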
int
rte_vdpa_relay_vring_used(int vid, uint16_t qid, void *vring_m)
{
	struct virtio_net *dev = get_device(vid);
	uint16_t idx, idx_m, desc_id;
	struct vhost_virtqueue *vq;
	struct vring_desc desc;
	struct vring_desc *desc_ring;
	struct vring_desc *idesc = NULL;
	struct vring *s_vring;
	uint64_t dlen;
	uint32_t nr_descs;
	int ret;

	if (!dev || !vring_m)
		return -1;

	if (qid >= dev->nr_vring)
		return -1;

	if (vq_is_packed(dev))
		return -1;

	s_vring = (struct vring *)vring_m;
	vq = dev->virtqueue[qid];
	idx = vq->used->idx;
	idx_m = s_vring->used->idx;
	ret = (uint16_t)(idx_m - idx);

	while (idx != idx_m) {
		/* copy used entry, used ring logging is not covered here */
		vq->used->ring[idx & (vq->size - 1)] =
			s_vring->used->ring[idx & (vq->size - 1)];

		desc_id = vq->used->ring[idx & (vq->size - 1)].id;
		desc_ring = vq->desc;
		nr_descs = vq->size;

		if (unlikely(desc_id >= vq->size))
			goto fail;

		if (vq->desc[desc_id].flags & VRING_DESC_F_INDIRECT) {
			dlen = vq->desc[desc_id].len;
			nr_descs = dlen / sizeof(struct vring_desc);
			if (unlikely(nr_descs > vq->size))
				goto fail;

			desc_ring = (struct vring_desc *)(uintptr_t)
				vhost_iova_to_vva(dev, vq,
						vq->desc[desc_id].addr, &dlen,
						VHOST_ACCESS_RO);
			if (unlikely(!desc_ring))
				goto fail;

			if (unlikely(dlen < vq->desc[desc_id].len)) {
				idesc = vhost_alloc_copy_ind_table(dev, vq,
						vq->desc[desc_id].addr,
						vq->desc[desc_id].len);
				if (unlikely(!idesc))
					goto fail;

				desc_ring = idesc;
			}

			desc_id = 0;
		}

		/* dirty page logging for DMA writeable buffer */
		do {
			if (unlikely(desc_id >= vq->size))
				goto fail;
			if (unlikely(nr_descs-- == 0))
				goto fail;
			desc = desc_ring[desc_id];
			if (desc.flags & VRING_DESC_F_WRITE)
				vhost_log_write_iova(dev, vq, desc.addr,
						desc.len);
			desc_id = desc.next;
		} while (desc.flags & VRING_DESC_F_NEXT);

		if (unlikely(idesc)) {
			free_ind_table(idesc);
			idesc = NULL;
		}

		idx++;
	}

	vq->used->idx = idx_m;

	if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))
		vring_used_event(s_vring) = idx_m;

	return ret;

fail:
	if (unlikely(idesc))
		free_ind_table(idesc);
	return -1;
}

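/*
 * Statistics helpers: thin wrappers that validate arguments before
 * dispatching to the driver's ops callbacks.
 */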
int
rte_vdpa_get_stats_names(struct rte_vdpa_device *dev,
		struct rte_vdpa_stat_name *stats_names,
		unsigned int size)
{
	if (!dev)
		return -EINVAL;

	RTE_FUNC_PTR_OR_ERR_RET(dev->ops->get_stats_names, -ENOTSUP);

	return dev->ops->get_stats_names(dev, stats_names, size);
}

int
rte_vdpa_get_stats(struct rte_vdpa_device *dev, uint16_t qid,
		struct rte_vdpa_stat *stats, unsigned int n)
{
	if (!dev || !stats || !n)
		return -EINVAL;

	RTE_FUNC_PTR_OR_ERR_RET(dev->ops->get_stats, -ENOTSUP);

	return dev->ops->get_stats(dev, qid, stats, n);
}

int
rte_vdpa_reset_stats(struct rte_vdpa_device *dev, uint16_t qid)
{
	if (!dev)
		return -EINVAL;

	RTE_FUNC_PTR_OR_ERR_RET(dev->ops->reset_stats, -ENOTSUP);

	return dev->ops->reset_stats(dev, qid);
}

static uint16_t
vdpa_dev_to_id(const struct rte_vdpa_device *dev)
{
	if (dev == NULL)
		return MAX_VHOST_DEVICE;

	if (dev < &vdpa_devices[0] ||
			dev >= &vdpa_devices[MAX_VHOST_DEVICE])
		return MAX_VHOST_DEVICE;

	return (uint16_t)(dev - vdpa_devices);
}

static int
vdpa_dev_match(struct rte_vdpa_device *dev,
		const struct rte_device *rte_dev)
{
	if (dev->device == rte_dev)
		return 0;

	return -1;
}

/* Generic rte_vdpa_dev comparison function. */
typedef int (*rte_vdpa_cmp_t)(struct rte_vdpa_device *,
		const struct rte_device *rte_dev);

static struct rte_vdpa_device *
vdpa_find_device(const struct rte_vdpa_device *start, rte_vdpa_cmp_t cmp,
		struct rte_device *rte_dev)
{
	struct rte_vdpa_device *dev;
	uint16_t idx;

	if (start != NULL)
		idx = vdpa_dev_to_id(start) + 1;
	else
		idx = 0;
	for (; idx < MAX_VHOST_DEVICE; idx++) {
		dev = &vdpa_devices[idx];
		/*
		 * ToDo: Certainly better to introduce a state field,
		 * but rely on ops being set for now.
		 */
		if (dev->ops == NULL)
			continue;
		if (cmp(dev, rte_dev) == 0)
			return dev;
	}

	return NULL;
}

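/*
 * Usage sketch (assumed application code, not part of this file): the class
 * registered below lets callers enumerate vDPA devices with the generic EAL
 * device iterator, e.g.:
 *
 *	struct rte_dev_iterator it;
 *	struct rte_device *rte_dev;
 *
 *	RTE_DEV_FOREACH(rte_dev, "class=vdpa", &it)
 *		printf("vdpa device: %s\n", rte_dev->name);
 */
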
static void *
vdpa_dev_iterate(const void *start,
		const char *str,
		const struct rte_dev_iterator *it)
{
	struct rte_vdpa_device *vdpa_dev = NULL;

	RTE_SET_USED(str);

	vdpa_dev = vdpa_find_device(start, vdpa_dev_match, it->device);

	return vdpa_dev;
}

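/* Register "vdpa" as a device class so the EAL iterator can find it. */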
static struct rte_class rte_class_vdpa = {
	.dev_iterate = vdpa_dev_iterate,
};

RTE_REGISTER_CLASS(vdpa, rte_class_vdpa);