/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation
 */

/**
 * @file
 *
 * Device specific vhost lib
 */

#include <sys/queue.h>

#include <rte_class.h>
#include <rte_malloc.h>
#include <rte_spinlock.h>
#include <rte_tailq.h>

#include "rte_vdpa.h"
#include "vdpa_driver.h"
#include "vhost.h"

/** Double linked list of vDPA devices. */
TAILQ_HEAD(vdpa_device_list, rte_vdpa_device);

static struct vdpa_device_list vdpa_device_list =
	TAILQ_HEAD_INITIALIZER(vdpa_device_list);
static rte_spinlock_t vdpa_device_list_lock = RTE_SPINLOCK_INITIALIZER;

/* Unsafe, needs to be called with vdpa_device_list_lock held */
static struct rte_vdpa_device *
__vdpa_find_device_by_name(const char *name)
{
	struct rte_vdpa_device *dev, *ret = NULL;

	if (name == NULL)
		return NULL;

	TAILQ_FOREACH(dev, &vdpa_device_list, next) {
		if (!strncmp(dev->device->name, name, RTE_DEV_NAME_MAX_LEN)) {
			ret = dev;
			break;
		}
	}

	return ret;
}
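
/* Thread-safe variant of the lookup above: takes the device list lock
 * around __vdpa_find_device_by_name().
 */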
struct rte_vdpa_device *
rte_vdpa_find_device_by_name(const char *name)
{
	struct rte_vdpa_device *dev;

	rte_spinlock_lock(&vdpa_device_list_lock);
	dev = __vdpa_find_device_by_name(name);
	rte_spinlock_unlock(&vdpa_device_list_lock);

	return dev;
}
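
/*
 * Illustrative sketch (not part of the upstream file): resolving a vDPA
 * device from a name supplied by the user. "vdpa_name" is a hypothetical
 * variable used only for this example.
 *
 *	struct rte_vdpa_device *vdev;
 *
 *	vdev = rte_vdpa_find_device_by_name(vdpa_name);
 *	if (vdev == NULL)
 *		rte_exit(EXIT_FAILURE, "vDPA device %s not found\n", vdpa_name);
 */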

struct rte_device *
rte_vdpa_get_rte_device(struct rte_vdpa_device *vdpa_dev)
{
	if (vdpa_dev == NULL)
		return NULL;

	return vdpa_dev->device;
}

struct rte_vdpa_device *
rte_vdpa_register_device(struct rte_device *rte_dev,
		struct rte_vdpa_dev_ops *ops)
{
	struct rte_vdpa_device *dev;

	if (ops == NULL)
		return NULL;

	/* Check mandatory ops are implemented */
	if (!ops->get_queue_num || !ops->get_features ||
			!ops->get_protocol_features || !ops->dev_conf ||
			!ops->dev_close || !ops->set_vring_state ||
			!ops->set_features) {
		VHOST_LOG_CONFIG(ERR, "(%s) Some mandatory vDPA ops aren't implemented\n",
				rte_dev->name);
		return NULL;
	}

	rte_spinlock_lock(&vdpa_device_list_lock);
	/* Check the device hasn't been registered already */
	dev = __vdpa_find_device_by_name(rte_dev->name);
	if (dev != NULL) {
		dev = NULL;
		goto out_unlock;
	}

	dev = rte_zmalloc(NULL, sizeof(*dev), 0);
	if (dev == NULL)
		goto out_unlock;

	dev->device = rte_dev;
	dev->ops = ops;
	TAILQ_INSERT_TAIL(&vdpa_device_list, dev, next);

out_unlock:
	rte_spinlock_unlock(&vdpa_device_list_lock);

	return dev;
}
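
/*
 * Illustrative sketch (not part of the upstream file): how a vDPA driver's
 * probe path might register itself. "my_vdpa_ops" and the callbacks are
 * hypothetical names; every op checked as mandatory above must be set, or
 * rte_vdpa_register_device() returns NULL.
 *
 *	static struct rte_vdpa_dev_ops my_vdpa_ops = {
 *		.get_queue_num = my_get_queue_num,
 *		.get_features = my_get_features,
 *		.get_protocol_features = my_get_protocol_features,
 *		.dev_conf = my_dev_conf,
 *		.dev_close = my_dev_close,
 *		.set_vring_state = my_set_vring_state,
 *		.set_features = my_set_features,
 *	};
 *
 *	static int
 *	my_vdpa_probe(struct rte_device *rte_dev)
 *	{
 *		struct rte_vdpa_device *vdev;
 *
 *		vdev = rte_vdpa_register_device(rte_dev, &my_vdpa_ops);
 *		return vdev == NULL ? -1 : 0;
 *	}
 */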

int
rte_vdpa_unregister_device(struct rte_vdpa_device *dev)
{
	struct rte_vdpa_device *cur_dev, *tmp_dev;
	int ret = -1;

	rte_spinlock_lock(&vdpa_device_list_lock);
	RTE_TAILQ_FOREACH_SAFE(cur_dev, &vdpa_device_list, next, tmp_dev) {
		if (dev != cur_dev)
			continue;

		TAILQ_REMOVE(&vdpa_device_list, dev, next);
		rte_free(dev);
		ret = 0;
		break;
	}
	rte_spinlock_unlock(&vdpa_device_list_lock);

	return ret;
}
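
/*
 * Relay the used ring of a split virtqueue from the device-writable mirror
 * ring (vring_m) to the ring shared with the guest, and log the guest pages
 * of DMA-writable buffers as dirty. vDPA drivers typically call this during
 * live migration when the hardware cannot log dirty pages by itself.
 */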
int
rte_vdpa_relay_vring_used(int vid, uint16_t qid, void *vring_m)
{
	struct virtio_net *dev = get_device(vid);
	uint16_t idx, idx_m, desc_id;
	struct vhost_virtqueue *vq;
	struct vring_desc desc;
	struct vring_desc *desc_ring;
	struct vring_desc *idesc = NULL;
	struct vring *s_vring;
	uint64_t dlen;
	uint32_t nr_descs;
	int ret;

	if (!dev || !vring_m)
		return -1;

	if (qid >= dev->nr_vring)
		return -1;

	if (vq_is_packed(dev))
		return -1;

	s_vring = (struct vring *)vring_m;
	vq = dev->virtqueue[qid];
	idx = vq->used->idx;
	idx_m = s_vring->used->idx;
	ret = (uint16_t)(idx_m - idx);

	while (idx != idx_m) {
		/* copy used entry, used ring logging is not covered here */
		vq->used->ring[idx & (vq->size - 1)] =
			s_vring->used->ring[idx & (vq->size - 1)];

		desc_id = vq->used->ring[idx & (vq->size - 1)].id;
		desc_ring = vq->desc;
		nr_descs = vq->size;

		if (unlikely(desc_id >= vq->size))
			goto fail;

		if (vq->desc[desc_id].flags & VRING_DESC_F_INDIRECT) {
			dlen = vq->desc[desc_id].len;
			nr_descs = dlen / sizeof(struct vring_desc);
			if (unlikely(nr_descs > vq->size))
				goto fail;

			desc_ring = (struct vring_desc *)(uintptr_t)
				vhost_iova_to_vva(dev, vq,
						vq->desc[desc_id].addr, &dlen,
						VHOST_ACCESS_RO);
			if (unlikely(!desc_ring))
				goto fail;

			if (unlikely(dlen < vq->desc[desc_id].len)) {
				idesc = vhost_alloc_copy_ind_table(dev, vq,
						vq->desc[desc_id].addr,
						vq->desc[desc_id].len);
				if (unlikely(!idesc))
					goto fail;

				desc_ring = idesc;
			}

			desc_id = 0;
		}

		/* dirty page logging for DMA writeable buffer */
		do {
			if (unlikely(desc_id >= vq->size))
				goto fail;
			if (unlikely(nr_descs-- == 0))
				goto fail;
			desc = desc_ring[desc_id];
			if (desc.flags & VRING_DESC_F_WRITE)
				vhost_log_write_iova(dev, vq, desc.addr,
						desc.len);
			desc_id = desc.next;
		} while (desc.flags & VRING_DESC_F_NEXT);

		if (unlikely(idesc)) {
			free_ind_table(idesc);
			idesc = NULL;
		}

		idx++;
	}

	/* used idx is the synchronization point for the split vring;
	 * the release store makes the copied used entries visible to the
	 * guest before it observes the updated index.
	 */
	__atomic_store_n(&vq->used->idx, idx_m, __ATOMIC_RELEASE);

	if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))
		vring_used_event(s_vring) = idx_m;

	return ret;

fail:
	if (unlikely(idesc))
		free_ind_table(idesc);
	return -1;
}

int
rte_vdpa_get_queue_num(struct rte_vdpa_device *dev, uint32_t *queue_num)
{
	if (dev == NULL || dev->ops == NULL || dev->ops->get_queue_num == NULL)
		return -1;

	return dev->ops->get_queue_num(dev, queue_num);
}

int
rte_vdpa_get_features(struct rte_vdpa_device *dev, uint64_t *features)
{
	if (dev == NULL || dev->ops == NULL || dev->ops->get_features == NULL)
		return -1;

	return dev->ops->get_features(dev, features);
}

int
rte_vdpa_get_protocol_features(struct rte_vdpa_device *dev, uint64_t *features)
{
	if (dev == NULL || dev->ops == NULL ||
			dev->ops->get_protocol_features == NULL)
		return -1;

	return dev->ops->get_protocol_features(dev, features);
}

int
rte_vdpa_get_stats_names(struct rte_vdpa_device *dev,
		struct rte_vdpa_stat_name *stats_names,
		unsigned int size)
{
	if (!dev)
		return -EINVAL;

	RTE_FUNC_PTR_OR_ERR_RET(dev->ops->get_stats_names, -ENOTSUP);

	return dev->ops->get_stats_names(dev, stats_names, size);
}

int
rte_vdpa_get_stats(struct rte_vdpa_device *dev, uint16_t qid,
		struct rte_vdpa_stat *stats, unsigned int n)
{
	if (!dev || !stats || !n)
		return -EINVAL;

	RTE_FUNC_PTR_OR_ERR_RET(dev->ops->get_stats, -ENOTSUP);

	return dev->ops->get_stats(dev, qid, stats, n);
}

int
rte_vdpa_reset_stats(struct rte_vdpa_device *dev, uint16_t qid)
{
	if (!dev)
		return -EINVAL;

	RTE_FUNC_PTR_OR_ERR_RET(dev->ops->reset_stats, -ENOTSUP);

	return dev->ops->reset_stats(dev, qid);
}
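
/*
 * Illustrative sketch (not part of the upstream file): dumping the statistics
 * of queue 0 of a device. Calling rte_vdpa_get_stats_names() with a NULL
 * array to query the number of counters is an assumption here, following the
 * xstats-style convention used by DPDK's vdpa example application.
 *
 *	int i, num;
 *	struct rte_vdpa_stat_name *names;
 *	struct rte_vdpa_stat *stats;
 *
 *	num = rte_vdpa_get_stats_names(vdev, NULL, 0);
 *	if (num <= 0)
 *		return;
 *
 *	names = calloc(num, sizeof(*names));
 *	stats = calloc(num, sizeof(*stats));
 *	if (names != NULL && stats != NULL &&
 *			rte_vdpa_get_stats_names(vdev, names, num) == num &&
 *			rte_vdpa_get_stats(vdev, 0, stats, num) > 0) {
 *		for (i = 0; i < num; i++)
 *			printf("%s: %" PRIu64 "\n",
 *				names[stats[i].id].name, stats[i].value);
 *	}
 *	free(names);
 *	free(stats);
 */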

static int
vdpa_dev_match(struct rte_vdpa_device *dev,
		const struct rte_device *rte_dev)
{
	if (dev->device == rte_dev)
		return 0;

	return -1;
}

/* Generic rte_vdpa_dev comparison function. */
typedef int (*rte_vdpa_cmp_t)(struct rte_vdpa_device *,
		const struct rte_device *rte_dev);

static struct rte_vdpa_device *
vdpa_find_device(const struct rte_vdpa_device *start, rte_vdpa_cmp_t cmp,
		struct rte_device *rte_dev)
{
	struct rte_vdpa_device *dev;

	rte_spinlock_lock(&vdpa_device_list_lock);
	if (start == NULL)
		dev = TAILQ_FIRST(&vdpa_device_list);
	else
		dev = TAILQ_NEXT(start, next);

	while (dev != NULL) {
		if (cmp(dev, rte_dev) == 0)
			break;

		dev = TAILQ_NEXT(dev, next);
	}
	rte_spinlock_unlock(&vdpa_device_list_lock);

	return dev;
}

static void *
vdpa_dev_iterate(const void *start,
		const char *str,
		const struct rte_dev_iterator *it)
{
	struct rte_vdpa_device *vdpa_dev = NULL;

	RTE_SET_USED(str);

	vdpa_dev = vdpa_find_device(start, vdpa_dev_match, it->device);

	return vdpa_dev;
}

static struct rte_class rte_class_vdpa = {
	.dev_iterate = vdpa_dev_iterate,
};

RTE_REGISTER_CLASS(vdpa, rte_class_vdpa);
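
/*
 * Registering the "vdpa" class above plugs vDPA devices into the generic
 * device iteration API. Illustrative sketch (not part of the upstream file):
 * walking every probed vDPA device from an application.
 *
 *	struct rte_dev_iterator it;
 *	struct rte_device *rte_dev;
 *
 *	RTE_DEV_FOREACH(rte_dev, "class=vdpa", &it)
 *		printf("found vDPA device: %s\n", rte_dev->name);
 */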