vhost: remove vDPA device count API
lib/librte_vhost/vdpa.c (dpdk.git)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation
 */

/**
 * @file
 *
 * Device specific vhost lib
 */

#include <stdbool.h>
#include <sys/queue.h>

#include <rte_class.h>
#include <rte_malloc.h>
#include <rte_spinlock.h>
#include <rte_tailq.h>

#include "rte_vdpa.h"
#include "vhost.h"

/** Doubly linked list of vDPA devices. */
TAILQ_HEAD(vdpa_device_list, rte_vdpa_device);

static struct vdpa_device_list vdpa_device_list =
		TAILQ_HEAD_INITIALIZER(vdpa_device_list);
/* Protects insertions, removals and lookups on vdpa_device_list. */
static rte_spinlock_t vdpa_device_list_lock = RTE_SPINLOCK_INITIALIZER;


/* Unsafe, needs to be called with vdpa_device_list_lock held */
static struct rte_vdpa_device *
__vdpa_find_device_by_name(const char *name)
{
	struct rte_vdpa_device *dev, *ret = NULL;

	if (name == NULL)
		return NULL;

	TAILQ_FOREACH(dev, &vdpa_device_list, next) {
		if (!strncmp(dev->device->name, name, RTE_DEV_NAME_MAX_LEN)) {
			ret = dev;
			break;
		}
	}

	return ret;
}

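/*
 * Thread-safe lookup: same as __vdpa_find_device_by_name(), but takes
 * vdpa_device_list_lock around the walk.
 */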
struct rte_vdpa_device *
rte_vdpa_find_device_by_name(const char *name)
{
	struct rte_vdpa_device *dev;

	rte_spinlock_lock(&vdpa_device_list_lock);
	dev = __vdpa_find_device_by_name(name);
	rte_spinlock_unlock(&vdpa_device_list_lock);

	return dev;
}

struct rte_device *
rte_vdpa_get_rte_device(struct rte_vdpa_device *vdpa_dev)
{
	if (vdpa_dev == NULL)
		return NULL;

	return vdpa_dev->device;
}

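/*
 * Register a generic rte_device as a vDPA device. Returns the new
 * rte_vdpa_device handle, or NULL if ops is NULL, a device with the
 * same name is already registered, or allocation fails.
 */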
struct rte_vdpa_device *
rte_vdpa_register_device(struct rte_device *rte_dev,
		struct rte_vdpa_dev_ops *ops)
{
	struct rte_vdpa_device *dev;

	if (ops == NULL)
		return NULL;

	rte_spinlock_lock(&vdpa_device_list_lock);
	/* Check the device hasn't been registered already */
	dev = __vdpa_find_device_by_name(rte_dev->name);
	if (dev) {
		dev = NULL;
		goto out_unlock;
	}

	dev = rte_zmalloc(NULL, sizeof(*dev), 0);
	if (!dev)
		goto out_unlock;

	dev->device = rte_dev;
	dev->ops = ops;
	TAILQ_INSERT_TAIL(&vdpa_device_list, dev, next);
out_unlock:
	rte_spinlock_unlock(&vdpa_device_list_lock);

	return dev;
}

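/*
 * Typical driver usage, as a rough sketch (my_ops and my_probe below are
 * illustrative names, not part of this file; only callbacks this file
 * actually invokes are shown):
 *
 *	static struct rte_vdpa_dev_ops my_ops = {
 *		.get_queue_num = my_get_queue_num,
 *		.get_features  = my_get_features,
 *	};
 *
 *	static int my_probe(struct rte_device *rte_dev)
 *	{
 *		struct rte_vdpa_device *vdev;
 *
 *		vdev = rte_vdpa_register_device(rte_dev, &my_ops);
 *		if (vdev == NULL)
 *			return -1;
 *		return 0;
 *	}
 *
 * The matching remove path calls rte_vdpa_unregister_device(vdev),
 * which drops the device from the list and frees it (-1 if it was
 * never registered).
 */
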
int
rte_vdpa_unregister_device(struct rte_vdpa_device *dev)
{
	struct rte_vdpa_device *cur_dev, *tmp_dev;
	int ret = -1;

	rte_spinlock_lock(&vdpa_device_list_lock);
	TAILQ_FOREACH_SAFE(cur_dev, &vdpa_device_list, next, tmp_dev) {
		if (dev != cur_dev)
			continue;

		TAILQ_REMOVE(&vdpa_device_list, dev, next);
		rte_free(dev);
		ret = 0;
		break;
	}
	rte_spinlock_unlock(&vdpa_device_list_lock);

	return ret;
}

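/*
 * Synchronize new entries from the mediated used ring (vring_m) into the
 * guest's used ring, logging DMA-writable buffers as dirty so that live
 * migration can track them. Only split rings are handled here.
 * Returns the number of entries synchronized, or -1 on error.
 */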
int
rte_vdpa_relay_vring_used(int vid, uint16_t qid, void *vring_m)
{
	struct virtio_net *dev = get_device(vid);
	uint16_t idx, idx_m, desc_id;
	struct vhost_virtqueue *vq;
	struct vring_desc desc;
	struct vring_desc *desc_ring;
	struct vring_desc *idesc = NULL;
	struct vring *s_vring;
	uint64_t dlen;
	uint32_t nr_descs;
	int ret;

	if (!dev || !vring_m)
		return -1;

	if (qid >= dev->nr_vring)
		return -1;

	if (vq_is_packed(dev))
		return -1;

	s_vring = (struct vring *)vring_m;
	vq = dev->virtqueue[qid];
	idx = vq->used->idx;
	idx_m = s_vring->used->idx;
	/* Number of new used entries to relay; also the return value. */
	ret = (uint16_t)(idx_m - idx);

	while (idx != idx_m) {
		/* copy used entry, used ring logging is not covered here */
		vq->used->ring[idx & (vq->size - 1)] =
			s_vring->used->ring[idx & (vq->size - 1)];

		desc_id = vq->used->ring[idx & (vq->size - 1)].id;
		desc_ring = vq->desc;
		nr_descs = vq->size;

		if (unlikely(desc_id >= vq->size))
			return -1;

		if (vq->desc[desc_id].flags & VRING_DESC_F_INDIRECT) {
			/*
			 * Indirect chain: walk the indirect table instead.
			 * If the table is not contiguous in the mapped
			 * region, fall back to a local copy of it.
			 */
			dlen = vq->desc[desc_id].len;
			nr_descs = dlen / sizeof(struct vring_desc);
			if (unlikely(nr_descs > vq->size))
				return -1;

			desc_ring = (struct vring_desc *)(uintptr_t)
				vhost_iova_to_vva(dev, vq,
						vq->desc[desc_id].addr, &dlen,
						VHOST_ACCESS_RO);
			if (unlikely(!desc_ring))
				return -1;

			if (unlikely(dlen < vq->desc[desc_id].len)) {
				idesc = vhost_alloc_copy_ind_table(dev, vq,
						vq->desc[desc_id].addr,
						vq->desc[desc_id].len);
				if (unlikely(!idesc))
					return -1;

				desc_ring = idesc;
			}

			desc_id = 0;
		}

		/* dirty page logging for DMA writable buffers */
		do {
			/* Guard against a malformed or looping chain. */
			if (unlikely(desc_id >= vq->size))
				goto fail;
			if (unlikely(nr_descs-- == 0))
				goto fail;
			desc = desc_ring[desc_id];
			if (desc.flags & VRING_DESC_F_WRITE)
				vhost_log_write_iova(dev, vq, desc.addr,
						     desc.len);
			desc_id = desc.next;
		} while (desc.flags & VRING_DESC_F_NEXT);

		if (unlikely(idesc)) {
			free_ind_table(idesc);
			idesc = NULL;
		}

		idx++;
	}

	/* Make the copied used entries visible before updating the index. */
	rte_smp_wmb();
	vq->used->idx = idx_m;

	if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))
		vring_used_event(s_vring) = idx_m;

	return ret;

fail:
	if (unlikely(idesc))
		free_ind_table(idesc);
	return -1;
}

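/*
 * The wrappers below validate their arguments and forward to the driver's
 * rte_vdpa_dev_ops callbacks. The capability queries return -1 when a
 * callback is missing; the stats helpers return -EINVAL / -ENOTSUP.
 */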
int
rte_vdpa_get_queue_num(struct rte_vdpa_device *dev, uint32_t *queue_num)
{
	if (dev == NULL || dev->ops == NULL || dev->ops->get_queue_num == NULL)
		return -1;

	return dev->ops->get_queue_num(dev, queue_num);
}

int
rte_vdpa_get_features(struct rte_vdpa_device *dev, uint64_t *features)
{
	if (dev == NULL || dev->ops == NULL || dev->ops->get_features == NULL)
		return -1;

	return dev->ops->get_features(dev, features);
}

int
rte_vdpa_get_protocol_features(struct rte_vdpa_device *dev, uint64_t *features)
{
	if (dev == NULL || dev->ops == NULL ||
			dev->ops->get_protocol_features == NULL)
		return -1;

	return dev->ops->get_protocol_features(dev, features);
}

int
rte_vdpa_get_stats_names(struct rte_vdpa_device *dev,
		struct rte_vdpa_stat_name *stats_names,
		unsigned int size)
{
	if (!dev)
		return -EINVAL;

	RTE_FUNC_PTR_OR_ERR_RET(dev->ops->get_stats_names, -ENOTSUP);

	return dev->ops->get_stats_names(dev, stats_names, size);
}

int
rte_vdpa_get_stats(struct rte_vdpa_device *dev, uint16_t qid,
		struct rte_vdpa_stat *stats, unsigned int n)
{
	if (!dev || !stats || !n)
		return -EINVAL;

	RTE_FUNC_PTR_OR_ERR_RET(dev->ops->get_stats, -ENOTSUP);

	return dev->ops->get_stats(dev, qid, stats, n);
}

int
rte_vdpa_reset_stats(struct rte_vdpa_device *dev, uint16_t qid)
{
	if (!dev)
		return -EINVAL;

	RTE_FUNC_PTR_OR_ERR_RET(dev->ops->reset_stats, -ENOTSUP);

	return dev->ops->reset_stats(dev, qid);
}

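/*
 * Below: glue for the EAL device-class framework. Registering the "vdpa"
 * class lets applications enumerate vDPA devices through the generic EAL
 * iterator, e.g. RTE_DEV_FOREACH() with a "class=vdpa" devargs string
 * (a usage sketch based on the EAL iterator API, not something this file
 * itself exercises).
 */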
static int
vdpa_dev_match(struct rte_vdpa_device *dev,
	      const struct rte_device *rte_dev)
{
	if (dev->device == rte_dev)
		return 0;

	return -1;
}

/* Generic rte_vdpa_dev comparison function. */
typedef int (*rte_vdpa_cmp_t)(struct rte_vdpa_device *,
		const struct rte_device *rte_dev);

static struct rte_vdpa_device *
vdpa_find_device(const struct rte_vdpa_device *start, rte_vdpa_cmp_t cmp,
		struct rte_device *rte_dev)
{
	struct rte_vdpa_device *dev;

	rte_spinlock_lock(&vdpa_device_list_lock);
	if (start == NULL)
		dev = TAILQ_FIRST(&vdpa_device_list);
	else
		dev = TAILQ_NEXT(start, next);

	while (dev != NULL) {
		if (cmp(dev, rte_dev) == 0)
			break;

		dev = TAILQ_NEXT(dev, next);
	}
	rte_spinlock_unlock(&vdpa_device_list_lock);

	return dev;
}

static void *
vdpa_dev_iterate(const void *start,
		const char *str,
		const struct rte_dev_iterator *it)
{
	struct rte_vdpa_device *vdpa_dev = NULL;

	RTE_SET_USED(str);

	vdpa_dev = vdpa_find_device(start, vdpa_dev_match, it->device);

	return vdpa_dev;
}

static struct rte_class rte_class_vdpa = {
	.dev_iterate = vdpa_dev_iterate,
};

RTE_REGISTER_CLASS(vdpa, rte_class_vdpa);