vhost: replace device ID in applications
[dpdk.git] / lib / librte_vhost / vdpa.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018 Intel Corporation
3  */
4
5 /**
6  * @file
7  *
8  * Device specific vhost lib
9  */
10
11 #include <stdbool.h>
12
13 #include <rte_class.h>
14 #include <rte_malloc.h>
15 #include "rte_vdpa.h"
16 #include "vhost.h"
17
/* Table of all vDPA devices; a slot is in use iff its ops pointer is non-NULL. */
static struct rte_vdpa_device vdpa_devices[MAX_VHOST_DEVICE];
/* Count of slots currently in use in vdpa_devices. */
static uint32_t vdpa_device_num;
20
21
22 int
23 rte_vdpa_find_device_id(struct rte_vdpa_device *dev)
24 {
25         struct rte_vdpa_device *tmp_dev;
26         int i;
27
28         if (dev == NULL)
29                 return -1;
30
31         for (i = 0; i < MAX_VHOST_DEVICE; ++i) {
32                 tmp_dev = &vdpa_devices[i];
33                 if (tmp_dev->ops == NULL)
34                         continue;
35
36                 if (tmp_dev == dev)
37                         return i;
38         }
39
40         return -1;
41 }
42
43 struct rte_vdpa_device *
44 rte_vdpa_find_device_by_name(const char *name)
45 {
46         struct rte_vdpa_device *dev;
47         int i;
48
49         if (name == NULL)
50                 return NULL;
51
52         for (i = 0; i < MAX_VHOST_DEVICE; ++i) {
53                 dev = &vdpa_devices[i];
54                 if (dev->ops == NULL)
55                         continue;
56
57                 if (strncmp(dev->device->name, name, RTE_DEV_NAME_MAX_LEN) == 0)
58                         return dev;
59         }
60
61         return NULL;
62 }
63
64 struct rte_device *
65 rte_vdpa_get_rte_device(struct rte_vdpa_device *vdpa_dev)
66 {
67         if (vdpa_dev == NULL)
68                 return NULL;
69
70         return vdpa_dev->device;
71 }
72
73 struct rte_vdpa_device *
74 rte_vdpa_get_device(int did)
75 {
76         if (did < 0 || did >= MAX_VHOST_DEVICE)
77                 return NULL;
78
79         return &vdpa_devices[did];
80 }
81
82 struct rte_vdpa_device *
83 rte_vdpa_register_device(struct rte_device *rte_dev,
84                 struct rte_vdpa_dev_ops *ops)
85 {
86         struct rte_vdpa_device *dev;
87         int i;
88
89         if (vdpa_device_num >= MAX_VHOST_DEVICE || ops == NULL)
90                 return NULL;
91
92         for (i = 0; i < MAX_VHOST_DEVICE; i++) {
93                 dev = &vdpa_devices[i];
94                 if (dev->ops == NULL)
95                         continue;
96
97                 if (dev->device == rte_dev)
98                         return NULL;
99         }
100
101         for (i = 0; i < MAX_VHOST_DEVICE; i++) {
102                 if (vdpa_devices[i].ops == NULL)
103                         break;
104         }
105
106         if (i == MAX_VHOST_DEVICE)
107                 return NULL;
108
109         dev = &vdpa_devices[i];
110         dev->device = rte_dev;
111         dev->ops = ops;
112         vdpa_device_num++;
113
114         return dev;
115 }
116
117 int
118 rte_vdpa_unregister_device(struct rte_vdpa_device *vdev)
119 {
120         int did = rte_vdpa_find_device_id(vdev);
121
122         if (did < 0 || vdpa_devices[did].ops == NULL)
123                 return -1;
124
125         memset(&vdpa_devices[did], 0, sizeof(struct rte_vdpa_device));
126         vdpa_device_num--;
127
128         return 0;
129 }
130
131 int
132 rte_vdpa_get_device_num(void)
133 {
134         return vdpa_device_num;
135 }
136
/*
 * Relay used-ring updates from a mediated (shadow) split vring back to the
 * guest-visible vring, performing dirty-page logging for DMA-writeable
 * buffers along the way so live migration sees the writes.
 *
 * @vid: vhost device ID (resolved via get_device())
 * @qid: virtqueue index; must be < dev->nr_vring
 * @vring_m: pointer to the mediated struct vring (source of used entries)
 *
 * Returns the number of used entries relayed (as computed from the index
 * delta) on success, -1 on any validation or translation failure.
 *
 * NOTE(review): only split rings are supported — packed rings are rejected
 * up front via vq_is_packed().
 */
int
rte_vdpa_relay_vring_used(int vid, uint16_t qid, void *vring_m)
{
	struct virtio_net *dev = get_device(vid);
	uint16_t idx, idx_m, desc_id;
	struct vhost_virtqueue *vq;
	struct vring_desc desc;
	struct vring_desc *desc_ring;
	struct vring_desc *idesc = NULL;
	struct vring *s_vring;
	uint64_t dlen;
	uint32_t nr_descs;
	int ret;

	if (!dev || !vring_m)
		return -1;

	if (qid >= dev->nr_vring)
		return -1;

	/* packed virtqueue layout is not handled by this relay path */
	if (vq_is_packed(dev))
		return -1;

	s_vring = (struct vring *)vring_m;
	vq = dev->virtqueue[qid];
	idx = vq->used->idx;
	idx_m = s_vring->used->idx;
	/* free-running 16-bit indices: the unsigned delta is the entry count */
	ret = (uint16_t)(idx_m - idx);

	while (idx != idx_m) {
		/* copy used entry, used ring logging is not covered here */
		vq->used->ring[idx & (vq->size - 1)] =
			s_vring->used->ring[idx & (vq->size - 1)];

		desc_id = vq->used->ring[idx & (vq->size - 1)].id;
		desc_ring = vq->desc;
		nr_descs = vq->size;

		if (unlikely(desc_id >= vq->size))
			return -1;

		if (vq->desc[desc_id].flags & VRING_DESC_F_INDIRECT) {
			/* switch to the indirect table for this chain */
			dlen = vq->desc[desc_id].len;
			nr_descs = dlen / sizeof(struct vring_desc);
			if (unlikely(nr_descs > vq->size))
				return -1;

			desc_ring = (struct vring_desc *)(uintptr_t)
				vhost_iova_to_vva(dev, vq,
						vq->desc[desc_id].addr, &dlen,
						VHOST_ACCESS_RO);
			if (unlikely(!desc_ring))
				return -1;

			/*
			 * The table was not contiguously mapped: fall back to
			 * a malloc'd copy (idesc) gathered chunk by chunk.
			 */
			if (unlikely(dlen < vq->desc[desc_id].len)) {
				idesc = vhost_alloc_copy_ind_table(dev, vq,
						vq->desc[desc_id].addr,
						vq->desc[desc_id].len);
				if (unlikely(!idesc))
					return -1;

				desc_ring = idesc;
			}

			/* indirect chains start at slot 0 of their table */
			desc_id = 0;
		}

		/* dirty page logging for DMA writeable buffer */
		do {
			/* bound checks guard against a malicious/corrupt ring */
			if (unlikely(desc_id >= vq->size))
				goto fail;
			if (unlikely(nr_descs-- == 0))
				goto fail;
			desc = desc_ring[desc_id];
			if (desc.flags & VRING_DESC_F_WRITE)
				vhost_log_write_iova(dev, vq, desc.addr,
						     desc.len);
			desc_id = desc.next;
		} while (desc.flags & VRING_DESC_F_NEXT);

		if (unlikely(idesc)) {
			free_ind_table(idesc);
			idesc = NULL;
		}

		idx++;
	}

	/* make all relayed entries visible before publishing the new index */
	rte_smp_wmb();
	vq->used->idx = idx_m;

	if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))
		vring_used_event(s_vring) = idx_m;

	return ret;

fail:
	if (unlikely(idesc))
		free_ind_table(idesc);
	return -1;
}
238
239 int
240 rte_vdpa_get_stats_names(struct rte_vdpa_device *dev,
241                 struct rte_vdpa_stat_name *stats_names,
242                 unsigned int size)
243 {
244         if (!dev)
245                 return -EINVAL;
246
247         RTE_FUNC_PTR_OR_ERR_RET(dev->ops->get_stats_names, -ENOTSUP);
248
249         return dev->ops->get_stats_names(dev, stats_names, size);
250 }
251
252 int
253 rte_vdpa_get_stats(struct rte_vdpa_device *dev, uint16_t qid,
254                 struct rte_vdpa_stat *stats, unsigned int n)
255 {
256         if (!dev || !stats || !n)
257                 return -EINVAL;
258
259         RTE_FUNC_PTR_OR_ERR_RET(dev->ops->get_stats, -ENOTSUP);
260
261         return dev->ops->get_stats(dev, qid, stats, n);
262 }
263
264 int
265 rte_vdpa_reset_stats(struct rte_vdpa_device *dev, uint16_t qid)
266 {
267         if (!dev)
268                 return -EINVAL;
269
270         RTE_FUNC_PTR_OR_ERR_RET(dev->ops->reset_stats, -ENOTSUP);
271
272         return dev->ops->reset_stats(dev, qid);
273 }
274
275 static uint16_t
276 vdpa_dev_to_id(const struct rte_vdpa_device *dev)
277 {
278         if (dev == NULL)
279                 return MAX_VHOST_DEVICE;
280
281         if (dev < &vdpa_devices[0] ||
282                         dev >= &vdpa_devices[MAX_VHOST_DEVICE])
283                 return MAX_VHOST_DEVICE;
284
285         return (uint16_t)(dev - vdpa_devices);
286 }
287
288 static int
289 vdpa_dev_match(struct rte_vdpa_device *dev,
290               const struct rte_device *rte_dev)
291 {
292         if (dev->device == rte_dev)
293                 return 0;
294
295         return -1;
296 }
297
/* Generic rte_vdpa_dev comparison function: returns 0 on a match. */
typedef int (*rte_vdpa_cmp_t)(struct rte_vdpa_device *,
		const struct rte_device *rte_dev);
301
302 static struct rte_vdpa_device *
303 vdpa_find_device(const struct rte_vdpa_device *start, rte_vdpa_cmp_t cmp,
304                 struct rte_device *rte_dev)
305 {
306         struct rte_vdpa_device *dev;
307         uint16_t idx;
308
309         if (start != NULL)
310                 idx = vdpa_dev_to_id(start) + 1;
311         else
312                 idx = 0;
313         for (; idx < MAX_VHOST_DEVICE; idx++) {
314                 dev = &vdpa_devices[idx];
315                 /*
316                  * ToDo: Certainly better to introduce a state field,
317                  * but rely on ops being set for now.
318                  */
319                 if (dev->ops == NULL)
320                         continue;
321                 if (cmp(dev, rte_dev) == 0)
322                         return dev;
323         }
324         return NULL;
325 }
326
327 static void *
328 vdpa_dev_iterate(const void *start,
329                 const char *str,
330                 const struct rte_dev_iterator *it)
331 {
332         struct rte_vdpa_device *vdpa_dev = NULL;
333
334         RTE_SET_USED(str);
335
336         vdpa_dev = vdpa_find_device(start, vdpa_dev_match, it->device);
337
338         return vdpa_dev;
339 }
340
/* Register the "vdpa" device class so EAL device iteration can enumerate
 * vDPA devices through vdpa_dev_iterate().
 */
static struct rte_class rte_class_vdpa = {
	.dev_iterate = vdpa_dev_iterate,
};

RTE_REGISTER_CLASS(vdpa, rte_class_vdpa);