49ada002773431f84161e8b6d670f4ccd878f5a9
[dpdk.git] / lib / librte_vhost / vdpa.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018 Intel Corporation
3  */
4
5 /**
6  * @file
7  *
8  * Device specific vhost lib
9  */
10
11 #include <stdbool.h>
12
13 #include <rte_class.h>
14 #include <rte_malloc.h>
15 #include "rte_vdpa.h"
16 #include "vhost.h"
17
/*
 * Fixed-size registry of vDPA devices; a slot is considered free when
 * its ops pointer is NULL (no dedicated state field yet).
 */
static struct rte_vdpa_device vdpa_devices[MAX_VHOST_DEVICE];
/* Count of occupied slots in vdpa_devices. */
static uint32_t vdpa_device_num;
20
21
22 int
23 rte_vdpa_find_device_id(struct rte_vdpa_device *dev)
24 {
25         struct rte_vdpa_device *tmp_dev;
26         int i;
27
28         if (dev == NULL)
29                 return -1;
30
31         for (i = 0; i < MAX_VHOST_DEVICE; ++i) {
32                 tmp_dev = &vdpa_devices[i];
33                 if (tmp_dev->ops == NULL)
34                         continue;
35
36                 if (tmp_dev == dev)
37                         return i;
38         }
39
40         return -1;
41 }
42
43 int
44 rte_vdpa_find_device_id_by_name(const char *name)
45 {
46         struct rte_vdpa_device *dev;
47         int i;
48
49         if (name == NULL)
50                 return -1;
51
52         for (i = 0; i < MAX_VHOST_DEVICE; ++i) {
53                 dev = &vdpa_devices[i];
54                 if (dev->ops == NULL)
55                         continue;
56
57                 if (strncmp(dev->device->name, name, RTE_DEV_NAME_MAX_LEN) == 0)
58                         return i;
59         }
60
61         return -1;
62 }
63
64 struct rte_vdpa_device *
65 rte_vdpa_get_device(int did)
66 {
67         if (did < 0 || did >= MAX_VHOST_DEVICE)
68                 return NULL;
69
70         return &vdpa_devices[did];
71 }
72
73 struct rte_vdpa_device *
74 rte_vdpa_register_device(struct rte_device *rte_dev,
75                 struct rte_vdpa_dev_ops *ops)
76 {
77         struct rte_vdpa_device *dev;
78         int i;
79
80         if (vdpa_device_num >= MAX_VHOST_DEVICE || ops == NULL)
81                 return NULL;
82
83         for (i = 0; i < MAX_VHOST_DEVICE; i++) {
84                 dev = &vdpa_devices[i];
85                 if (dev->ops == NULL)
86                         continue;
87
88                 if (dev->device == rte_dev)
89                         return NULL;
90         }
91
92         for (i = 0; i < MAX_VHOST_DEVICE; i++) {
93                 if (vdpa_devices[i].ops == NULL)
94                         break;
95         }
96
97         if (i == MAX_VHOST_DEVICE)
98                 return NULL;
99
100         dev = &vdpa_devices[i];
101         dev->device = rte_dev;
102         dev->ops = ops;
103         vdpa_device_num++;
104
105         return dev;
106 }
107
108 int
109 rte_vdpa_unregister_device(struct rte_vdpa_device *vdev)
110 {
111         int did = rte_vdpa_find_device_id(vdev);
112
113         if (did < 0 || vdpa_devices[did].ops == NULL)
114                 return -1;
115
116         memset(&vdpa_devices[did], 0, sizeof(struct rte_vdpa_device));
117         vdpa_device_num--;
118
119         return 0;
120 }
121
122 int
123 rte_vdpa_get_device_num(void)
124 {
125         return vdpa_device_num;
126 }
127
/**
 * Relay used-ring entries from a shadow ring (presumably the one the vDPA
 * hardware updates — the vring_m argument) into the guest-visible used
 * ring, logging dirty pages for DMA-writable buffers so migration sees
 * writes performed on the relayed descriptors. Split rings only.
 *
 * @param vid      vhost device id
 * @param qid      virtqueue index within the device
 * @param vring_m  pointer to the source struct vring whose used ring is read
 * @return number of used entries relayed on success, -1 on failure
 */
int
rte_vdpa_relay_vring_used(int vid, uint16_t qid, void *vring_m)
{
	struct virtio_net *dev = get_device(vid);
	uint16_t idx, idx_m, desc_id;
	struct vhost_virtqueue *vq;
	struct vring_desc desc;
	struct vring_desc *desc_ring;
	struct vring_desc *idesc = NULL;
	struct vring *s_vring;
	uint64_t dlen;
	uint32_t nr_descs;
	int ret;

	if (!dev || !vring_m)
		return -1;

	if (qid >= dev->nr_vring)
		return -1;

	/* This relay path only handles split virtqueues. */
	if (vq_is_packed(dev))
		return -1;

	s_vring = (struct vring *)vring_m;
	vq = dev->virtqueue[qid];
	idx = vq->used->idx;
	idx_m = s_vring->used->idx;
	/* Count of new entries; 16-bit subtraction is wrap-safe. */
	ret = (uint16_t)(idx_m - idx);

	while (idx != idx_m) {
		/* copy used entry, used ring logging is not covered here */
		vq->used->ring[idx & (vq->size - 1)] =
			s_vring->used->ring[idx & (vq->size - 1)];

		desc_id = vq->used->ring[idx & (vq->size - 1)].id;
		desc_ring = vq->desc;
		nr_descs = vq->size;

		if (unlikely(desc_id >= vq->size))
			return -1;

		if (vq->desc[desc_id].flags & VRING_DESC_F_INDIRECT) {
			dlen = vq->desc[desc_id].len;
			/* An indirect table may not exceed the queue size. */
			nr_descs = dlen / sizeof(struct vring_desc);
			if (unlikely(nr_descs > vq->size))
				return -1;

			/* Translate the indirect table IOVA to host VA. */
			desc_ring = (struct vring_desc *)(uintptr_t)
				vhost_iova_to_vva(dev, vq,
						vq->desc[desc_id].addr, &dlen,
						VHOST_ACCESS_RO);
			if (unlikely(!desc_ring))
				return -1;

			if (unlikely(dlen < vq->desc[desc_id].len)) {
				/*
				 * Table is not contiguous in host VA; fall
				 * back to an allocated bounce copy (freed at
				 * the end of this iteration or on failure).
				 */
				idesc = vhost_alloc_copy_ind_table(dev, vq,
						vq->desc[desc_id].addr,
						vq->desc[desc_id].len);
				if (unlikely(!idesc))
					return -1;

				desc_ring = idesc;
			}

			/* Indirect chains start at index 0 of the table. */
			desc_id = 0;
		}

		/* dirty page logging for DMA writeable buffer */
		do {
			/* Reject malformed ids and chains longer than the
			 * ring (nr_descs also breaks descriptor loops). */
			if (unlikely(desc_id >= vq->size))
				goto fail;
			if (unlikely(nr_descs-- == 0))
				goto fail;
			desc = desc_ring[desc_id];
			if (desc.flags & VRING_DESC_F_WRITE)
				vhost_log_write_iova(dev, vq, desc.addr,
						     desc.len);
			desc_id = desc.next;
		} while (desc.flags & VRING_DESC_F_NEXT);

		if (unlikely(idesc)) {
			free_ind_table(idesc);
			idesc = NULL;
		}

		idx++;
	}

	/* Ensure copied used entries are visible before publishing idx. */
	rte_smp_wmb();
	vq->used->idx = idx_m;

	if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))
		vring_used_event(s_vring) = idx_m;

	return ret;

fail:
	if (unlikely(idesc))
		free_ind_table(idesc);
	return -1;
}
229
230 int
231 rte_vdpa_get_stats_names(int did, struct rte_vdpa_stat_name *stats_names,
232                          unsigned int size)
233 {
234         struct rte_vdpa_device *vdpa_dev;
235
236         vdpa_dev = rte_vdpa_get_device(did);
237         if (!vdpa_dev)
238                 return -ENODEV;
239
240         RTE_FUNC_PTR_OR_ERR_RET(vdpa_dev->ops->get_stats_names, -ENOTSUP);
241
242         return vdpa_dev->ops->get_stats_names(vdpa_dev, stats_names, size);
243 }
244
245 int
246 rte_vdpa_get_stats(int did, uint16_t qid, struct rte_vdpa_stat *stats,
247                    unsigned int n)
248 {
249         struct rte_vdpa_device *vdpa_dev;
250
251         vdpa_dev = rte_vdpa_get_device(did);
252         if (!vdpa_dev)
253                 return -ENODEV;
254
255         if (!stats || !n)
256                 return -EINVAL;
257
258         RTE_FUNC_PTR_OR_ERR_RET(vdpa_dev->ops->get_stats, -ENOTSUP);
259
260         return vdpa_dev->ops->get_stats(vdpa_dev, qid, stats, n);
261 }
262
263 int
264 rte_vdpa_reset_stats(int did, uint16_t qid)
265 {
266         struct rte_vdpa_device *vdpa_dev;
267
268         vdpa_dev = rte_vdpa_get_device(did);
269         if (!vdpa_dev)
270                 return -ENODEV;
271
272         RTE_FUNC_PTR_OR_ERR_RET(vdpa_dev->ops->reset_stats, -ENOTSUP);
273
274         return vdpa_dev->ops->reset_stats(vdpa_dev, qid);
275 }
276
277 static uint16_t
278 vdpa_dev_to_id(const struct rte_vdpa_device *dev)
279 {
280         if (dev == NULL)
281                 return MAX_VHOST_DEVICE;
282
283         if (dev < &vdpa_devices[0] ||
284                         dev >= &vdpa_devices[MAX_VHOST_DEVICE])
285                 return MAX_VHOST_DEVICE;
286
287         return (uint16_t)(dev - vdpa_devices);
288 }
289
290 static int
291 vdpa_dev_match(struct rte_vdpa_device *dev,
292               const struct rte_device *rte_dev)
293 {
294         if (dev->device == rte_dev)
295                 return 0;
296
297         return -1;
298 }
299
/* Generic rte_vdpa_dev comparison function: returns 0 on a match. */
typedef int (*rte_vdpa_cmp_t)(struct rte_vdpa_device *,
		const struct rte_device *rte_dev);
303
304 static struct rte_vdpa_device *
305 vdpa_find_device(const struct rte_vdpa_device *start, rte_vdpa_cmp_t cmp,
306                 struct rte_device *rte_dev)
307 {
308         struct rte_vdpa_device *dev;
309         uint16_t idx;
310
311         if (start != NULL)
312                 idx = vdpa_dev_to_id(start) + 1;
313         else
314                 idx = 0;
315         for (; idx < MAX_VHOST_DEVICE; idx++) {
316                 dev = &vdpa_devices[idx];
317                 /*
318                  * ToDo: Certainly better to introduce a state field,
319                  * but rely on ops being set for now.
320                  */
321                 if (dev->ops == NULL)
322                         continue;
323                 if (cmp(dev, rte_dev) == 0)
324                         return dev;
325         }
326         return NULL;
327 }
328
329 static void *
330 vdpa_dev_iterate(const void *start,
331                 const char *str,
332                 const struct rte_dev_iterator *it)
333 {
334         struct rte_vdpa_device *vdpa_dev = NULL;
335
336         RTE_SET_USED(str);
337
338         vdpa_dev = vdpa_find_device(start, vdpa_dev_match, it->device);
339
340         return vdpa_dev;
341 }
342
/* Register the "vdpa" device class so EAL iterators can enumerate it. */
static struct rte_class rte_class_vdpa = {
	.dev_iterate = vdpa_dev_iterate,
};

RTE_REGISTER_CLASS(vdpa, rte_class_vdpa);