vhost: make vDPA framework bus agnostic
lib/librte_vhost/vdpa.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation
 */

/**
 * @file
 *
 * Device-specific vhost lib
 */

#include <stdbool.h>
#include <errno.h>
#include <string.h>

#include <rte_class.h>
#include <rte_malloc.h>
#include "rte_vdpa.h"
#include "vhost.h"

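/*
 * Flat table of vDPA devices. A slot is in use when its ops pointer is
 * set, and the device ID exposed to callers is simply the slot index.
 */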
static struct rte_vdpa_device vdpa_devices[MAX_VHOST_DEVICE];
static uint32_t vdpa_device_num;

int
rte_vdpa_register_device(struct rte_device *rte_dev,
		struct rte_vdpa_dev_ops *ops)
{
	struct rte_vdpa_device *dev;
	int i;

	if (vdpa_device_num >= MAX_VHOST_DEVICE || ops == NULL)
		return -1;

	for (i = 0; i < MAX_VHOST_DEVICE; i++) {
		dev = &vdpa_devices[i];
		if (dev->ops == NULL)
			continue;

		if (dev->device == rte_dev)
			return -1;
	}

	for (i = 0; i < MAX_VHOST_DEVICE; i++) {
		if (vdpa_devices[i].ops == NULL)
			break;
	}

	if (i == MAX_VHOST_DEVICE)
		return -1;

	dev = &vdpa_devices[i];
	dev->device = rte_dev;
	dev->ops = ops;
	vdpa_device_num++;

	return i;
}

int
rte_vdpa_unregister_device(int did)
{
	if (did < 0 || did >= MAX_VHOST_DEVICE || vdpa_devices[did].ops == NULL)
		return -1;

	memset(&vdpa_devices[did], 0, sizeof(struct rte_vdpa_device));
	vdpa_device_num--;

	return did;
}
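
/*
 * Usage sketch (hypothetical driver code, not part of this file): a vDPA
 * driver registers its ops against the rte_device it probed, keeps the
 * returned device ID, and unregisters with that same ID on removal.
 * my_vdpa_ops, my_did and the probe/remove callbacks are illustrative
 * names only; the ops callbacks are assumed to be filled in elsewhere.
 *
 *	static struct rte_vdpa_dev_ops my_vdpa_ops;
 *	static int my_did;
 *
 *	static int
 *	my_vdpa_probe(struct rte_device *rte_dev)
 *	{
 *		my_did = rte_vdpa_register_device(rte_dev, &my_vdpa_ops);
 *		return my_did < 0 ? -1 : 0;
 *	}
 *
 *	static int
 *	my_vdpa_remove(struct rte_device *rte_dev)
 *	{
 *		RTE_SET_USED(rte_dev);
 *		return rte_vdpa_unregister_device(my_did);
 *	}
 */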

int
rte_vdpa_find_device_id(struct rte_vdpa_device *dev)
{
	struct rte_vdpa_device *tmp_dev;
	int i;

	if (dev == NULL)
		return -1;

	for (i = 0; i < MAX_VHOST_DEVICE; ++i) {
		tmp_dev = &vdpa_devices[i];
		if (tmp_dev->ops == NULL)
			continue;

		if (tmp_dev == dev)
			return i;
	}

	return -1;
}

int
rte_vdpa_find_device_id_by_name(const char *name)
{
	struct rte_vdpa_device *dev;
	int i;

	if (name == NULL)
		return -1;

	for (i = 0; i < MAX_VHOST_DEVICE; ++i) {
		dev = &vdpa_devices[i];
		if (dev->ops == NULL)
			continue;

		if (strncmp(dev->device->name, name, RTE_DEV_NAME_MAX_LEN) == 0)
			return i;
	}

	return -1;
}
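
/*
 * Usage sketch (hypothetical application code): resolve the device ID
 * from the underlying rte_device name, e.g. a PCI address, before
 * configuring the device; a negative return means no match. The address
 * below is illustrative only.
 *
 *	int did = rte_vdpa_find_device_id_by_name("0000:01:00.0");
 *
 *	if (did < 0)
 *		return -1;
 */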

struct rte_vdpa_device *
rte_vdpa_get_device(int did)
{
	if (did < 0 || did >= MAX_VHOST_DEVICE)
		return NULL;

	return &vdpa_devices[did];
}

int
rte_vdpa_get_device_num(void)
{
	return vdpa_device_num;
}

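/*
 * Relay used-ring updates for a split virtqueue: copy each new used
 * entry from the mediated ring (vring_m) into the guest-visible used
 * ring, log the guest buffers written by the device as dirty for live
 * migration, then publish the new used index. Returns the number of
 * entries relayed, or -1 on error.
 */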
int
rte_vdpa_relay_vring_used(int vid, uint16_t qid, void *vring_m)
{
	struct virtio_net *dev = get_device(vid);
	uint16_t idx, idx_m, desc_id;
	struct vhost_virtqueue *vq;
	struct vring_desc desc;
	struct vring_desc *desc_ring;
	struct vring_desc *idesc = NULL;
	struct vring *s_vring;
	uint64_t dlen;
	uint32_t nr_descs;
	int ret;

	if (!dev || !vring_m)
		return -1;

	if (qid >= dev->nr_vring)
		return -1;

	if (vq_is_packed(dev))
		return -1;

	s_vring = (struct vring *)vring_m;
	vq = dev->virtqueue[qid];
	idx = vq->used->idx;
	idx_m = s_vring->used->idx;
	ret = (uint16_t)(idx_m - idx);

	while (idx != idx_m) {
		/* copy used entry, used ring logging is not covered here */
		vq->used->ring[idx & (vq->size - 1)] =
			s_vring->used->ring[idx & (vq->size - 1)];

		desc_id = vq->used->ring[idx & (vq->size - 1)].id;
		desc_ring = vq->desc;
		nr_descs = vq->size;

		if (unlikely(desc_id >= vq->size))
			return -1;

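		/*
		 * Indirect descriptor: walk the indirect table instead of
		 * the main ring. If the table is not mapped contiguously,
		 * fall back to a locally allocated copy.
		 */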
		if (vq->desc[desc_id].flags & VRING_DESC_F_INDIRECT) {
			dlen = vq->desc[desc_id].len;
			nr_descs = dlen / sizeof(struct vring_desc);
			if (unlikely(nr_descs > vq->size))
				return -1;

			desc_ring = (struct vring_desc *)(uintptr_t)
				vhost_iova_to_vva(dev, vq,
						vq->desc[desc_id].addr, &dlen,
						VHOST_ACCESS_RO);
			if (unlikely(!desc_ring))
				return -1;

			if (unlikely(dlen < vq->desc[desc_id].len)) {
				idesc = vhost_alloc_copy_ind_table(dev, vq,
						vq->desc[desc_id].addr,
						vq->desc[desc_id].len);
				if (unlikely(!idesc))
					return -1;

				desc_ring = idesc;
			}

			desc_id = 0;
		}

		/* dirty page logging for DMA writeable buffer */
		do {
			if (unlikely(desc_id >= vq->size))
				goto fail;
			if (unlikely(nr_descs-- == 0))
				goto fail;
			desc = desc_ring[desc_id];
			if (desc.flags & VRING_DESC_F_WRITE)
				vhost_log_write_iova(dev, vq, desc.addr,
						     desc.len);
			desc_id = desc.next;
		} while (desc.flags & VRING_DESC_F_NEXT);

		if (unlikely(idesc)) {
			free_ind_table(idesc);
			idesc = NULL;
		}

		idx++;
	}

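	/* Make the copied used entries visible before the index update. */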
	rte_smp_wmb();
	vq->used->idx = idx_m;

	if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))
		vring_used_event(s_vring) = idx_m;

	return ret;

fail:
	if (unlikely(idesc))
		free_ind_table(idesc);
	return -1;
}

int
rte_vdpa_get_stats_names(int did, struct rte_vdpa_stat_name *stats_names,
			 unsigned int size)
{
	struct rte_vdpa_device *vdpa_dev;

	vdpa_dev = rte_vdpa_get_device(did);
	if (!vdpa_dev)
		return -ENODEV;

	RTE_FUNC_PTR_OR_ERR_RET(vdpa_dev->ops->get_stats_names, -ENOTSUP);

	return vdpa_dev->ops->get_stats_names(did, stats_names, size);
}

int
rte_vdpa_get_stats(int did, uint16_t qid, struct rte_vdpa_stat *stats,
		   unsigned int n)
{
	struct rte_vdpa_device *vdpa_dev;

	vdpa_dev = rte_vdpa_get_device(did);
	if (!vdpa_dev)
		return -ENODEV;

	if (!stats || !n)
		return -EINVAL;

	RTE_FUNC_PTR_OR_ERR_RET(vdpa_dev->ops->get_stats, -ENOTSUP);

	return vdpa_dev->ops->get_stats(did, qid, stats, n);
}
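
/*
 * Usage sketch (hypothetical application code, error handling trimmed):
 * fetch the driver-specific counters for virtqueue 0 of a previously
 * obtained device ID. It assumes the driver returns the number of
 * counters when get_stats_names() is called with a NULL array, mirroring
 * other DPDK xstats-style APIs; that convention is driver-defined.
 *
 *	int n = rte_vdpa_get_stats_names(did, NULL, 0);
 *	struct rte_vdpa_stat_name *names;
 *	struct rte_vdpa_stat *stats;
 *
 *	names = rte_malloc(NULL, n * sizeof(*names), 0);
 *	stats = rte_malloc(NULL, n * sizeof(*stats), 0);
 *	rte_vdpa_get_stats_names(did, names, n);
 *	rte_vdpa_get_stats(did, 0, stats, n);
 */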

int
rte_vdpa_reset_stats(int did, uint16_t qid)
{
	struct rte_vdpa_device *vdpa_dev;

	vdpa_dev = rte_vdpa_get_device(did);
	if (!vdpa_dev)
		return -ENODEV;

	RTE_FUNC_PTR_OR_ERR_RET(vdpa_dev->ops->reset_stats, -ENOTSUP);

	return vdpa_dev->ops->reset_stats(did, qid);
}

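/*
 * Map a device pointer back to its slot index in vdpa_devices[];
 * MAX_VHOST_DEVICE serves as the out-of-range sentinel.
 */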
static uint16_t
vdpa_dev_to_id(const struct rte_vdpa_device *dev)
{
	if (dev == NULL)
		return MAX_VHOST_DEVICE;

	if (dev < &vdpa_devices[0] ||
			dev >= &vdpa_devices[MAX_VHOST_DEVICE])
		return MAX_VHOST_DEVICE;

	return (uint16_t)(dev - vdpa_devices);
}

static int
vdpa_dev_match(struct rte_vdpa_device *dev,
	      const struct rte_device *rte_dev)
{
	if (dev->device == rte_dev)
		return 0;

	return -1;
}

/* Generic rte_vdpa_dev comparison function. */
typedef int (*rte_vdpa_cmp_t)(struct rte_vdpa_device *,
		const struct rte_device *rte_dev);

static struct rte_vdpa_device *
vdpa_find_device(const struct rte_vdpa_device *start, rte_vdpa_cmp_t cmp,
		struct rte_device *rte_dev)
{
	struct rte_vdpa_device *dev;
	uint16_t idx;

	if (start != NULL)
		idx = vdpa_dev_to_id(start) + 1;
	else
		idx = 0;
	for (; idx < MAX_VHOST_DEVICE; idx++) {
		dev = &vdpa_devices[idx];
		/*
		 * ToDo: Certainly better to introduce a state field,
		 * but rely on ops being set for now.
		 */
		if (dev->ops == NULL)
			continue;
		if (cmp(dev, rte_dev) == 0)
			return dev;
	}
	return NULL;
}

static void *
vdpa_dev_iterate(const void *start,
		const char *str,
		const struct rte_dev_iterator *it)
{
	struct rte_vdpa_device *vdpa_dev = NULL;

	RTE_SET_USED(str);

	vdpa_dev = vdpa_find_device(start, vdpa_dev_match, it->device);

	return vdpa_dev;
}

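/*
 * Register the "vdpa" device class with EAL so that generic device
 * iteration (e.g. RTE_DEV_FOREACH with a "class=vdpa" filter) can
 * enumerate vDPA devices regardless of the bus they sit on.
 */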
static struct rte_class rte_class_vdpa = {
	.dev_iterate = vdpa_dev_iterate,
};

RTE_REGISTER_CLASS(vdpa, rte_class_vdpa);