vhost: remove useless vDPA API
lib/librte_vhost/vdpa.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation
 */

/**
 * @file
 *
 * Device specific vhost lib: registration, lookup and helpers for
 * vDPA (vhost data path acceleration) devices.
 */

#include <stdbool.h>
#include <string.h>

#include <rte_class.h>
#include <rte_malloc.h>
#include "rte_vdpa.h"
#include "vhost.h"

/* a table slot is in use while its ops pointer is non-NULL */
static struct rte_vdpa_device vdpa_devices[MAX_VHOST_DEVICE];
static uint32_t vdpa_device_num;

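/*
 * Look up a registered vDPA device from the name of its underlying
 * rte_device; returns NULL if no registered device matches.
 */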
struct rte_vdpa_device *
rte_vdpa_find_device_by_name(const char *name)
{
        struct rte_vdpa_device *dev;
        int i;

        if (name == NULL)
                return NULL;

        for (i = 0; i < MAX_VHOST_DEVICE; ++i) {
                dev = &vdpa_devices[i];
                if (dev->ops == NULL)
                        continue;

                if (strncmp(dev->device->name, name, RTE_DEV_NAME_MAX_LEN) == 0)
                        return dev;
        }

        return NULL;
}

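/* Return the generic rte_device backing a vDPA device. */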
struct rte_device *
rte_vdpa_get_rte_device(struct rte_vdpa_device *vdpa_dev)
{
        if (vdpa_dev == NULL)
                return NULL;

        return vdpa_dev->device;
}

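/*
 * Register a vDPA device: refuse duplicates and table overflow, then
 * claim the first free slot (a slot is free while its ops is NULL).
 */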
struct rte_vdpa_device *
rte_vdpa_register_device(struct rte_device *rte_dev,
                struct rte_vdpa_dev_ops *ops)
{
        struct rte_vdpa_device *dev;
        int i;

        if (vdpa_device_num >= MAX_VHOST_DEVICE || ops == NULL)
                return NULL;

        for (i = 0; i < MAX_VHOST_DEVICE; i++) {
                dev = &vdpa_devices[i];
                if (dev->ops == NULL)
                        continue;

                if (dev->device == rte_dev)
                        return NULL;
        }

        for (i = 0; i < MAX_VHOST_DEVICE; i++) {
                if (vdpa_devices[i].ops == NULL)
                        break;
        }

        if (i == MAX_VHOST_DEVICE)
                return NULL;

        dev = &vdpa_devices[i];
        dev->device = rte_dev;
        dev->ops = ops;
        vdpa_device_num++;

        return dev;
}

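/*
 * Unregister a vDPA device by clearing its slot in the device table;
 * returns -1 if the pointer does not belong to the table.
 */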
int
rte_vdpa_unregister_device(struct rte_vdpa_device *vdev)
{
        int i;

        for (i = 0; i < MAX_VHOST_DEVICE; i++) {
                if (vdev != &vdpa_devices[i])
                        continue;

                memset(vdev, 0, sizeof(struct rte_vdpa_device));
                vdpa_device_num--;

                return 0;
        }

        return -1;
}

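/*
 * Usage sketch (illustrative only, not part of this file): a vDPA
 * driver would typically register itself from its bus probe callback
 * and unregister on remove. The names my_vdpa_ops, my_vdev, my_probe
 * and my_remove below are hypothetical.
 *
 *	static struct rte_vdpa_dev_ops my_vdpa_ops = { ... };
 *	static struct rte_vdpa_device *my_vdev;
 *
 *	static int my_probe(struct rte_pci_driver *drv,
 *			struct rte_pci_device *pci_dev)
 *	{
 *		my_vdev = rte_vdpa_register_device(&pci_dev->device,
 *				&my_vdpa_ops);
 *		return my_vdev == NULL ? -1 : 0;
 *	}
 *
 *	static int my_remove(struct rte_pci_device *pci_dev)
 *	{
 *		return rte_vdpa_unregister_device(my_vdev);
 *	}
 */
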
int
rte_vdpa_get_device_num(void)
{
        return vdpa_device_num;
}

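/*
 * Relay used ring updates for a split virtqueue: copy the used entries
 * the device produced in the mediated ring (vring_m) into the guest
 * visible used ring, log every DMA-writable buffer as dirty for live
 * migration, then publish the new used index. Returns the number of
 * entries relayed, or -1 on error; packed rings are not supported.
 */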
int
rte_vdpa_relay_vring_used(int vid, uint16_t qid, void *vring_m)
{
        struct virtio_net *dev = get_device(vid);
        uint16_t idx, idx_m, desc_id;
        struct vhost_virtqueue *vq;
        struct vring_desc desc;
        struct vring_desc *desc_ring;
        struct vring_desc *idesc = NULL;
        struct vring *s_vring;
        uint64_t dlen;
        uint32_t nr_descs;
        int ret;

        if (!dev || !vring_m)
                return -1;

        if (qid >= dev->nr_vring)
                return -1;

        /* only split rings are supported */
        if (vq_is_packed(dev))
                return -1;

        s_vring = (struct vring *)vring_m;
        vq = dev->virtqueue[qid];
        idx = vq->used->idx;
        idx_m = s_vring->used->idx;
        /* number of new used entries to relay */
        ret = (uint16_t)(idx_m - idx);

        while (idx != idx_m) {
                /* copy used entry, used ring logging is not covered here */
                vq->used->ring[idx & (vq->size - 1)] =
                        s_vring->used->ring[idx & (vq->size - 1)];

                desc_id = vq->used->ring[idx & (vq->size - 1)].id;
                desc_ring = vq->desc;
                nr_descs = vq->size;

                if (unlikely(desc_id >= vq->size))
                        return -1;

                if (vq->desc[desc_id].flags & VRING_DESC_F_INDIRECT) {
                        /* walk the indirect table instead of the main ring */
                        dlen = vq->desc[desc_id].len;
                        nr_descs = dlen / sizeof(struct vring_desc);
                        if (unlikely(nr_descs > vq->size))
                                return -1;

                        desc_ring = (struct vring_desc *)(uintptr_t)
                                vhost_iova_to_vva(dev, vq,
                                                vq->desc[desc_id].addr, &dlen,
                                                VHOST_ACCESS_RO);
                        if (unlikely(!desc_ring))
                                return -1;

                        if (unlikely(dlen < vq->desc[desc_id].len)) {
                                /* table not contiguous in host VA space,
                                 * fall back on a local copy
                                 */
                                idesc = vhost_alloc_copy_ind_table(dev, vq,
                                                vq->desc[desc_id].addr,
                                                vq->desc[desc_id].len);
                                if (unlikely(!idesc))
                                        return -1;

                                desc_ring = idesc;
                        }

                        desc_id = 0;
                }

                /* dirty page logging for DMA writeable buffer */
                do {
                        if (unlikely(desc_id >= vq->size))
                                goto fail;
                        if (unlikely(nr_descs-- == 0))
                                goto fail;
                        desc = desc_ring[desc_id];
                        if (desc.flags & VRING_DESC_F_WRITE)
                                vhost_log_write_iova(dev, vq, desc.addr,
                                                     desc.len);
                        desc_id = desc.next;
                } while (desc.flags & VRING_DESC_F_NEXT);

                if (unlikely(idesc)) {
                        free_ind_table(idesc);
                        idesc = NULL;
                }

                idx++;
        }

        /* make the copied entries visible before publishing the index */
        rte_smp_wmb();
        vq->used->idx = idx_m;

        if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))
                vring_used_event(s_vring) = idx_m;

        return ret;

fail:
        if (unlikely(idesc))
                free_ind_table(idesc);
        return -1;
}

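/*
 * Retrieve the driver's statistics names; returns -ENOTSUP when the
 * driver does not implement the callback.
 */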
int
rte_vdpa_get_stats_names(struct rte_vdpa_device *dev,
                struct rte_vdpa_stat_name *stats_names,
                unsigned int size)
{
        if (!dev)
                return -EINVAL;

        RTE_FUNC_PTR_OR_ERR_RET(dev->ops->get_stats_names, -ENOTSUP);

        return dev->ops->get_stats_names(dev, stats_names, size);
}

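/* Retrieve statistics of a given virtqueue through the driver callback. */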
int
rte_vdpa_get_stats(struct rte_vdpa_device *dev, uint16_t qid,
                struct rte_vdpa_stat *stats, unsigned int n)
{
        if (!dev || !stats || !n)
                return -EINVAL;

        RTE_FUNC_PTR_OR_ERR_RET(dev->ops->get_stats, -ENOTSUP);

        return dev->ops->get_stats(dev, qid, stats, n);
}

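/* Reset statistics of a given virtqueue through the driver callback. */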
int
rte_vdpa_reset_stats(struct rte_vdpa_device *dev, uint16_t qid)
{
        if (!dev)
                return -EINVAL;

        RTE_FUNC_PTR_OR_ERR_RET(dev->ops->reset_stats, -ENOTSUP);

        return dev->ops->reset_stats(dev, qid);
}

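/*
 * Usage sketch (illustrative only): an application could dump all
 * statistics of queue 0 of a device as follows; the array size and
 * the vdev variable are hypothetical.
 *
 *	struct rte_vdpa_stat_name names[64];
 *	struct rte_vdpa_stat stats[64];
 *	int i, n;
 *
 *	n = rte_vdpa_get_stats_names(vdev, names, RTE_DIM(names));
 *	if (n > 0 && rte_vdpa_get_stats(vdev, 0, stats, n) == n)
 *		for (i = 0; i < n; i++)
 *			printf("%s: %" PRIu64 "\n",
 *				names[stats[i].id].name, stats[i].value);
 */
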
/*
 * Map a device pointer back to its index in the static table;
 * pointers outside the table map to MAX_VHOST_DEVICE.
 */
static uint16_t
vdpa_dev_to_id(const struct rte_vdpa_device *dev)
{
        if (dev == NULL)
                return MAX_VHOST_DEVICE;

        if (dev < &vdpa_devices[0] ||
                        dev >= &vdpa_devices[MAX_VHOST_DEVICE])
                return MAX_VHOST_DEVICE;

        return (uint16_t)(dev - vdpa_devices);
}

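/* Match callback for the iterator: compare the backing rte_device. */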
static int
vdpa_dev_match(struct rte_vdpa_device *dev,
              const struct rte_device *rte_dev)
{
        if (dev->device == rte_dev)
                return 0;

        return -1;
}

/* Generic rte_vdpa_dev comparison function. */
typedef int (*rte_vdpa_cmp_t)(struct rte_vdpa_device *,
                const struct rte_device *rte_dev);

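/*
 * Walk the device table from the slot following 'start' (or from the
 * beginning when start is NULL) and return the first registered
 * device matching 'cmp'.
 */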
static struct rte_vdpa_device *
vdpa_find_device(const struct rte_vdpa_device *start, rte_vdpa_cmp_t cmp,
                struct rte_device *rte_dev)
{
        struct rte_vdpa_device *dev;
        uint16_t idx;

        if (start != NULL)
                idx = vdpa_dev_to_id(start) + 1;
        else
                idx = 0;
        for (; idx < MAX_VHOST_DEVICE; idx++) {
                dev = &vdpa_devices[idx];
                /*
                 * ToDo: Certainly better to introduce a state field,
                 * but rely on ops being set for now.
                 */
                if (dev->ops == NULL)
                        continue;
                if (cmp(dev, rte_dev) == 0)
                        return dev;
        }
        return NULL;
}

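/*
 * Class iterator callback: return the next vDPA device bound to the
 * rte_device the iterator points at.
 */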
static void *
vdpa_dev_iterate(const void *start,
                const char *str,
                const struct rte_dev_iterator *it)
{
        struct rte_vdpa_device *vdpa_dev = NULL;

        RTE_SET_USED(str);

        vdpa_dev = vdpa_find_device(start, vdpa_dev_match, it->device);

        return vdpa_dev;
}

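/*
 * Register the "vdpa" device class so vDPA devices can be enumerated
 * through the generic EAL device iteration API (e.g. RTE_DEV_FOREACH).
 */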
static struct rte_class rte_class_vdpa = {
        .dev_iterate = vdpa_dev_iterate,
};

RTE_REGISTER_CLASS(vdpa, rte_class_vdpa);