drivers/net/virtio/virtio_user/vhost_vdpa.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020 Red Hat Inc.
 */

#include <sys/ioctl.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include <rte_memory.h>

#include "vhost.h"
#include "virtio_user_dev.h"

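/* Backend-private data attached to the virtio-user device: the vhost-vDPA
 * character device fd and the backend (protocol) features negotiated with it.
 */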
struct vhost_vdpa_data {
	int vhostfd;
	uint64_t protocol_features;
};

#define VHOST_VDPA_SUPPORTED_BACKEND_FEATURES		\
	((1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2) |	\
	 (1ULL << VHOST_BACKEND_F_IOTLB_BATCH))

/* vhost kernel & vdpa ioctls */
#define VHOST_VIRTIO 0xAF
#define VHOST_GET_FEATURES _IOR(VHOST_VIRTIO, 0x00, __u64)
#define VHOST_SET_FEATURES _IOW(VHOST_VIRTIO, 0x00, __u64)
#define VHOST_SET_OWNER _IO(VHOST_VIRTIO, 0x01)
#define VHOST_RESET_OWNER _IO(VHOST_VIRTIO, 0x02)
#define VHOST_SET_LOG_BASE _IOW(VHOST_VIRTIO, 0x04, __u64)
#define VHOST_SET_LOG_FD _IOW(VHOST_VIRTIO, 0x07, int)
#define VHOST_SET_VRING_NUM _IOW(VHOST_VIRTIO, 0x10, struct vhost_vring_state)
#define VHOST_SET_VRING_ADDR _IOW(VHOST_VIRTIO, 0x11, struct vhost_vring_addr)
#define VHOST_SET_VRING_BASE _IOW(VHOST_VIRTIO, 0x12, struct vhost_vring_state)
#define VHOST_GET_VRING_BASE _IOWR(VHOST_VIRTIO, 0x12, struct vhost_vring_state)
#define VHOST_SET_VRING_KICK _IOW(VHOST_VIRTIO, 0x20, struct vhost_vring_file)
#define VHOST_SET_VRING_CALL _IOW(VHOST_VIRTIO, 0x21, struct vhost_vring_file)
#define VHOST_SET_VRING_ERR _IOW(VHOST_VIRTIO, 0x22, struct vhost_vring_file)
#define VHOST_SET_BACKEND_FEATURES _IOW(VHOST_VIRTIO, 0x25, __u64)
#define VHOST_GET_BACKEND_FEATURES _IOR(VHOST_VIRTIO, 0x26, __u64)
#define VHOST_NET_SET_BACKEND _IOW(VHOST_VIRTIO, 0x30, struct vhost_vring_file)
#define VHOST_VDPA_GET_DEVICE_ID _IOR(VHOST_VIRTIO, 0x70, __u32)
#define VHOST_VDPA_GET_STATUS _IOR(VHOST_VIRTIO, 0x71, __u8)
#define VHOST_VDPA_SET_STATUS _IOW(VHOST_VIRTIO, 0x72, __u8)
#define VHOST_VDPA_SET_VRING_ENABLE _IOW(VHOST_VIRTIO, 0x75, struct vhost_vring_state)

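/* IOTLB messages mirror the Linux vhost UAPI: mappings are added and
 * removed by writing a struct vhost_msg of type VHOST_IOTLB_MSG_V2 to
 * the vhost-vDPA fd, not through a dedicated ioctl.
 */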
/* no alignment requirement */
struct vhost_iotlb_msg {
	uint64_t iova;
	uint64_t size;
	uint64_t uaddr;
#define VHOST_ACCESS_RO      0x1
#define VHOST_ACCESS_WO      0x2
#define VHOST_ACCESS_RW      0x3
	uint8_t perm;
#define VHOST_IOTLB_MISS           1
#define VHOST_IOTLB_UPDATE         2
#define VHOST_IOTLB_INVALIDATE     3
#define VHOST_IOTLB_ACCESS_FAIL    4
#define VHOST_IOTLB_BATCH_BEGIN    5
#define VHOST_IOTLB_BATCH_END      6
	uint8_t type;
};

#define VHOST_IOTLB_MSG_V2 0x2

struct vhost_msg {
	uint32_t type;
	uint32_t reserved;
	union {
		struct vhost_iotlb_msg iotlb;
		uint8_t padding[64];
	};
};

static int
vhost_vdpa_ioctl(int fd, uint64_t request, void *arg)
{
	int ret;

	ret = ioctl(fd, request, arg);
	if (ret) {
		PMD_DRV_LOG(ERR, "Vhost-vDPA ioctl %"PRIu64" failed (%s)",
				request, strerror(errno));
		return -1;
	}

	return 0;
}

static int
vhost_vdpa_set_owner(struct virtio_user_dev *dev)
{
	struct vhost_vdpa_data *data = dev->backend_data;

	return vhost_vdpa_ioctl(data->vhostfd, VHOST_SET_OWNER, NULL);
}

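/* vDPA "protocol features" are the vhost backend features, negotiated
 * through VHOST_GET/SET_BACKEND_FEATURES separately from the virtio
 * feature bits.
 */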
static int
vhost_vdpa_get_protocol_features(struct virtio_user_dev *dev, uint64_t *features)
{
	struct vhost_vdpa_data *data = dev->backend_data;

	return vhost_vdpa_ioctl(data->vhostfd, VHOST_GET_BACKEND_FEATURES, features);
}

static int
vhost_vdpa_set_protocol_features(struct virtio_user_dev *dev, uint64_t features)
{
	struct vhost_vdpa_data *data = dev->backend_data;

	return vhost_vdpa_ioctl(data->vhostfd, VHOST_SET_BACKEND_FEATURES, &features);
}

static int
vhost_vdpa_get_features(struct virtio_user_dev *dev, uint64_t *features)
{
	struct vhost_vdpa_data *data = dev->backend_data;
	int ret;

	ret = vhost_vdpa_ioctl(data->vhostfd, VHOST_GET_FEATURES, features);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to get features");
		return -1;
	}

	/* Multiqueue not supported for now */
	*features &= ~(1ULL << VIRTIO_NET_F_MQ);

	/* Negotiate vDPA backend features: keep only the ones we support */
	ret = vhost_vdpa_get_protocol_features(dev, &data->protocol_features);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Failed to get backend features");
		return -1;
	}

	data->protocol_features &= VHOST_VDPA_SUPPORTED_BACKEND_FEATURES;

	ret = vhost_vdpa_set_protocol_features(dev, data->protocol_features);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Failed to set backend features");
		return -1;
	}

	return 0;
}

static int
vhost_vdpa_set_features(struct virtio_user_dev *dev, uint64_t features)
{
	struct vhost_vdpa_data *data = dev->backend_data;

	/* WORKAROUND: vhost-vDPA expects VIRTIO_F_IOMMU_PLATFORM to be
	 * advertised, as all DMA goes through the IOTLB mappings set up
	 * below, so force it here.
	 */
	features |= 1ULL << VIRTIO_F_IOMMU_PLATFORM;

	return vhost_vdpa_ioctl(data->vhostfd, VHOST_SET_FEATURES, &features);
}

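/* When VHOST_BACKEND_F_IOTLB_BATCH has been negotiated, IOTLB updates
 * can be grouped between BATCH_BEGIN and BATCH_END messages so the
 * backend applies them as a single transaction.
 */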
static int
vhost_vdpa_iotlb_batch_begin(struct virtio_user_dev *dev)
{
	struct vhost_vdpa_data *data = dev->backend_data;
	struct vhost_msg msg = {};

	if (!(data->protocol_features & (1ULL << VHOST_BACKEND_F_IOTLB_BATCH)))
		return 0;

	if (!(data->protocol_features & (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2))) {
		PMD_DRV_LOG(ERR, "IOTLB_MSG_V2 not supported by the backend.");
		return -1;
	}

	msg.type = VHOST_IOTLB_MSG_V2;
	msg.iotlb.type = VHOST_IOTLB_BATCH_BEGIN;

	if (write(data->vhostfd, &msg, sizeof(msg)) != sizeof(msg)) {
		PMD_DRV_LOG(ERR, "Failed to send IOTLB batch begin (%s)",
				strerror(errno));
		return -1;
	}

	return 0;
}

static int
vhost_vdpa_iotlb_batch_end(struct virtio_user_dev *dev)
{
	struct vhost_vdpa_data *data = dev->backend_data;
	struct vhost_msg msg = {};

	if (!(data->protocol_features & (1ULL << VHOST_BACKEND_F_IOTLB_BATCH)))
		return 0;

	if (!(data->protocol_features & (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2))) {
		PMD_DRV_LOG(ERR, "IOTLB_MSG_V2 not supported by the backend.");
		return -1;
	}

	msg.type = VHOST_IOTLB_MSG_V2;
	msg.iotlb.type = VHOST_IOTLB_BATCH_END;

	if (write(data->vhostfd, &msg, sizeof(msg)) != sizeof(msg)) {
		PMD_DRV_LOG(ERR, "Failed to send IOTLB batch end (%s)",
				strerror(errno));
		return -1;
	}

	return 0;
}

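/* Map a host virtual address range to a given IOVA by sending an
 * IOTLB update message with RW permission on the vhost-vDPA fd.
 */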
static int
vhost_vdpa_dma_map(struct virtio_user_dev *dev, void *addr,
				  uint64_t iova, size_t len)
{
	struct vhost_vdpa_data *data = dev->backend_data;
	struct vhost_msg msg = {};

	if (!(data->protocol_features & (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2))) {
		PMD_DRV_LOG(ERR, "IOTLB_MSG_V2 not supported by the backend.");
		return -1;
	}

	msg.type = VHOST_IOTLB_MSG_V2;
	msg.iotlb.type = VHOST_IOTLB_UPDATE;
	msg.iotlb.iova = iova;
	msg.iotlb.uaddr = (uint64_t)(uintptr_t)addr;
	msg.iotlb.size = len;
	msg.iotlb.perm = VHOST_ACCESS_RW;

	PMD_DRV_LOG(DEBUG, "%s: iova: 0x%" PRIx64 ", addr: %p, len: 0x%zx",
			__func__, iova, addr, len);

	if (write(data->vhostfd, &msg, sizeof(msg)) != sizeof(msg)) {
		PMD_DRV_LOG(ERR, "Failed to send IOTLB update (%s)",
				strerror(errno));
		return -1;
	}

	return 0;
}

static int
vhost_vdpa_dma_unmap(struct virtio_user_dev *dev, __rte_unused void *addr,
				  uint64_t iova, size_t len)
{
	struct vhost_vdpa_data *data = dev->backend_data;
	struct vhost_msg msg = {};

	if (!(data->protocol_features & (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2))) {
		PMD_DRV_LOG(ERR, "IOTLB_MSG_V2 not supported by the backend.");
		return -1;
	}

	msg.type = VHOST_IOTLB_MSG_V2;
	msg.iotlb.type = VHOST_IOTLB_INVALIDATE;
	msg.iotlb.iova = iova;
	msg.iotlb.size = len;

	PMD_DRV_LOG(DEBUG, "%s: iova: 0x%" PRIx64 ", len: 0x%zx",
			__func__, iova, len);

	if (write(data->vhostfd, &msg, sizeof(msg)) != sizeof(msg)) {
		PMD_DRV_LOG(ERR, "Failed to send IOTLB invalidate (%s)",
				strerror(errno));
		return -1;
	}

	return 0;
}

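/* dma_map/dma_unmap ops: wrap the single IOTLB update between batch
 * begin/end messages so backends supporting batching commit it at once.
 */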
static int
vhost_vdpa_dma_map_batch(struct virtio_user_dev *dev, void *addr,
				  uint64_t iova, size_t len)
{
	int ret;

	if (vhost_vdpa_iotlb_batch_begin(dev) < 0)
		return -1;

	ret = vhost_vdpa_dma_map(dev, addr, iova, len);

	if (vhost_vdpa_iotlb_batch_end(dev) < 0)
		return -1;

	return ret;
}

static int
vhost_vdpa_dma_unmap_batch(struct virtio_user_dev *dev, void *addr,
				  uint64_t iova, size_t len)
{
	int ret;

	if (vhost_vdpa_iotlb_batch_begin(dev) < 0)
		return -1;

	ret = vhost_vdpa_dma_unmap(dev, addr, iova, len);

	if (vhost_vdpa_iotlb_batch_end(dev) < 0)
		return -1;

	return ret;
}

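/* Callbacks for the EAL memseg walks below, used to program the device
 * IOTLB from the DPDK memory map.
 */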
static int
vhost_vdpa_map_contig(const struct rte_memseg_list *msl,
		const struct rte_memseg *ms, size_t len, void *arg)
{
	struct virtio_user_dev *dev = arg;

	if (msl->external)
		return 0;

	return vhost_vdpa_dma_map(dev, ms->addr, ms->iova, len);
}

static int
vhost_vdpa_map(const struct rte_memseg_list *msl, const struct rte_memseg *ms,
		void *arg)
{
	struct virtio_user_dev *dev = arg;

	/* skip external memory that isn't a heap */
	if (msl->external && !msl->heap)
		return 0;

	/* skip any segments with invalid IOVA addresses */
	if (ms->iova == RTE_BAD_IOVA)
		return 0;

	/* if IOVA mode is VA, we've already mapped the internal segments */
	if (!msl->external && rte_eal_iova_mode() == RTE_IOVA_VA)
		return 0;

	return vhost_vdpa_dma_map(dev, ms->addr, ms->iova, ms->len);
}

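/* vhost-vDPA has no memory-table ioctl: instead, flush the whole IOVA
 * space, then re-add every memory segment through IOTLB updates, all
 * within a single batch.
 */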
static int
vhost_vdpa_set_memory_table(struct virtio_user_dev *dev)
{
	int ret;

	if (vhost_vdpa_iotlb_batch_begin(dev) < 0)
		return -1;

	vhost_vdpa_dma_unmap(dev, NULL, 0, SIZE_MAX);

	if (rte_eal_iova_mode() == RTE_IOVA_VA) {
		/* with IOVA as VA mode, we can get away with mapping contiguous
		 * chunks rather than going page-by-page.
		 */
		ret = rte_memseg_contig_walk_thread_unsafe(
				vhost_vdpa_map_contig, dev);
		if (ret)
			goto batch_end;
		/* we have to continue the walk because we've skipped the
		 * external segments during the contig walk.
		 */
	}
	ret = rte_memseg_walk_thread_unsafe(vhost_vdpa_map, dev);

batch_end:
	if (vhost_vdpa_iotlb_batch_end(dev) < 0)
		return -1;

	return ret;
}

static int
vhost_vdpa_set_vring_enable(struct virtio_user_dev *dev, struct vhost_vring_state *state)
{
	struct vhost_vdpa_data *data = dev->backend_data;

	return vhost_vdpa_ioctl(data->vhostfd, VHOST_VDPA_SET_VRING_ENABLE, state);
}

static int
vhost_vdpa_set_vring_num(struct virtio_user_dev *dev, struct vhost_vring_state *state)
{
	struct vhost_vdpa_data *data = dev->backend_data;

	return vhost_vdpa_ioctl(data->vhostfd, VHOST_SET_VRING_NUM, state);
}

static int
vhost_vdpa_set_vring_base(struct virtio_user_dev *dev, struct vhost_vring_state *state)
{
	struct vhost_vdpa_data *data = dev->backend_data;

	return vhost_vdpa_ioctl(data->vhostfd, VHOST_SET_VRING_BASE, state);
}

static int
vhost_vdpa_get_vring_base(struct virtio_user_dev *dev, struct vhost_vring_state *state)
{
	struct vhost_vdpa_data *data = dev->backend_data;

	return vhost_vdpa_ioctl(data->vhostfd, VHOST_GET_VRING_BASE, state);
}

static int
vhost_vdpa_set_vring_call(struct virtio_user_dev *dev, struct vhost_vring_file *file)
{
	struct vhost_vdpa_data *data = dev->backend_data;

	return vhost_vdpa_ioctl(data->vhostfd, VHOST_SET_VRING_CALL, file);
}

static int
vhost_vdpa_set_vring_kick(struct virtio_user_dev *dev, struct vhost_vring_file *file)
{
	struct vhost_vdpa_data *data = dev->backend_data;

	return vhost_vdpa_ioctl(data->vhostfd, VHOST_SET_VRING_KICK, file);
}

static int
vhost_vdpa_set_vring_addr(struct virtio_user_dev *dev, struct vhost_vring_addr *addr)
{
	struct vhost_vdpa_data *data = dev->backend_data;

	return vhost_vdpa_ioctl(data->vhostfd, VHOST_SET_VRING_ADDR, addr);
}

static int
vhost_vdpa_get_status(struct virtio_user_dev *dev, uint8_t *status)
{
	struct vhost_vdpa_data *data = dev->backend_data;

	return vhost_vdpa_ioctl(data->vhostfd, VHOST_VDPA_GET_STATUS, status);
}

static int
vhost_vdpa_set_status(struct virtio_user_dev *dev, uint8_t status)
{
	struct vhost_vdpa_data *data = dev->backend_data;

	return vhost_vdpa_ioctl(data->vhostfd, VHOST_VDPA_SET_STATUS, &status);
}

/**
 * Set up the environment to talk to a vhost-vDPA backend.
 *
 * @return
 *   - (-1) on failure;
 *   - (0) on success.
 */
static int
vhost_vdpa_setup(struct virtio_user_dev *dev)
{
	struct vhost_vdpa_data *data;
	uint32_t did = (uint32_t)-1;

	data = malloc(sizeof(*data));
	if (!data) {
		PMD_DRV_LOG(ERR, "(%s) Failed to allocate backend data", dev->path);
		return -1;
	}

	data->vhostfd = open(dev->path, O_RDWR);
	if (data->vhostfd < 0) {
		PMD_DRV_LOG(ERR, "Failed to open %s: %s",
				dev->path, strerror(errno));
		free(data);
		return -1;
	}

	if (ioctl(data->vhostfd, VHOST_VDPA_GET_DEVICE_ID, &did) < 0 ||
			did != VIRTIO_ID_NETWORK) {
		PMD_DRV_LOG(ERR, "Invalid vDPA device ID: %u", did);
		close(data->vhostfd);
		free(data);
		return -1;
	}

	dev->backend_data = data;

	return 0;
}

static int
vhost_vdpa_destroy(struct virtio_user_dev *dev)
{
	struct vhost_vdpa_data *data = dev->backend_data;

	if (!data)
		return 0;

	close(data->vhostfd);

	free(data);
	dev->backend_data = NULL;

	return 0;
}

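/* Each queue pair maps to two vrings: Rx at index 2 * pair_idx, Tx at
 * 2 * pair_idx + 1. Both are toggled through VHOST_VDPA_SET_VRING_ENABLE.
 */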
static int
vhost_vdpa_enable_queue_pair(struct virtio_user_dev *dev,
			       uint16_t pair_idx,
			       int enable)
{
	int i;

	if (dev->qp_enabled[pair_idx] == enable)
		return 0;

	for (i = 0; i < 2; ++i) {
		struct vhost_vring_state state = {
			.index = pair_idx * 2 + i,
			.num   = enable,
		};

		if (vhost_vdpa_set_vring_enable(dev, &state))
			return -1;
	}

	dev->qp_enabled[pair_idx] = enable;

	return 0;
}

static int
vhost_vdpa_get_backend_features(uint64_t *features)
{
	*features = 0;

	return 0;
}

static int
vhost_vdpa_update_link_state(struct virtio_user_dev *dev __rte_unused)
{
	/* Nothing to update (for now?) */
	return 0;
}

static int
vhost_vdpa_get_intr_fd(struct virtio_user_dev *dev __rte_unused)
{
	/* No link state interrupt with Vhost-vDPA */
	return -1;
}

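/* Ops table registered with the generic virtio-user backend layer when
 * the vhost-vDPA backend is selected.
 */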
struct virtio_user_backend_ops virtio_ops_vdpa = {
	.setup = vhost_vdpa_setup,
	.destroy = vhost_vdpa_destroy,
	.get_backend_features = vhost_vdpa_get_backend_features,
	.set_owner = vhost_vdpa_set_owner,
	.get_features = vhost_vdpa_get_features,
	.set_features = vhost_vdpa_set_features,
	.set_memory_table = vhost_vdpa_set_memory_table,
	.set_vring_num = vhost_vdpa_set_vring_num,
	.set_vring_base = vhost_vdpa_set_vring_base,
	.get_vring_base = vhost_vdpa_get_vring_base,
	.set_vring_call = vhost_vdpa_set_vring_call,
	.set_vring_kick = vhost_vdpa_set_vring_kick,
	.set_vring_addr = vhost_vdpa_set_vring_addr,
	.get_status = vhost_vdpa_get_status,
	.set_status = vhost_vdpa_set_status,
	.enable_qp = vhost_vdpa_enable_queue_pair,
	.dma_map = vhost_vdpa_dma_map_batch,
	.dma_unmap = vhost_vdpa_dma_unmap_batch,
	.update_link_state = vhost_vdpa_update_link_state,
	.get_intr_fd = vhost_vdpa_get_intr_fd,
};