/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020 Red Hat Inc.
 */

#include <sys/ioctl.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>

#include <rte_memory.h>

#include "vhost.h"
#include "virtio_user_dev.h"

/* vhost kernel & vdpa ioctls */
#define VHOST_VIRTIO 0xAF
#define VHOST_GET_FEATURES _IOR(VHOST_VIRTIO, 0x00, __u64)
#define VHOST_SET_FEATURES _IOW(VHOST_VIRTIO, 0x00, __u64)
#define VHOST_SET_OWNER _IO(VHOST_VIRTIO, 0x01)
#define VHOST_RESET_OWNER _IO(VHOST_VIRTIO, 0x02)
#define VHOST_SET_LOG_BASE _IOW(VHOST_VIRTIO, 0x04, __u64)
#define VHOST_SET_LOG_FD _IOW(VHOST_VIRTIO, 0x07, int)
#define VHOST_SET_VRING_NUM _IOW(VHOST_VIRTIO, 0x10, struct vhost_vring_state)
#define VHOST_SET_VRING_ADDR _IOW(VHOST_VIRTIO, 0x11, struct vhost_vring_addr)
#define VHOST_SET_VRING_BASE _IOW(VHOST_VIRTIO, 0x12, struct vhost_vring_state)
#define VHOST_GET_VRING_BASE _IOWR(VHOST_VIRTIO, 0x12, struct vhost_vring_state)
#define VHOST_SET_VRING_KICK _IOW(VHOST_VIRTIO, 0x20, struct vhost_vring_file)
#define VHOST_SET_VRING_CALL _IOW(VHOST_VIRTIO, 0x21, struct vhost_vring_file)
#define VHOST_SET_VRING_ERR _IOW(VHOST_VIRTIO, 0x22, struct vhost_vring_file)
#define VHOST_NET_SET_BACKEND _IOW(VHOST_VIRTIO, 0x30, struct vhost_vring_file)
#define VHOST_VDPA_GET_DEVICE_ID _IOR(VHOST_VIRTIO, 0x70, __u32)
#define VHOST_VDPA_GET_STATUS _IOR(VHOST_VIRTIO, 0x71, __u8)
#define VHOST_VDPA_SET_STATUS _IOW(VHOST_VIRTIO, 0x72, __u8)
#define VHOST_VDPA_SET_VRING_ENABLE _IOW(VHOST_VIRTIO, 0x75, struct vhost_vring_state)
#define VHOST_SET_BACKEND_FEATURES _IOW(VHOST_VIRTIO, 0x25, __u64)
#define VHOST_GET_BACKEND_FEATURES _IOR(VHOST_VIRTIO, 0x26, __u64)

static uint64_t vhost_req_user_to_vdpa[] = {
	[VHOST_USER_RESET_OWNER] = VHOST_RESET_OWNER,
	[VHOST_USER_SET_VRING_CALL] = VHOST_SET_VRING_CALL,
	[VHOST_USER_SET_VRING_ADDR] = VHOST_SET_VRING_ADDR,
	[VHOST_USER_SET_VRING_KICK] = VHOST_SET_VRING_KICK,
	[VHOST_USER_SET_STATUS] = VHOST_VDPA_SET_STATUS,
	[VHOST_USER_GET_STATUS] = VHOST_VDPA_GET_STATUS,
};

/* no alignment requirement */
struct vhost_iotlb_msg {
	uint64_t iova;
	uint64_t size;
	uint64_t uaddr;
#define VHOST_ACCESS_RO      0x1
#define VHOST_ACCESS_WO      0x2
#define VHOST_ACCESS_RW      0x3
	uint8_t perm;
#define VHOST_IOTLB_MISS           1
#define VHOST_IOTLB_UPDATE         2
#define VHOST_IOTLB_INVALIDATE     3
#define VHOST_IOTLB_ACCESS_FAIL    4
#define VHOST_IOTLB_BATCH_BEGIN    5
#define VHOST_IOTLB_BATCH_END      6
	uint8_t type;
};

#define VHOST_IOTLB_MSG_V2 0x2

struct vhost_msg {
	uint32_t type;
	uint32_t reserved;
	union {
		struct vhost_iotlb_msg iotlb;
		uint8_t padding[64];
	};
};
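
/* IOTLB requests are not sent as ioctls: the helpers below fill a
 * struct vhost_msg of type VHOST_IOTLB_MSG_V2 and write() it directly
 * to the vhost-vdpa device fd.
 */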


static int
vhost_vdpa_ioctl(int fd, uint64_t request, void *arg)
{
	int ret;

	ret = ioctl(fd, request, arg);
	if (ret) {
		PMD_DRV_LOG(ERR, "Vhost-vDPA ioctl %"PRIu64" failed (%s)",
				request, strerror(errno));
		return -1;
	}

	return 0;
}

static int
vhost_vdpa_set_owner(struct virtio_user_dev *dev)
{
	return vhost_vdpa_ioctl(dev->vhostfd, VHOST_SET_OWNER, NULL);
}

static int
vhost_vdpa_get_backend_features(struct virtio_user_dev *dev, uint64_t *features)
{
	return vhost_vdpa_ioctl(dev->vhostfd, VHOST_GET_BACKEND_FEATURES, features);
}

static int
vhost_vdpa_set_backend_features(struct virtio_user_dev *dev, uint64_t features)
{
	return vhost_vdpa_ioctl(dev->vhostfd, VHOST_SET_BACKEND_FEATURES, &features);
}

static int
vhost_vdpa_get_features(struct virtio_user_dev *dev, uint64_t *features)
{
	int ret;

	ret = vhost_vdpa_ioctl(dev->vhostfd, VHOST_GET_FEATURES, features);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to get features");
		return -1;
	}

	/* Multiqueue not supported for now */
	*features &= ~(1ULL << VIRTIO_NET_F_MQ);

	return 0;
}

static int
vhost_vdpa_set_features(struct virtio_user_dev *dev, uint64_t features)
{
	/* WORKAROUND: vhost-vdpa maps guest memory through the IOTLB API,
	 * so always advertise IOMMU platform support.
	 */
	features |= 1ULL << VIRTIO_F_IOMMU_PLATFORM;

	return vhost_vdpa_ioctl(dev->vhostfd, VHOST_SET_FEATURES, &features);
}

static int
vhost_vdpa_iotlb_batch_begin(struct virtio_user_dev *dev)
{
	struct vhost_msg msg = {};

	if (!(dev->protocol_features & (1ULL << VHOST_BACKEND_F_IOTLB_BATCH)))
		return 0;

	if (!(dev->protocol_features & (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2))) {
		PMD_DRV_LOG(ERR, "IOTLB_MSG_V2 not supported by the backend.");
		return -1;
	}

	msg.type = VHOST_IOTLB_MSG_V2;
	msg.iotlb.type = VHOST_IOTLB_BATCH_BEGIN;

	if (write(dev->vhostfd, &msg, sizeof(msg)) != sizeof(msg)) {
		PMD_DRV_LOG(ERR, "Failed to send IOTLB batch begin (%s)",
				strerror(errno));
		return -1;
	}

	return 0;
}

static int
vhost_vdpa_iotlb_batch_end(struct virtio_user_dev *dev)
{
	struct vhost_msg msg = {};

	if (!(dev->protocol_features & (1ULL << VHOST_BACKEND_F_IOTLB_BATCH)))
		return 0;

	if (!(dev->protocol_features & (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2))) {
		PMD_DRV_LOG(ERR, "IOTLB_MSG_V2 not supported by the backend.");
		return -1;
	}

	msg.type = VHOST_IOTLB_MSG_V2;
	msg.iotlb.type = VHOST_IOTLB_BATCH_END;

	if (write(dev->vhostfd, &msg, sizeof(msg)) != sizeof(msg)) {
		PMD_DRV_LOG(ERR, "Failed to send IOTLB batch end (%s)",
				strerror(errno));
		return -1;
	}

	return 0;
}

static int
vhost_vdpa_dma_map(struct virtio_user_dev *dev, void *addr,
			uint64_t iova, size_t len)
{
	struct vhost_msg msg = {};

	if (!(dev->protocol_features & (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2))) {
		PMD_DRV_LOG(ERR, "IOTLB_MSG_V2 not supported by the backend.");
		return -1;
	}

	msg.type = VHOST_IOTLB_MSG_V2;
	msg.iotlb.type = VHOST_IOTLB_UPDATE;
	msg.iotlb.iova = iova;
	msg.iotlb.uaddr = (uint64_t)(uintptr_t)addr;
	msg.iotlb.size = len;
	msg.iotlb.perm = VHOST_ACCESS_RW;

	PMD_DRV_LOG(DEBUG, "%s: iova: 0x%" PRIx64 ", addr: %p, len: 0x%zx",
			__func__, iova, addr, len);

	if (write(dev->vhostfd, &msg, sizeof(msg)) != sizeof(msg)) {
		PMD_DRV_LOG(ERR, "Failed to send IOTLB update (%s)",
				strerror(errno));
		return -1;
	}

	return 0;
}

static int
vhost_vdpa_dma_unmap(struct virtio_user_dev *dev, __rte_unused void *addr,
			uint64_t iova, size_t len)
{
	struct vhost_msg msg = {};

	if (!(dev->protocol_features & (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2))) {
		PMD_DRV_LOG(ERR, "IOTLB_MSG_V2 not supported by the backend.");
		return -1;
	}

	msg.type = VHOST_IOTLB_MSG_V2;
	msg.iotlb.type = VHOST_IOTLB_INVALIDATE;
	msg.iotlb.iova = iova;
	msg.iotlb.size = len;

	PMD_DRV_LOG(DEBUG, "%s: iova: 0x%" PRIx64 ", len: 0x%zx",
			__func__, iova, len);

	if (write(dev->vhostfd, &msg, sizeof(msg)) != sizeof(msg)) {
		PMD_DRV_LOG(ERR, "Failed to send IOTLB invalidate (%s)",
				strerror(errno));
		return -1;
	}

	return 0;
}

static int
vhost_vdpa_dma_map_batch(struct virtio_user_dev *dev, void *addr,
			uint64_t iova, size_t len)
{
	int ret;

	if (vhost_vdpa_iotlb_batch_begin(dev) < 0)
		return -1;

	ret = vhost_vdpa_dma_map(dev, addr, iova, len);

	if (vhost_vdpa_iotlb_batch_end(dev) < 0)
		return -1;

	return ret;
}

static int
vhost_vdpa_dma_unmap_batch(struct virtio_user_dev *dev, void *addr,
			uint64_t iova, size_t len)
{
	int ret;

	if (vhost_vdpa_iotlb_batch_begin(dev) < 0)
		return -1;

	ret = vhost_vdpa_dma_unmap(dev, addr, iova, len);

	if (vhost_vdpa_iotlb_batch_end(dev) < 0)
		return -1;

	return ret;
}
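
/* The _batch variants are the ones exposed through virtio_ops_vdpa below,
 * so callers outside this file always get batch-delimited updates when the
 * backend negotiated VHOST_BACKEND_F_IOTLB_BATCH; otherwise begin/end are
 * no-ops and single messages are sent.
 */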

static int
vhost_vdpa_map_contig(const struct rte_memseg_list *msl,
		const struct rte_memseg *ms, size_t len, void *arg)
{
	struct virtio_user_dev *dev = arg;

	if (msl->external)
		return 0;

	return vhost_vdpa_dma_map(dev, ms->addr, ms->iova, len);
}

static int
vhost_vdpa_map(const struct rte_memseg_list *msl, const struct rte_memseg *ms,
		void *arg)
{
	struct virtio_user_dev *dev = arg;

	/* skip external memory that isn't a heap */
	if (msl->external && !msl->heap)
		return 0;

	/* skip any segments with invalid IOVA addresses */
	if (ms->iova == RTE_BAD_IOVA)
		return 0;

	/* if IOVA mode is VA, we've already mapped the internal segments */
	if (!msl->external && rte_eal_iova_mode() == RTE_IOVA_VA)
		return 0;

	return vhost_vdpa_dma_map(dev, ms->addr, ms->iova, ms->len);
}

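/* Rebuild the backend's view of guest memory: invalidate the whole IOVA
 * range to flush previous mappings, then walk all EAL memsegs and map them
 * again. A non-zero return from a walk callback aborts the walk and is
 * propagated as this function's return value.
 */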
static int
vhost_vdpa_set_memory_table(struct virtio_user_dev *dev)
{
	int ret;

	if (vhost_vdpa_iotlb_batch_begin(dev) < 0)
		return -1;

	vhost_vdpa_dma_unmap(dev, NULL, 0, SIZE_MAX);

	if (rte_eal_iova_mode() == RTE_IOVA_VA) {
		/* with IOVA as VA mode, we can get away with mapping contiguous
		 * chunks rather than going page-by-page.
		 */
		ret = rte_memseg_contig_walk_thread_unsafe(
				vhost_vdpa_map_contig, dev);
		if (ret)
			goto batch_end;
		/* we have to continue the walk because we've skipped the
		 * external segments during the contig walk.
		 */
	}
	ret = rte_memseg_walk_thread_unsafe(vhost_vdpa_map, dev);

batch_end:
	if (vhost_vdpa_iotlb_batch_end(dev) < 0)
		return -1;

	return ret;
}

static int
vhost_vdpa_set_vring_enable(struct virtio_user_dev *dev, struct vhost_vring_state *state)
{
	return vhost_vdpa_ioctl(dev->vhostfd, VHOST_VDPA_SET_VRING_ENABLE, state);
}

static int
vhost_vdpa_set_vring_num(struct virtio_user_dev *dev, struct vhost_vring_state *state)
{
	return vhost_vdpa_ioctl(dev->vhostfd, VHOST_SET_VRING_NUM, state);
}

static int
vhost_vdpa_set_vring_base(struct virtio_user_dev *dev, struct vhost_vring_state *state)
{
	return vhost_vdpa_ioctl(dev->vhostfd, VHOST_SET_VRING_BASE, state);
}

static int
vhost_vdpa_get_vring_base(struct virtio_user_dev *dev, struct vhost_vring_state *state)
{
	return vhost_vdpa_ioctl(dev->vhostfd, VHOST_GET_VRING_BASE, state);
}

/* With the features below negotiated, the vhost-vdpa backend does not need
 * to perform checksum and TSO itself: this information is passed to
 * virtio_user through the virtio-net header.
 */
#define VHOST_VDPA_GUEST_OFFLOADS_MASK	\
	((1ULL << VIRTIO_NET_F_GUEST_CSUM) |	\
	 (1ULL << VIRTIO_NET_F_GUEST_TSO4) |	\
	 (1ULL << VIRTIO_NET_F_GUEST_TSO6) |	\
	 (1ULL << VIRTIO_NET_F_GUEST_ECN)  |	\
	 (1ULL << VIRTIO_NET_F_GUEST_UFO))

#define VHOST_VDPA_HOST_OFFLOADS_MASK		\
	((1ULL << VIRTIO_NET_F_HOST_TSO4) |	\
	 (1ULL << VIRTIO_NET_F_HOST_TSO6) |	\
	 (1ULL << VIRTIO_NET_F_CSUM))

static int
vhost_vdpa_send_request(struct virtio_user_dev *dev,
		   enum vhost_user_request req,
		   void *arg)
{
	int ret = -1;
	uint64_t req_vdpa;

	PMD_DRV_LOG(INFO, "%s", vhost_msg_strings[req]);

	req_vdpa = vhost_req_user_to_vdpa[req];

	switch (req_vdpa) {
	case VHOST_SET_VRING_ADDR:
	case VHOST_SET_VRING_KICK:
	case VHOST_SET_VRING_CALL:
		PMD_DRV_LOG(DEBUG, "vhostfd=%d, index=%u",
			    dev->vhostfd, *(unsigned int *)arg);
		break;
	default:
		break;
	}

	ret = ioctl(dev->vhostfd, req_vdpa, arg);
	if (ret < 0)
		PMD_DRV_LOG(ERR, "%s failed: %s",
			    vhost_msg_strings[req], strerror(errno));

	return ret;
}

/**
 * Set up environment to talk with a vhost-vdpa backend.
 *
 * @return
 *   - (-1) if fail to set up;
 *   - (>=0) if successful.
 */
static int
vhost_vdpa_setup(struct virtio_user_dev *dev)
{
	uint32_t did = (uint32_t)-1;

	dev->vhostfd = open(dev->path, O_RDWR);
	if (dev->vhostfd < 0) {
		PMD_DRV_LOG(ERR, "Failed to open %s: %s",
				dev->path, strerror(errno));
		return -1;
	}

	if (ioctl(dev->vhostfd, VHOST_VDPA_GET_DEVICE_ID, &did) < 0 ||
			did != VIRTIO_ID_NETWORK) {
		PMD_DRV_LOG(ERR, "Invalid vdpa device ID: %u", did);
		close(dev->vhostfd);
		dev->vhostfd = -1;
		return -1;
	}

	return 0;
}

static int
vhost_vdpa_enable_queue_pair(struct virtio_user_dev *dev,
			       uint16_t pair_idx,
			       int enable)
{
	int i;

	if (dev->qp_enabled[pair_idx] == enable)
		return 0;

	/* Each queue pair maps to two vrings: RX at 2 * pair_idx,
	 * TX at 2 * pair_idx + 1.
	 */
	for (i = 0; i < 2; ++i) {
		struct vhost_vring_state state = {
			.index = pair_idx * 2 + i,
			.num   = enable,
		};

		if (vhost_vdpa_set_vring_enable(dev, &state))
			return -1;
	}

	dev->qp_enabled[pair_idx] = enable;

	return 0;
}

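/* Note: backend selection is done outside this file; presumably the
 * virtio_user device layer picks virtio_ops_vdpa when the supplied device
 * path refers to a vhost-vdpa character device.
 */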
struct virtio_user_backend_ops virtio_ops_vdpa = {
	.setup = vhost_vdpa_setup,
	.set_owner = vhost_vdpa_set_owner,
	.get_features = vhost_vdpa_get_features,
	.set_features = vhost_vdpa_set_features,
	.get_protocol_features = vhost_vdpa_get_backend_features,
	.set_protocol_features = vhost_vdpa_set_backend_features,
	.set_memory_table = vhost_vdpa_set_memory_table,
	.set_vring_num = vhost_vdpa_set_vring_num,
	.set_vring_base = vhost_vdpa_set_vring_base,
	.get_vring_base = vhost_vdpa_get_vring_base,
	.send_request = vhost_vdpa_send_request,
	.enable_qp = vhost_vdpa_enable_queue_pair,
	.dma_map = vhost_vdpa_dma_map_batch,
	.dma_unmap = vhost_vdpa_dma_unmap_batch,
};