net/virtio: add virtio-user status ops
drivers/net/virtio/virtio_user/vhost_vdpa.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020 Red Hat Inc.
 */
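
/*
 * Vhost-vDPA backend for virtio-user: drives a vhost-vDPA character
 * device (e.g. /dev/vhost-vdpa-0) through ioctls and IOTLB messages.
 */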

#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/types.h>
#include <sys/stat.h>

#include <rte_memory.h>

#include "vhost.h"
#include "virtio_user_dev.h"

/* vhost kernel & vdpa ioctls */
#define VHOST_VIRTIO 0xAF
#define VHOST_GET_FEATURES _IOR(VHOST_VIRTIO, 0x00, __u64)
#define VHOST_SET_FEATURES _IOW(VHOST_VIRTIO, 0x00, __u64)
#define VHOST_SET_OWNER _IO(VHOST_VIRTIO, 0x01)
#define VHOST_RESET_OWNER _IO(VHOST_VIRTIO, 0x02)
#define VHOST_SET_LOG_BASE _IOW(VHOST_VIRTIO, 0x04, __u64)
#define VHOST_SET_LOG_FD _IOW(VHOST_VIRTIO, 0x07, int)
#define VHOST_SET_VRING_NUM _IOW(VHOST_VIRTIO, 0x10, struct vhost_vring_state)
#define VHOST_SET_VRING_ADDR _IOW(VHOST_VIRTIO, 0x11, struct vhost_vring_addr)
#define VHOST_SET_VRING_BASE _IOW(VHOST_VIRTIO, 0x12, struct vhost_vring_state)
#define VHOST_GET_VRING_BASE _IOWR(VHOST_VIRTIO, 0x12, struct vhost_vring_state)
#define VHOST_SET_VRING_KICK _IOW(VHOST_VIRTIO, 0x20, struct vhost_vring_file)
#define VHOST_SET_VRING_CALL _IOW(VHOST_VIRTIO, 0x21, struct vhost_vring_file)
#define VHOST_SET_VRING_ERR _IOW(VHOST_VIRTIO, 0x22, struct vhost_vring_file)
#define VHOST_SET_BACKEND_FEATURES _IOW(VHOST_VIRTIO, 0x25, __u64)
#define VHOST_GET_BACKEND_FEATURES _IOR(VHOST_VIRTIO, 0x26, __u64)
#define VHOST_NET_SET_BACKEND _IOW(VHOST_VIRTIO, 0x30, struct vhost_vring_file)
#define VHOST_VDPA_GET_DEVICE_ID _IOR(VHOST_VIRTIO, 0x70, __u32)
#define VHOST_VDPA_GET_STATUS _IOR(VHOST_VIRTIO, 0x71, __u8)
#define VHOST_VDPA_SET_STATUS _IOW(VHOST_VIRTIO, 0x72, __u8)
#define VHOST_VDPA_SET_VRING_ENABLE _IOW(VHOST_VIRTIO, 0x75, struct vhost_vring_state)

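/*
 * Translation table from generic vhost-user requests to vhost-vDPA
 * ioctls; only VHOST_USER_RESET_OWNER is translated for now.
 */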
static uint64_t vhost_req_user_to_vdpa[] = {
        [VHOST_USER_RESET_OWNER] = VHOST_RESET_OWNER,
};

/* IOTLB message payload as in the kernel vhost UAPI; no alignment requirement. */
struct vhost_iotlb_msg {
        uint64_t iova;
        uint64_t size;
        uint64_t uaddr;
#define VHOST_ACCESS_RO      0x1
#define VHOST_ACCESS_WO      0x2
#define VHOST_ACCESS_RW      0x3
        uint8_t perm;
#define VHOST_IOTLB_MISS           1
#define VHOST_IOTLB_UPDATE         2
#define VHOST_IOTLB_INVALIDATE     3
#define VHOST_IOTLB_ACCESS_FAIL    4
#define VHOST_IOTLB_BATCH_BEGIN    5
#define VHOST_IOTLB_BATCH_END      6
        uint8_t type;
};

#define VHOST_IOTLB_MSG_V2 0x2

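/*
 * Same layout as the kernel's struct vhost_msg_v2: IOTLB updates are
 * written to the vhost-vDPA fd wrapped in this message.
 */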
struct vhost_msg {
        uint32_t type;
        uint32_t reserved;
        union {
                struct vhost_iotlb_msg iotlb;
                uint8_t padding[64];
        };
};

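/* Thin ioctl() wrapper that logs the failing request and errno. */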
static int
vhost_vdpa_ioctl(int fd, uint64_t request, void *arg)
{
        int ret;

        ret = ioctl(fd, request, arg);
        if (ret) {
                PMD_DRV_LOG(ERR, "Vhost-vDPA ioctl %"PRIu64" failed (%s)",
                                request, strerror(errno));
                return -1;
        }

        return 0;
}

static int
vhost_vdpa_set_owner(struct virtio_user_dev *dev)
{
        return vhost_vdpa_ioctl(dev->vhostfd, VHOST_SET_OWNER, NULL);
}

static int
vhost_vdpa_get_backend_features(struct virtio_user_dev *dev, uint64_t *features)
{
        return vhost_vdpa_ioctl(dev->vhostfd, VHOST_GET_BACKEND_FEATURES, features);
}

static int
vhost_vdpa_set_backend_features(struct virtio_user_dev *dev, uint64_t features)
{
        return vhost_vdpa_ioctl(dev->vhostfd, VHOST_SET_BACKEND_FEATURES, &features);
}

static int
vhost_vdpa_get_features(struct virtio_user_dev *dev, uint64_t *features)
{
        int ret;

        ret = vhost_vdpa_ioctl(dev->vhostfd, VHOST_GET_FEATURES, features);
        if (ret) {
                PMD_DRV_LOG(ERR, "Failed to get features");
                return -1;
        }

        /* Multiqueue not supported for now */
        *features &= ~(1ULL << VIRTIO_NET_F_MQ);

        return 0;
}

static int
vhost_vdpa_set_features(struct virtio_user_dev *dev, uint64_t features)
{
        /*
         * WORKAROUND: force VIRTIO_F_IOMMU_PLATFORM, which the vhost-vDPA
         * backend expects since all memory is remapped through its IOTLB
         * interface.
         */
        features |= 1ULL << VIRTIO_F_IOMMU_PLATFORM;

        return vhost_vdpa_ioctl(dev->vhostfd, VHOST_SET_FEATURES, &features);
}

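/*
 * With VHOST_BACKEND_F_IOTLB_BATCH negotiated, IOTLB updates enclosed
 * between BATCH_BEGIN and BATCH_END messages can be committed by the
 * backend in one go. The messages are plain write()s on the vhost-vDPA
 * fd, hence the additional VHOST_BACKEND_F_IOTLB_MSG_V2 requirement.
 */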
static int
vhost_vdpa_iotlb_batch_begin(struct virtio_user_dev *dev)
{
        struct vhost_msg msg = {};

        if (!(dev->protocol_features & (1ULL << VHOST_BACKEND_F_IOTLB_BATCH)))
                return 0;

        if (!(dev->protocol_features & (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2))) {
                PMD_DRV_LOG(ERR, "IOTLB_MSG_V2 not supported by the backend.");
                return -1;
        }

        msg.type = VHOST_IOTLB_MSG_V2;
        msg.iotlb.type = VHOST_IOTLB_BATCH_BEGIN;

        if (write(dev->vhostfd, &msg, sizeof(msg)) != sizeof(msg)) {
                PMD_DRV_LOG(ERR, "Failed to send IOTLB batch begin (%s)",
                                strerror(errno));
                return -1;
        }

        return 0;
}

static int
vhost_vdpa_iotlb_batch_end(struct virtio_user_dev *dev)
{
        struct vhost_msg msg = {};

        if (!(dev->protocol_features & (1ULL << VHOST_BACKEND_F_IOTLB_BATCH)))
                return 0;

        if (!(dev->protocol_features & (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2))) {
                PMD_DRV_LOG(ERR, "IOTLB_MSG_V2 not supported by the backend.");
                return -1;
        }

        msg.type = VHOST_IOTLB_MSG_V2;
        msg.iotlb.type = VHOST_IOTLB_BATCH_END;

        if (write(dev->vhostfd, &msg, sizeof(msg)) != sizeof(msg)) {
                PMD_DRV_LOG(ERR, "Failed to send IOTLB batch end (%s)",
                                strerror(errno));
                return -1;
        }

        return 0;
}

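/*
 * Map (VHOST_IOTLB_UPDATE) or unmap (VHOST_IOTLB_INVALIDATE) a host
 * virtual address range at the given IOVA by writing a v2 IOTLB
 * message to the vhost-vDPA fd.
 */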
static int
vhost_vdpa_dma_map(struct virtio_user_dev *dev, void *addr,
                                  uint64_t iova, size_t len)
{
        struct vhost_msg msg = {};

        if (!(dev->protocol_features & (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2))) {
                PMD_DRV_LOG(ERR, "IOTLB_MSG_V2 not supported by the backend.");
                return -1;
        }

        msg.type = VHOST_IOTLB_MSG_V2;
        msg.iotlb.type = VHOST_IOTLB_UPDATE;
        msg.iotlb.iova = iova;
        msg.iotlb.uaddr = (uint64_t)(uintptr_t)addr;
        msg.iotlb.size = len;
        msg.iotlb.perm = VHOST_ACCESS_RW;

        PMD_DRV_LOG(DEBUG, "%s: iova: 0x%" PRIx64 ", addr: %p, len: 0x%zx",
                        __func__, iova, addr, len);

        if (write(dev->vhostfd, &msg, sizeof(msg)) != sizeof(msg)) {
                PMD_DRV_LOG(ERR, "Failed to send IOTLB update (%s)",
                                strerror(errno));
                return -1;
        }

        return 0;
}

static int
vhost_vdpa_dma_unmap(struct virtio_user_dev *dev, __rte_unused void *addr,
                                  uint64_t iova, size_t len)
{
        struct vhost_msg msg = {};

        if (!(dev->protocol_features & (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2))) {
                PMD_DRV_LOG(ERR, "IOTLB_MSG_V2 not supported by the backend.");
                return -1;
        }

        msg.type = VHOST_IOTLB_MSG_V2;
        msg.iotlb.type = VHOST_IOTLB_INVALIDATE;
        msg.iotlb.iova = iova;
        msg.iotlb.size = len;

        PMD_DRV_LOG(DEBUG, "%s: iova: 0x%" PRIx64 ", len: 0x%zx",
                        __func__, iova, len);

        if (write(dev->vhostfd, &msg, sizeof(msg)) != sizeof(msg)) {
                PMD_DRV_LOG(ERR, "Failed to send IOTLB invalidate (%s)",
                                strerror(errno));
                return -1;
        }

        return 0;
}

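/*
 * Single map/unmap wrapped in its own batch; these are what the
 * dma_map/dma_unmap ops point to, for callers outside of
 * vhost_vdpa_set_memory_table().
 */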
static int
vhost_vdpa_dma_map_batch(struct virtio_user_dev *dev, void *addr,
                                  uint64_t iova, size_t len)
{
        int ret;

        if (vhost_vdpa_iotlb_batch_begin(dev) < 0)
                return -1;

        ret = vhost_vdpa_dma_map(dev, addr, iova, len);

        if (vhost_vdpa_iotlb_batch_end(dev) < 0)
                return -1;

        return ret;
}

static int
vhost_vdpa_dma_unmap_batch(struct virtio_user_dev *dev, void *addr,
                                  uint64_t iova, size_t len)
{
        int ret;

        if (vhost_vdpa_iotlb_batch_begin(dev) < 0)
                return -1;

        ret = vhost_vdpa_dma_unmap(dev, addr, iova, len);

        if (vhost_vdpa_iotlb_batch_end(dev) < 0)
                return -1;

        return ret;
}

static int
vhost_vdpa_map_contig(const struct rte_memseg_list *msl,
                const struct rte_memseg *ms, size_t len, void *arg)
{
        struct virtio_user_dev *dev = arg;

        if (msl->external)
                return 0;

        return vhost_vdpa_dma_map(dev, ms->addr, ms->iova, len);
}

static int
vhost_vdpa_map(const struct rte_memseg_list *msl, const struct rte_memseg *ms,
                void *arg)
{
        struct virtio_user_dev *dev = arg;

        /* skip external memory that isn't a heap */
        if (msl->external && !msl->heap)
                return 0;

        /* skip any segments with invalid IOVA addresses */
        if (ms->iova == RTE_BAD_IOVA)
                return 0;

        /* if IOVA mode is VA, we've already mapped the internal segments */
        if (!msl->external && rte_eal_iova_mode() == RTE_IOVA_VA)
                return 0;

        return vhost_vdpa_dma_map(dev, ms->addr, ms->iova, ms->len);
}

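/*
 * Rebuild the backend mappings from scratch: invalidate the whole IOVA
 * space, then walk the EAL memsegs and map them again, all within a
 * single IOTLB batch.
 */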
static int
vhost_vdpa_set_memory_table(struct virtio_user_dev *dev)
{
        int ret;

        if (vhost_vdpa_iotlb_batch_begin(dev) < 0)
                return -1;

        vhost_vdpa_dma_unmap(dev, NULL, 0, SIZE_MAX);

        if (rte_eal_iova_mode() == RTE_IOVA_VA) {
                /* with IOVA as VA mode, we can get away with mapping contiguous
                 * chunks rather than going page-by-page.
                 */
                ret = rte_memseg_contig_walk_thread_unsafe(
                                vhost_vdpa_map_contig, dev);
                if (ret)
                        goto batch_end;
                /* we have to continue the walk because we've skipped the
                 * external segments during the contig walk.
                 */
        }
        ret = rte_memseg_walk_thread_unsafe(vhost_vdpa_map, dev);

batch_end:
        if (vhost_vdpa_iotlb_batch_end(dev) < 0)
                return -1;

        return ret;
}

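/* One-to-one wrappers around the vring and status ioctls. */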
static int
vhost_vdpa_set_vring_enable(struct virtio_user_dev *dev, struct vhost_vring_state *state)
{
        return vhost_vdpa_ioctl(dev->vhostfd, VHOST_VDPA_SET_VRING_ENABLE, state);
}

static int
vhost_vdpa_set_vring_num(struct virtio_user_dev *dev, struct vhost_vring_state *state)
{
        return vhost_vdpa_ioctl(dev->vhostfd, VHOST_SET_VRING_NUM, state);
}

static int
vhost_vdpa_set_vring_base(struct virtio_user_dev *dev, struct vhost_vring_state *state)
{
        return vhost_vdpa_ioctl(dev->vhostfd, VHOST_SET_VRING_BASE, state);
}

static int
vhost_vdpa_get_vring_base(struct virtio_user_dev *dev, struct vhost_vring_state *state)
{
        return vhost_vdpa_ioctl(dev->vhostfd, VHOST_GET_VRING_BASE, state);
}

static int
vhost_vdpa_set_vring_call(struct virtio_user_dev *dev, struct vhost_vring_file *file)
{
        return vhost_vdpa_ioctl(dev->vhostfd, VHOST_SET_VRING_CALL, file);
}

static int
vhost_vdpa_set_vring_kick(struct virtio_user_dev *dev, struct vhost_vring_file *file)
{
        return vhost_vdpa_ioctl(dev->vhostfd, VHOST_SET_VRING_KICK, file);
}

static int
vhost_vdpa_set_vring_addr(struct virtio_user_dev *dev, struct vhost_vring_addr *addr)
{
        return vhost_vdpa_ioctl(dev->vhostfd, VHOST_SET_VRING_ADDR, addr);
}

static int
vhost_vdpa_get_status(struct virtio_user_dev *dev, uint8_t *status)
{
        return vhost_vdpa_ioctl(dev->vhostfd, VHOST_VDPA_GET_STATUS, status);
}

static int
vhost_vdpa_set_status(struct virtio_user_dev *dev, uint8_t status)
{
        return vhost_vdpa_ioctl(dev->vhostfd, VHOST_VDPA_SET_STATUS, &status);
}

/* With the features below negotiated, vhost-vDPA does not need to do
 * checksum and TSO itself: the relevant info is passed to virtio_user
 * through the virtio net header.
 */
#define VHOST_VDPA_GUEST_OFFLOADS_MASK  \
        ((1ULL << VIRTIO_NET_F_GUEST_CSUM) |    \
         (1ULL << VIRTIO_NET_F_GUEST_TSO4) |    \
         (1ULL << VIRTIO_NET_F_GUEST_TSO6) |    \
         (1ULL << VIRTIO_NET_F_GUEST_ECN)  |    \
         (1ULL << VIRTIO_NET_F_GUEST_UFO))

#define VHOST_VDPA_HOST_OFFLOADS_MASK           \
        ((1ULL << VIRTIO_NET_F_HOST_TSO4) |     \
         (1ULL << VIRTIO_NET_F_HOST_TSO6) |     \
         (1ULL << VIRTIO_NET_F_CSUM))

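/*
 * Fallback path for requests without a dedicated op: translate the
 * vhost-user request ID through vhost_req_user_to_vdpa[] and issue it
 * as a vhost-vDPA ioctl.
 */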
static int
vhost_vdpa_send_request(struct virtio_user_dev *dev,
                   enum vhost_user_request req,
                   void *arg)
{
        int ret = -1;
        uint64_t req_vdpa;

        PMD_DRV_LOG(INFO, "%s", vhost_msg_strings[req]);

        req_vdpa = vhost_req_user_to_vdpa[req];

        switch (req_vdpa) {
        case VHOST_SET_VRING_ADDR:
                PMD_DRV_LOG(DEBUG, "vhostfd=%d, index=%u",
                            dev->vhostfd, *(unsigned int *)arg);
                break;
        default:
                break;
        }

        ret = ioctl(dev->vhostfd, req_vdpa, arg);
        if (ret < 0)
                PMD_DRV_LOG(ERR, "%s failed: %s",
                            vhost_msg_strings[req], strerror(errno));

        return ret;
}

/**
 * Set up environment to talk with a vhost vdpa backend.
 *
 * @return
 *   - (-1) on failure;
 *   - (>=0) on success.
 */
static int
vhost_vdpa_setup(struct virtio_user_dev *dev)
{
        uint32_t did = (uint32_t)-1;

        dev->vhostfd = open(dev->path, O_RDWR);
        if (dev->vhostfd < 0) {
                PMD_DRV_LOG(ERR, "Failed to open %s: %s",
                                dev->path, strerror(errno));
                return -1;
        }

        if (ioctl(dev->vhostfd, VHOST_VDPA_GET_DEVICE_ID, &did) < 0 ||
                        did != VIRTIO_ID_NETWORK) {
                PMD_DRV_LOG(ERR, "Invalid vdpa device ID: %u", did);
                /* don't leak the fd on error */
                close(dev->vhostfd);
                dev->vhostfd = -1;
                return -1;
        }

        return 0;
}

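/*
 * A queue pair maps to two vrings: RX ring 2 * pair_idx and TX ring
 * 2 * pair_idx + 1; both are enabled or disabled together.
 */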
static int
vhost_vdpa_enable_queue_pair(struct virtio_user_dev *dev,
                               uint16_t pair_idx,
                               int enable)
{
        int i;

        if (dev->qp_enabled[pair_idx] == enable)
                return 0;

        for (i = 0; i < 2; ++i) {
                struct vhost_vring_state state = {
                        .index = pair_idx * 2 + i,
                        .num   = enable,
                };

                if (vhost_vdpa_set_vring_enable(dev, &state))
                        return -1;
        }

        dev->qp_enabled[pair_idx] = enable;

        return 0;
}

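/*
 * Ops table picked up by the generic virtio-user layer when the
 * vhost-vDPA backend is selected. Note the "protocol features" ops map
 * onto the vhost-vDPA backend-features ioctls.
 */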
struct virtio_user_backend_ops virtio_ops_vdpa = {
        .setup = vhost_vdpa_setup,
        .set_owner = vhost_vdpa_set_owner,
        .get_features = vhost_vdpa_get_features,
        .set_features = vhost_vdpa_set_features,
        .get_protocol_features = vhost_vdpa_get_backend_features,
        .set_protocol_features = vhost_vdpa_set_backend_features,
        .set_memory_table = vhost_vdpa_set_memory_table,
        .set_vring_num = vhost_vdpa_set_vring_num,
        .set_vring_base = vhost_vdpa_set_vring_base,
        .get_vring_base = vhost_vdpa_get_vring_base,
        .set_vring_call = vhost_vdpa_set_vring_call,
        .set_vring_kick = vhost_vdpa_set_vring_kick,
        .set_vring_addr = vhost_vdpa_set_vring_addr,
        .get_status = vhost_vdpa_get_status,
        .set_status = vhost_vdpa_set_status,
        .send_request = vhost_vdpa_send_request,
        .enable_qp = vhost_vdpa_enable_queue_pair,
        .dma_map = vhost_vdpa_dma_map_batch,
        .dma_unmap = vhost_vdpa_dma_unmap_batch,
};