/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020 Red Hat Inc.
 */

#include <sys/ioctl.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#include <stdlib.h>

#include <rte_memory.h>

#include "vhost.h"
#include "virtio_user_dev.h"

/* vhost kernel & vdpa ioctls */
#define VHOST_VIRTIO 0xAF
#define VHOST_GET_FEATURES _IOR(VHOST_VIRTIO, 0x00, __u64)
#define VHOST_SET_FEATURES _IOW(VHOST_VIRTIO, 0x00, __u64)
#define VHOST_SET_OWNER _IO(VHOST_VIRTIO, 0x01)
#define VHOST_RESET_OWNER _IO(VHOST_VIRTIO, 0x02)
#define VHOST_SET_MEM_TABLE _IOW(VHOST_VIRTIO, 0x03, void *)
#define VHOST_SET_LOG_BASE _IOW(VHOST_VIRTIO, 0x04, __u64)
#define VHOST_SET_LOG_FD _IOW(VHOST_VIRTIO, 0x07, int)
#define VHOST_SET_VRING_NUM _IOW(VHOST_VIRTIO, 0x10, struct vhost_vring_state)
#define VHOST_SET_VRING_ADDR _IOW(VHOST_VIRTIO, 0x11, struct vhost_vring_addr)
#define VHOST_SET_VRING_BASE _IOW(VHOST_VIRTIO, 0x12, struct vhost_vring_state)
#define VHOST_GET_VRING_BASE _IOWR(VHOST_VIRTIO, 0x12, struct vhost_vring_state)
#define VHOST_SET_VRING_KICK _IOW(VHOST_VIRTIO, 0x20, struct vhost_vring_file)
#define VHOST_SET_VRING_CALL _IOW(VHOST_VIRTIO, 0x21, struct vhost_vring_file)
#define VHOST_SET_VRING_ERR _IOW(VHOST_VIRTIO, 0x22, struct vhost_vring_file)
#define VHOST_NET_SET_BACKEND _IOW(VHOST_VIRTIO, 0x30, struct vhost_vring_file)
#define VHOST_VDPA_GET_DEVICE_ID _IOR(VHOST_VIRTIO, 0x70, __u32)
#define VHOST_VDPA_GET_STATUS _IOR(VHOST_VIRTIO, 0x71, __u8)
#define VHOST_VDPA_SET_STATUS _IOW(VHOST_VIRTIO, 0x72, __u8)
#define VHOST_VDPA_SET_VRING_ENABLE _IOW(VHOST_VIRTIO, 0x75, \
					 struct vhost_vring_state)
#define VHOST_SET_BACKEND_FEATURES _IOW(VHOST_VIRTIO, 0x25, __u64)
#define VHOST_GET_BACKEND_FEATURES _IOR(VHOST_VIRTIO, 0x26, __u64)
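
/*
 * These definitions mirror the kernel's vhost/vhost-vdpa UAPI: _IO/_IOR/_IOW
 * encode the transfer direction, the VHOST_VIRTIO (0xAF) magic, a request
 * number and the payload size. E.g. reading the backend feature bits is a
 * plain read ioctl (illustrative sketch only; see vhost_vdpa_setup() below
 * for a real ioctl call):
 *
 *	__u64 features;
 *	if (ioctl(vhostfd, VHOST_GET_FEATURES, &features) < 0)
 *		PMD_DRV_LOG(ERR, "ioctl failed: %s", strerror(errno));
 */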

static uint64_t vhost_req_user_to_vdpa[] = {
	[VHOST_USER_SET_OWNER] = VHOST_SET_OWNER,
	[VHOST_USER_RESET_OWNER] = VHOST_RESET_OWNER,
	[VHOST_USER_SET_FEATURES] = VHOST_SET_FEATURES,
	[VHOST_USER_GET_FEATURES] = VHOST_GET_FEATURES,
	[VHOST_USER_SET_VRING_CALL] = VHOST_SET_VRING_CALL,
	[VHOST_USER_SET_VRING_NUM] = VHOST_SET_VRING_NUM,
	[VHOST_USER_SET_VRING_BASE] = VHOST_SET_VRING_BASE,
	[VHOST_USER_GET_VRING_BASE] = VHOST_GET_VRING_BASE,
	[VHOST_USER_SET_VRING_ADDR] = VHOST_SET_VRING_ADDR,
	[VHOST_USER_SET_VRING_KICK] = VHOST_SET_VRING_KICK,
	[VHOST_USER_SET_MEM_TABLE] = VHOST_SET_MEM_TABLE,
	[VHOST_USER_SET_STATUS] = VHOST_VDPA_SET_STATUS,
	[VHOST_USER_GET_STATUS] = VHOST_VDPA_GET_STATUS,
	[VHOST_USER_SET_VRING_ENABLE] = VHOST_VDPA_SET_VRING_ENABLE,
	[VHOST_USER_GET_PROTOCOL_FEATURES] = VHOST_GET_BACKEND_FEATURES,
	[VHOST_USER_SET_PROTOCOL_FEATURES] = VHOST_SET_BACKEND_FEATURES,
};
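
/*
 * Requests arrive as generic vhost-user message types; indexing this table
 * translates them to the matching vhost-vdpa ioctl. Illustrative use (this
 * is what vhost_vdpa_ioctl() below does):
 *
 *	uint64_t req_vdpa = vhost_req_user_to_vdpa[VHOST_USER_SET_FEATURES];
 *	ioctl(dev->vhostfd, req_vdpa, &features);
 */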

/* no alignment requirement */
struct vhost_iotlb_msg {
	__u64 iova;
	__u64 size;
	__u64 uaddr;
#define VHOST_ACCESS_RO 0x1
#define VHOST_ACCESS_WO 0x2
#define VHOST_ACCESS_RW 0x3
	__u8 perm;
#define VHOST_IOTLB_MISS 1
#define VHOST_IOTLB_UPDATE 2
#define VHOST_IOTLB_INVALIDATE 3
#define VHOST_IOTLB_ACCESS_FAIL 4
	__u8 type;
};

#define VHOST_IOTLB_MSG_V2 0x2

struct vhost_msg {
	__u32 type;
	__u32 reserved;
	union {
		struct vhost_iotlb_msg iotlb;
		__u8 padding[64];
	};
};
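
/*
 * vhost-vdpa installs and removes DMA mappings by writing IOTLB messages to
 * the backend fd rather than through a dedicated ioctl. A minimal sketch of
 * an update message (vhost_vdpa_dma_map() below is the real code):
 *
 *	struct vhost_msg msg = {
 *		.type = VHOST_IOTLB_MSG_V2,
 *		.iotlb = {
 *			.iova = iova,
 *			.uaddr = (uint64_t)(uintptr_t)addr,
 *			.size = len,
 *			.perm = VHOST_ACCESS_RW,
 *			.type = VHOST_IOTLB_UPDATE,
 *		},
 *	};
 *	write(dev->vhostfd, &msg, sizeof(msg));
 */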

static int
vhost_vdpa_dma_map(struct virtio_user_dev *dev, void *addr,
		uint64_t iova, size_t len)
{
	struct vhost_msg msg = {};

	if (!(dev->protocol_features & (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2))) {
		PMD_DRV_LOG(ERR, "IOTLB_MSG_V2 not supported by the backend.");
		return -1;
	}

	msg.type = VHOST_IOTLB_MSG_V2;
	msg.iotlb.type = VHOST_IOTLB_UPDATE;
	msg.iotlb.iova = iova;
	msg.iotlb.uaddr = (uint64_t)(uintptr_t)addr;
	msg.iotlb.size = len;
	msg.iotlb.perm = VHOST_ACCESS_RW;

	PMD_DRV_LOG(DEBUG, "%s: iova: 0x%" PRIx64 ", addr: %p, len: 0x%zx",
			__func__, iova, addr, len);

	if (write(dev->vhostfd, &msg, sizeof(msg)) != sizeof(msg)) {
		PMD_DRV_LOG(ERR, "Failed to send IOTLB update (%s)",
				strerror(errno));
		return -1;
	}

	return 0;
}

static int
vhost_vdpa_dma_unmap(struct virtio_user_dev *dev, __rte_unused void *addr,
		uint64_t iova, size_t len)
{
	struct vhost_msg msg = {};

	if (!(dev->protocol_features & (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2))) {
		PMD_DRV_LOG(ERR, "IOTLB_MSG_V2 not supported by the backend.");
		return -1;
	}

	msg.type = VHOST_IOTLB_MSG_V2;
	msg.iotlb.type = VHOST_IOTLB_INVALIDATE;
	msg.iotlb.iova = iova;
	msg.iotlb.size = len;

	PMD_DRV_LOG(DEBUG, "%s: iova: 0x%" PRIx64 ", len: 0x%zx",
			__func__, iova, len);

	if (write(dev->vhostfd, &msg, sizeof(msg)) != sizeof(msg)) {
		PMD_DRV_LOG(ERR, "Failed to send IOTLB invalidate (%s)",
				strerror(errno));
		return -1;
	}

	return 0;
}

static int
vhost_vdpa_map_contig(const struct rte_memseg_list *msl,
		const struct rte_memseg *ms, size_t len, void *arg)
{
	struct virtio_user_dev *dev = arg;

	/* skip external memory; the page-by-page walk below handles it */
	if (msl->external)
		return 0;

	return vhost_vdpa_dma_map(dev, ms->addr, ms->iova, len);
}

static int
vhost_vdpa_map(const struct rte_memseg_list *msl, const struct rte_memseg *ms,
		void *arg)
{
	struct virtio_user_dev *dev = arg;

	/* skip external memory that isn't a heap */
	if (msl->external && !msl->heap)
		return 0;

	/* skip any segments with invalid IOVA addresses */
	if (ms->iova == RTE_BAD_IOVA)
		return 0;

	/* if IOVA mode is VA, we've already mapped the internal segments */
	if (!msl->external && rte_eal_iova_mode() == RTE_IOVA_VA)
		return 0;

	return vhost_vdpa_dma_map(dev, ms->addr, ms->iova, ms->len);
}

static int
vhost_vdpa_dma_map_all(struct virtio_user_dev *dev)
{
	/* invalidate any stale mappings before remapping everything */
	vhost_vdpa_dma_unmap(dev, NULL, 0, SIZE_MAX);

	if (rte_eal_iova_mode() == RTE_IOVA_VA) {
		/* with IOVA as VA mode, we can get away with mapping contiguous
		 * chunks rather than going page-by-page.
		 */
		int ret = rte_memseg_contig_walk_thread_unsafe(
				vhost_vdpa_map_contig, dev);
		if (ret)
			return ret;
		/* we have to continue the walk because we've skipped the
		 * external segments during the contig walk.
		 */
	}
	return rte_memseg_walk_thread_unsafe(vhost_vdpa_map, dev);
}
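
/*
 * Both callbacks above follow the rte_memseg_walk_t contract: returning 0
 * continues the walk, while a negative return aborts it and is surfaced as
 * the walk's return value, so a single failed mapping fails the whole remap.
 */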

/* With the features below, vhost-vdpa does not need to do checksum and TSO:
 * this information is passed to virtio_user through the virtio net header.
 */
#define VHOST_VDPA_GUEST_OFFLOADS_MASK \
	((1ULL << VIRTIO_NET_F_GUEST_CSUM) | \
	 (1ULL << VIRTIO_NET_F_GUEST_TSO4) | \
	 (1ULL << VIRTIO_NET_F_GUEST_TSO6) | \
	 (1ULL << VIRTIO_NET_F_GUEST_ECN) | \
	 (1ULL << VIRTIO_NET_F_GUEST_UFO))

#define VHOST_VDPA_HOST_OFFLOADS_MASK \
	((1ULL << VIRTIO_NET_F_HOST_TSO4) | \
	 (1ULL << VIRTIO_NET_F_HOST_TSO6) | \
	 (1ULL << VIRTIO_NET_F_CSUM))
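
/*
 * Grouping the bits lets a caller add or strip every offload flag in one
 * operation; a sketch (the call sites are not part of this file):
 *
 *	features &= ~(VHOST_VDPA_GUEST_OFFLOADS_MASK |
 *			VHOST_VDPA_HOST_OFFLOADS_MASK);
 */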

static int
vhost_vdpa_ioctl(struct virtio_user_dev *dev,
		   enum vhost_user_request req,
		   void *arg)
{
	int ret = -1;
	uint64_t req_vdpa;

	PMD_DRV_LOG(INFO, "%s", vhost_msg_strings[req]);

	req_vdpa = vhost_req_user_to_vdpa[req];

	if (req_vdpa == VHOST_SET_MEM_TABLE)
		return vhost_vdpa_dma_map_all(dev);

	if (req_vdpa == VHOST_SET_FEATURES) {
		/* vDPA relies on the IOTLB interface for DMA mapping, which
		 * requires VIRTIO_F_IOMMU_PLATFORM to be negotiated.
		 */
		*(uint64_t *)arg |= 1ULL << VIRTIO_F_IOMMU_PLATFORM;

		/* Multiqueue not supported for now */
		*(uint64_t *)arg &= ~(1ULL << VIRTIO_NET_F_MQ);
	}

	switch (req_vdpa) {
	case VHOST_SET_VRING_NUM:
	case VHOST_SET_VRING_ADDR:
	case VHOST_SET_VRING_BASE:
	case VHOST_GET_VRING_BASE:
	case VHOST_SET_VRING_KICK:
	case VHOST_SET_VRING_CALL:
		PMD_DRV_LOG(DEBUG, "vhostfd=%d, index=%u",
				dev->vhostfd, *(unsigned int *)arg);
		break;
	default:
		break;
	}

	ret = ioctl(dev->vhostfd, req_vdpa, arg);
	if (ret < 0)
		PMD_DRV_LOG(ERR, "%s failed: %s",
				vhost_msg_strings[req], strerror(errno));

	return ret;
}
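
/*
 * All generic requests funnel through this dispatcher via the ops table at
 * the bottom of the file. A sketch of a call as the virtio_user core would
 * make it (assuming dev->ops points at virtio_ops_vdpa):
 *
 *	uint64_t features;
 *	dev->ops->send_request(dev, VHOST_USER_GET_FEATURES, &features);
 */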

/**
 * Set up the environment to talk with a vhost-vdpa backend.
 *
 * @return
 *   - (-1) if setup fails;
 *   - (>=0) if successful.
 */
static int
vhost_vdpa_setup(struct virtio_user_dev *dev)
{
	uint32_t did = (uint32_t)-1;

	dev->vhostfd = open(dev->path, O_RDWR);
	if (dev->vhostfd < 0) {
		PMD_DRV_LOG(ERR, "Failed to open %s: %s",
				dev->path, strerror(errno));
		return -1;
	}

	if (ioctl(dev->vhostfd, VHOST_VDPA_GET_DEVICE_ID, &did) < 0 ||
	    did != VIRTIO_ID_NETWORK) {
		PMD_DRV_LOG(ERR, "Invalid vdpa device ID: %u", did);
		close(dev->vhostfd);
		return -1;
	}

	return 0;
}
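
/*
 * A virtio-net queue pair maps to two consecutive vrings: RX is ring
 * 2 * pair_idx and TX is ring 2 * pair_idx + 1, which is why the loop below
 * toggles both rings of the pair.
 */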
static int
vhost_vdpa_enable_queue_pair(struct virtio_user_dev *dev,
			       uint16_t pair_idx,
			       int enable)
{
	int i;

	if (dev->qp_enabled[pair_idx] == enable)
		return 0;

	for (i = 0; i < 2; ++i) {
		struct vhost_vring_state state = {
			.index = pair_idx * 2 + i,
			.num = enable,
		};

		if (vhost_vdpa_ioctl(dev, VHOST_USER_SET_VRING_ENABLE, &state))
			return -1;
	}

	dev->qp_enabled[pair_idx] = enable;
	return 0;
}

struct virtio_user_backend_ops virtio_ops_vdpa = {
	.setup = vhost_vdpa_setup,
	.send_request = vhost_vdpa_ioctl,
	.enable_qp = vhost_vdpa_enable_queue_pair,
	.dma_map = vhost_vdpa_dma_map,
	.dma_unmap = vhost_vdpa_dma_unmap,
};
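
/*
 * The virtio_user core selects this ops table when the backend is detected
 * as a vhost-vdpa device. Illustrative dispatch (assuming dev->ops points
 * here):
 *
 *	dev->ops->enable_qp(dev, 0, 1);
 *
 * which enables both rings of queue pair 0.
 */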