/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <net/ethernet.h>
#include <netinet/in.h>
#include <netinet/if_ether.h>
#include <linux/if_packet.h>

#include <rte_common.h>
#include <rte_log.h>

#include "virtio-net.h"
#include "virtio-net-user.h"
#include "vhost-net-user.h"
#include "vhost-net.h"

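/*
 * The original mmap() parameters for each guest memory region are stashed
 * right after the virtio_memory region array, inside the same allocation,
 * so that free_mem_region() can munmap() and close() them later.
 */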
struct orig_region_map {
	int fd;
	uint64_t mapped_address;
	uint64_t mapped_size;
	uint64_t blksz;
};

#define orig_region(ptr, nregions) \
	((struct orig_region_map *)RTE_PTR_ADD((ptr), \
		sizeof(struct virtio_memory) + \
		sizeof(struct virtio_memory_regions) * (nregions)))

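/*
 * Return the block size reported by fstat(); for a hugetlbfs-backed fd this
 * is the hugepage size, which is the alignment mmap() lengths must honor.
 */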
static uint64_t
get_blk_size(int fd)
{
	struct stat stat;

	fstat(fd, &stat);
	return (uint64_t)stat.st_blksize;
}

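/* Unmap every previously mapped guest memory region and close its fd. */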
static void
free_mem_region(struct virtio_net *dev)
{
	struct orig_region_map *region;
	unsigned int idx;

	if (!dev || !dev->mem)
		return;

	region = orig_region(dev->mem, dev->mem->nregions);
	for (idx = 0; idx < dev->mem->nregions; idx++) {
		if (region[idx].mapped_address) {
			munmap((void *)(uintptr_t)region[idx].mapped_address,
					region[idx].mapped_size);
			close(region[idx].fd);
		}
	}
}

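/*
 * Handle VHOST_USER_SET_MEM_TABLE: map each guest memory region that qemu
 * passed us (one fd per region) into this process's address space.
 */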
int
user_set_mem_table(struct vhost_device_ctx ctx, struct VhostUserMsg *pmsg)
{
	struct VhostUserMemory memory = pmsg->payload.memory;
	struct virtio_memory_regions *pregion;
	uint64_t mapped_address, mapped_size;
	struct virtio_net *dev;
	unsigned int idx = 0;
	struct orig_region_map *pregion_orig;
	uint64_t alignment;

	dev = get_device(ctx);
	if (dev == NULL)
		return -1;

	/* Remove from the data plane. */
	if (dev->flags & VIRTIO_DEV_RUNNING)
		notify_ops->destroy_device(dev);

	/* Unmap old memory regions, one by one. */
	if (dev->mem) {
		free_mem_region(dev);
		free(dev->mem);
		dev->mem = NULL;
	}

	dev->mem = calloc(1,
		sizeof(struct virtio_memory) +
		sizeof(struct virtio_memory_regions) * memory.nregions +
		sizeof(struct orig_region_map) * memory.nregions);
	if (dev->mem == NULL) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"(%"PRIu64") Failed to allocate memory for dev->mem\n",
			dev->device_fh);
		return -1;
	}
	dev->mem->nregions = memory.nregions;

	pregion_orig = orig_region(dev->mem, memory.nregions);
	for (idx = 0; idx < memory.nregions; idx++) {
		pregion = &dev->mem->regions[idx];
		pregion->guest_phys_address =
			memory.regions[idx].guest_phys_addr;
		pregion->guest_phys_address_end =
			memory.regions[idx].guest_phys_addr +
			memory.regions[idx].memory_size;
		pregion->memory_size =
			memory.regions[idx].memory_size;
		pregion->userspace_address =
			memory.regions[idx].userspace_addr;

		mapped_size = memory.regions[idx].memory_size +
			memory.regions[idx].mmap_offset;

		/*
		 * On older long-term kernels (e.g. 2.6.32 and 3.2.72),
		 * mmap() without MAP_ANONYMOUS must be called with a
		 * length argument that is hugepage aligned, or it fails
		 * with EINVAL. To avoid that failure, round the length
		 * up to the alignment here.
		 */
		alignment = get_blk_size(pmsg->fds[idx]);
		mapped_size = RTE_ALIGN_CEIL(mapped_size, alignment);

		mapped_address = (uint64_t)(uintptr_t)mmap(NULL,
			mapped_size,
			PROT_READ | PROT_WRITE, MAP_SHARED,
			pmsg->fds[idx],
			0);

		RTE_LOG(INFO, VHOST_CONFIG,
			"mapped region %d fd:%d to:%p sz:0x%"PRIx64" "
			"off:0x%"PRIx64" align:0x%"PRIx64"\n",
			idx, pmsg->fds[idx], (void *)(uintptr_t)mapped_address,
			mapped_size, memory.regions[idx].mmap_offset,
			alignment);

		if (mapped_address == (uint64_t)(uintptr_t)MAP_FAILED) {
			RTE_LOG(ERR, VHOST_CONFIG,
				"mmap of qemu guest memory region failed.\n");
			goto err_mmap;
		}

		pregion_orig[idx].mapped_address = mapped_address;
		pregion_orig[idx].mapped_size = mapped_size;
		pregion_orig[idx].blksz = alignment;
		pregion_orig[idx].fd = pmsg->fds[idx];

		mapped_address += memory.regions[idx].mmap_offset;

		pregion->address_offset = mapped_address -
			pregion->guest_phys_address;

		if (memory.regions[idx].guest_phys_addr == 0) {
			dev->mem->base_address =
				memory.regions[idx].userspace_addr;
			dev->mem->mapped_address =
				pregion->address_offset;
		}

		LOG_DEBUG(VHOST_CONFIG,
			"REGION: %u GPA: %p QEMU VA: %p SIZE (%"PRIu64")\n",
			idx,
			(void *)(uintptr_t)pregion->guest_phys_address,
			(void *)(uintptr_t)pregion->userspace_address,
			pregion->memory_size);
	}

	return 0;

err_mmap:
	/* Unwind the regions mapped so far; idx itself was never mapped. */
	while (idx--) {
		munmap((void *)(uintptr_t)pregion_orig[idx].mapped_address,
				pregion_orig[idx].mapped_size);
		close(pregion_orig[idx].fd);
	}
	free(dev->mem);
	dev->mem = NULL;
	return -1;
}

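/*
 * A virtqueue is ready only once its descriptor table is set up and both
 * the kick and call eventfds have been received from qemu.
 */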
static int
vq_is_ready(struct vhost_virtqueue *vq)
{
	return vq && vq->desc &&
	       vq->kickfd != -1 &&
	       vq->callfd != -1;
}

static int
virtio_is_ready(struct virtio_net *dev)
{
	struct vhost_virtqueue *rvq, *tvq;
	uint32_t i;

	for (i = 0; i < dev->virt_qp_nb; i++) {
		rvq = dev->virtqueue[i * VIRTIO_QNUM + VIRTIO_RXQ];
		tvq = dev->virtqueue[i * VIRTIO_QNUM + VIRTIO_TXQ];

		if (!vq_is_ready(rvq) || !vq_is_ready(tvq)) {
			RTE_LOG(INFO, VHOST_CONFIG,
				"virtio is not ready for processing.\n");
			return 0;
		}
	}

	RTE_LOG(INFO, VHOST_CONFIG,
		"virtio is now ready for processing.\n");
	return 1;
}

void
user_set_vring_call(struct vhost_device_ctx ctx, struct VhostUserMsg *pmsg)
{
	struct vhost_vring_file file;

	file.index = pmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
	if (pmsg->payload.u64 & VHOST_USER_VRING_NOFD_MASK)
		file.fd = -1;	/* no fd was passed along with the message */
	else
		file.fd = pmsg->fds[0];
	RTE_LOG(INFO, VHOST_CONFIG,
		"vring call idx:%d file:%d\n", file.index, file.fd);
	ops->set_vring_call(ctx, &file);
}

/*
 * In vhost-user, when we receive a kick message, test whether the virtio
 * device is ready for packet processing.
 */
void
user_set_vring_kick(struct vhost_device_ctx ctx, struct VhostUserMsg *pmsg)
{
	struct vhost_vring_file file;
	struct virtio_net *dev = get_device(ctx);

	file.index = pmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
	if (pmsg->payload.u64 & VHOST_USER_VRING_NOFD_MASK)
		file.fd = -1;	/* no fd was passed along with the message */
	else
		file.fd = pmsg->fds[0];
	RTE_LOG(INFO, VHOST_CONFIG,
		"vring kick idx:%d file:%d\n", file.index, file.fd);
	ops->set_vring_kick(ctx, &file);

	if (virtio_is_ready(dev) &&
			!(dev->flags & VIRTIO_DEV_RUNNING))
		notify_ops->new_device(dev);
}

/*
 * When virtio is stopped, qemu sends us the GET_VRING_BASE message.
 */
int
user_get_vring_base(struct vhost_device_ctx ctx,
	struct vhost_vring_state *state)
{
	struct virtio_net *dev = get_device(ctx);

	if (dev == NULL)
		return -1;

	/* We have to stop the queue (virtio) if it is running. */
	if (dev->flags & VIRTIO_DEV_RUNNING)
		notify_ops->destroy_device(dev);

	/* Here we are safe to get the last used index */
	ops->get_vring_base(ctx, state->index, state);

	RTE_LOG(INFO, VHOST_CONFIG,
		"vring base idx:%d file:%d\n", state->index, state->num);
	/*
	 * Based on the current qemu vhost-user implementation, this message
	 * is sent, and only sent, in vhost_vring_stop.
	 * TODO: clean up the vring; it isn't usable from here on.
	 */
	if (dev->virtqueue[state->index]->kickfd >= 0) {
		close(dev->virtqueue[state->index]->kickfd);
		dev->virtqueue[state->index]->kickfd = -1;
	}

	return 0;
}

/*
 * When the virtio queues are ready to work, qemu sends us a message to
 * enable the virtio queue pair.
 */
int
user_set_vring_enable(struct vhost_device_ctx ctx,
	struct vhost_vring_state *state)
{
	struct virtio_net *dev = get_device(ctx);
	int enable = (int)state->num;

	RTE_LOG(INFO, VHOST_CONFIG,
		"set queue enable: %d to qp idx: %d\n",
		enable, state->index);

	if (notify_ops->vring_state_changed) {
		notify_ops->vring_state_changed(dev, state->index, enable);
	}

	dev->virtqueue[state->index]->enabled = enable;

	return 0;
}

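/* Tear the device down: stop the data plane and release mapped guest memory. */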
void
user_destroy_device(struct vhost_device_ctx ctx)
{
	struct virtio_net *dev = get_device(ctx);

	if (dev && (dev->flags & VIRTIO_DEV_RUNNING))
		notify_ops->destroy_device(dev);

	if (dev && dev->mem) {
		free_mem_region(dev);
		free(dev->mem);
		dev->mem = NULL;
	}
}

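/*
 * Handle VHOST_USER_SET_PROTOCOL_FEATURES: accept only protocol features
 * we advertised; ignore the request if unknown bits are set.
 */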
void
user_set_protocol_features(struct vhost_device_ctx ctx,
	uint64_t protocol_features)
{
	struct virtio_net *dev;

	dev = get_device(ctx);
	if (dev == NULL || protocol_features & ~VHOST_USER_PROTOCOL_FEATURES)
		return;

	dev->protocol_features = protocol_features;
}

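/*
 * Handle VHOST_USER_SET_LOG_BASE: map the dirty-page log region shared with
 * qemu, used to track writes to guest memory during live migration.
 */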
int
user_set_log_base(struct vhost_device_ctx ctx,
	struct VhostUserMsg *msg)
{
	struct virtio_net *dev;
	int fd = msg->fds[0];
	uint64_t size, off;
	void *addr;

	dev = get_device(ctx);
	if (!dev)
		return -1;

	if (fd < 0) {
		RTE_LOG(ERR, VHOST_CONFIG, "invalid log fd: %d\n", fd);
		return -1;
	}

	if (msg->size != sizeof(VhostUserLog)) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"invalid log base msg size: %"PRId32" != %d\n",
			msg->size, (int)sizeof(VhostUserLog));
		return -1;
	}

	size = msg->payload.log.mmap_size;
	off = msg->payload.log.mmap_offset;
	RTE_LOG(INFO, VHOST_CONFIG,
		"log mmap size: %"PRId64", offset: %"PRId64"\n",
		size, off);

	/*
	 * mmap from 0 to work around a hugepage mmap bug: mmap will
	 * fail when the offset is not page size aligned.
	 */
	addr = mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	close(fd);	/* the mapping keeps its own reference */
	if (addr == MAP_FAILED) {
		RTE_LOG(ERR, VHOST_CONFIG, "mmap log base failed!\n");
		return -1;
	}

	/* TODO: unmap on stop */
	dev->log_base = (uint64_t)(uintptr_t)addr + off;
	dev->log_size = size;

	return 0;
}

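/*
 * 64 bytes holds an Ethernet header (14 bytes) plus an ARP/RARP payload
 * (28 bytes); the rest is padding up to the minimum Ethernet frame size.
 */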
#define RARP_BUF_SIZE 64

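/*
 * Build a broadcast RARP "reverse request" announcing the guest MAC address,
 * so that switches can relearn its location after live migration.
 */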
static void
make_rarp_packet(uint8_t *buf, uint8_t *mac)
{
	struct ether_header *eth_hdr;
	struct ether_arp *rarp;

	/* Ethernet header. */
	eth_hdr = (struct ether_header *)buf;
	memset(&eth_hdr->ether_dhost, 0xff, ETH_ALEN);
	memcpy(&eth_hdr->ether_shost, mac, ETH_ALEN);
	eth_hdr->ether_type = htons(ETH_P_RARP);

	/* RARP header. */
	rarp = (struct ether_arp *)(eth_hdr + 1);
	rarp->ea_hdr.ar_hrd = htons(ARPHRD_ETHER);
	rarp->ea_hdr.ar_pro = htons(ETHERTYPE_IP);
	rarp->ea_hdr.ar_hln = ETH_ALEN;
	rarp->ea_hdr.ar_pln = 4;
	rarp->ea_hdr.ar_op = htons(ARPOP_RREQUEST);

	memcpy(&rarp->arp_sha, mac, ETH_ALEN);
	memset(&rarp->arp_spa, 0x00, 4);
	memcpy(&rarp->arp_tha, mac, ETH_ALEN);
	memset(&rarp->arp_tpa, 0x00, 4);
}

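/*
 * Send the prepared RARP frame out of one interface through an AF_PACKET
 * raw socket, addressing the interface by its index.
 */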
static void
send_rarp(const char *ifname, uint8_t *rarp)
{
	int fd;
	struct ifreq ifr;
	struct sockaddr_ll addr;

	fd = socket(AF_PACKET, SOCK_RAW, 0);
	if (fd < 0) {
		perror("socket failed");
		return;
	}

	memset(&ifr, 0, sizeof(struct ifreq));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	if (ioctl(fd, SIOCGIFINDEX, &ifr) < 0) {
		perror("failed to get interface index");
		close(fd);
		return;
	}

	memset(&addr, 0, sizeof(addr));
	addr.sll_family = AF_PACKET;
	addr.sll_ifindex = ifr.ifr_ifindex;
	addr.sll_halen = ETH_ALEN;

	if (sendto(fd, rarp, RARP_BUF_SIZE, 0,
			(const struct sockaddr *)&addr, sizeof(addr)) < 0)
		perror("send rarp packet failed");

	close(fd);
}

/*
 * Broadcast a RARP message on all interfaces, to update the switch's
 * MAC table.
 */
int
user_send_rarp(struct VhostUserMsg *msg)
{
	uint8_t *mac = (uint8_t *)&msg->payload.u64;
	uint8_t rarp[RARP_BUF_SIZE];
	struct ifconf ifc = {0, };
	struct ifreq *ifr;
	int nr = 16;
	int fd;
	uint32_t i;

	RTE_LOG(DEBUG, VHOST_CONFIG,
		":: mac: %02x:%02x:%02x:%02x:%02x:%02x\n",
		mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);

	make_rarp_packet(rarp, mac);

	/*
	 * Get all interfaces.
	 */
	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0) {
		perror("failed to create AF_INET socket");
		return -1;
	}

again:
	ifc.ifc_len = sizeof(*ifr) * nr;
	ifc.ifc_buf = realloc(ifc.ifc_buf, ifc.ifc_len);

	if (ioctl(fd, SIOCGIFCONF, &ifc) < 0) {
		perror("failed at SIOCGIFCONF");
		free(ifc.ifc_buf);
		close(fd);
		return -1;
	}

	if (ifc.ifc_len == (int)sizeof(struct ifreq) * nr) {
		/*
		 * The current ifc_buf is not big enough to hold
		 * all interfaces; double it and try again.
		 */
		nr *= 2;
		goto again;
	}

	ifr = (struct ifreq *)ifc.ifc_buf;
	for (i = 0; i < ifc.ifc_len / sizeof(struct ifreq); i++)
		send_rarp(ifr[i].ifr_name, rarp);

	free(ifc.ifc_buf);
	close(fd);

	return 0;
}