diff --git a/lib/librte_vhost/vhost_user.c b/lib/librte_vhost/vhost_user.c
index a92377a5a3..ad2e8d3802 100644
--- a/lib/librte_vhost/vhost_user.c
+++ b/lib/librte_vhost/vhost_user.c
@@ -51,6 +51,9 @@
 #include "vhost.h"
 #include "vhost_user.h"
 
+#define VIRTIO_MIN_MTU 68
+#define VIRTIO_MAX_MTU 65535
+
 static const char *vhost_message_str[VHOST_USER_MAX] = {
 	[VHOST_USER_NONE] = "VHOST_USER_NONE",
 	[VHOST_USER_GET_FEATURES] = "VHOST_USER_GET_FEATURES",
@@ -72,6 +75,7 @@ static const char *vhost_message_str[VHOST_USER_MAX] = {
 	[VHOST_USER_GET_QUEUE_NUM] = "VHOST_USER_GET_QUEUE_NUM",
 	[VHOST_USER_SET_VRING_ENABLE] = "VHOST_USER_SET_VRING_ENABLE",
 	[VHOST_USER_SEND_RARP] = "VHOST_USER_SEND_RARP",
+	[VHOST_USER_NET_SET_MTU] = "VHOST_USER_NET_SET_MTU",
 };
 
 static uint64_t
@@ -88,7 +92,7 @@ static void
 free_mem_region(struct virtio_net *dev)
 {
 	uint32_t i;
-	struct virtio_memory_region *reg;
+	struct rte_vhost_mem_region *reg;
 
 	if (!dev || !dev->mem)
 		return;
@@ -110,6 +114,10 @@ vhost_backend_cleanup(struct virtio_net *dev)
 		rte_free(dev->mem);
 		dev->mem = NULL;
 	}
+
+	free(dev->guest_pages);
+	dev->guest_pages = NULL;
+
 	if (dev->log_addr) {
 		munmap((void *)(uintptr_t)dev->log_addr, dev->log_size);
 		dev->log_addr = 0;
@@ -131,7 +139,7 @@ vhost_user_reset_owner(struct virtio_net *dev)
 {
 	if (dev->flags & VIRTIO_DEV_RUNNING) {
 		dev->flags &= ~VIRTIO_DEV_RUNNING;
-		notify_ops->destroy_device(dev->vid);
+		dev->notify_ops->destroy_device(dev->vid);
 	}
 
 	cleanup_device(dev, 0);
@@ -143,9 +151,12 @@ vhost_user_reset_owner(struct virtio_net *dev)
  * The features that we support are requested.
  */
 static uint64_t
-vhost_user_get_features(void)
+vhost_user_get_features(struct virtio_net *dev)
 {
-	return VHOST_FEATURES;
+	uint64_t features = 0;
+
+	rte_vhost_driver_get_features(dev->ifname, &features);
+	return features;
 }
 
 /*
@@ -154,8 +165,20 @@ vhost_user_get_features(void)
 static int
 vhost_user_set_features(struct virtio_net *dev, uint64_t features)
 {
-	if (features & ~VHOST_FEATURES)
+	uint64_t vhost_features = 0;
+
+	rte_vhost_driver_get_features(dev->ifname, &vhost_features);
+	if (features & ~vhost_features) {
+		RTE_LOG(ERR, VHOST_CONFIG,
+			"(%d) received invalid negotiated features.\n",
+			dev->vid);
 		return -1;
+	}
+
+	if ((dev->flags & VIRTIO_DEV_RUNNING) && dev->features != features) {
+		if (dev->notify_ops->features_changed)
+			dev->notify_ops->features_changed(dev->vid, features);
+	}
 
 	dev->features = features;
 	if (dev->features &
@@ -178,9 +201,34 @@ vhost_user_set_features(struct virtio_net *dev, uint64_t features)
  */
 static int
 vhost_user_set_vring_num(struct virtio_net *dev,
-			 struct vhost_vring_state *state)
+			 VhostUserMsg *msg)
 {
-	dev->virtqueue[state->index]->size = state->num;
+	struct vhost_virtqueue *vq = dev->virtqueue[msg->payload.state.index];
+
+	vq->size = msg->payload.state.num;
+
+	if (dev->dequeue_zero_copy) {
+		vq->nr_zmbuf = 0;
+		vq->last_zmbuf_idx = 0;
+		vq->zmbuf_size = vq->size;
+		vq->zmbufs = rte_zmalloc(NULL, vq->zmbuf_size *
+					 sizeof(struct zcopy_mbuf), 0);
+		if (vq->zmbufs == NULL) {
+			RTE_LOG(WARNING, VHOST_CONFIG,
+				"failed to allocate mem for zero copy; "
+				"zero copy is force disabled\n");
+			dev->dequeue_zero_copy = 0;
+		}
+	}
+
+	vq->shadow_used_ring = rte_malloc(NULL,
+				vq->size * sizeof(struct vring_used_elem),
+				RTE_CACHE_LINE_SIZE);
+	if (!vq->shadow_used_ring) {
+		RTE_LOG(ERR, VHOST_CONFIG,
+			"failed to allocate memory for shadow used ring.\n");
+		return -1;
+	}
 
 	return 0;
 }
@@ -198,13 +246,6 @@ numa_realloc(struct virtio_net *dev, int index)
 	struct vhost_virtqueue *old_vq, *vq;
 	int ret;
 
-	/*
-	 * vq is allocated on pairs, we should try to do realloc
-	 * on first queue of one queue pair only.
-	 */
-	if (index % VIRTIO_QNUM != 0)
-		return dev;
-
 	old_dev = dev;
 	vq = old_vq = dev->virtqueue[index];
 
@@ -222,12 +263,11 @@ numa_realloc(struct virtio_net *dev, int index)
 	if (oldnode != newnode) {
 		RTE_LOG(INFO, VHOST_CONFIG,
 			"reallocate vq from %d to %d node\n", oldnode, newnode);
-		vq = rte_malloc_socket(NULL, sizeof(*vq) * VIRTIO_QNUM, 0,
-				       newnode);
+		vq = rte_malloc_socket(NULL, sizeof(*vq), 0, newnode);
 		if (!vq)
 			return dev;
 
-		memcpy(vq, old_vq, sizeof(*vq) * VIRTIO_QNUM);
+		memcpy(vq, old_vq, sizeof(*vq));
 		rte_free(old_vq);
 	}
 
@@ -255,7 +295,6 @@ numa_realloc(struct virtio_net *dev, int index)
 out:
 	dev->virtqueue[index] = vq;
-	dev->virtqueue[index + 1] = vq + 1;
 	vhost_devices[dev->vid] = dev;
 
 	return dev;
 }
@@ -275,7 +314,7 @@ numa_realloc(struct virtio_net *dev, int index __rte_unused)
 static uint64_t
 qva_to_vva(struct virtio_net *dev, uint64_t qva)
 {
-	struct virtio_memory_region *reg;
+	struct rte_vhost_mem_region *reg;
 	uint32_t i;
 
 	/* Find the region where the address lives. */
@@ -297,7 +336,7 @@ qva_to_vva(struct virtio_net *dev, uint64_t qva)
 * This function then converts these to our address space.
 */
 static int
-vhost_user_set_vring_addr(struct virtio_net *dev, struct vhost_vring_addr *addr)
+vhost_user_set_vring_addr(struct virtio_net *dev, VhostUserMsg *msg)
 {
 	struct vhost_virtqueue *vq;
 
@@ -305,11 +344,11 @@ vhost_user_set_vring_addr(struct virtio_net *dev, struct vhost_vring_addr *addr)
 		return -1;
 
 	/* addr->index refers to the queue index. The txq 1, rxq is 0. */
-	vq = dev->virtqueue[addr->index];
+	vq = dev->virtqueue[msg->payload.addr.index];
 
 	/* The addresses are converted from QEMU virtual to Vhost virtual. */
 	vq->desc = (struct vring_desc *)(uintptr_t)qva_to_vva(dev,
-			addr->desc_user_addr);
+			msg->payload.addr.desc_user_addr);
 	if (vq->desc == 0) {
 		RTE_LOG(ERR, VHOST_CONFIG,
 			"(%d) failed to find desc ring address.\n",
@@ -317,11 +356,11 @@ vhost_user_set_vring_addr(struct virtio_net *dev, struct vhost_vring_addr *addr)
 		return -1;
 	}
 
-	dev = numa_realloc(dev, addr->index);
-	vq = dev->virtqueue[addr->index];
+	dev = numa_realloc(dev, msg->payload.addr.index);
+	vq = dev->virtqueue[msg->payload.addr.index];
 
 	vq->avail = (struct vring_avail *)(uintptr_t)qva_to_vva(dev,
-			addr->avail_user_addr);
+			msg->payload.addr.avail_user_addr);
 	if (vq->avail == 0) {
 		RTE_LOG(ERR, VHOST_CONFIG,
 			"(%d) failed to find avail ring address.\n",
@@ -330,7 +369,7 @@ vhost_user_set_vring_addr(struct virtio_net *dev, struct vhost_vring_addr *addr)
 	}
 
 	vq->used = (struct vring_used *)(uintptr_t)qva_to_vva(dev,
-			addr->used_user_addr);
+			msg->payload.addr.used_user_addr);
 	if (vq->used == 0) {
 		RTE_LOG(ERR, VHOST_CONFIG,
 			"(%d) failed to find used ring address.\n",
@@ -347,7 +386,7 @@ vhost_user_set_vring_addr(struct virtio_net *dev, struct vhost_vring_addr *addr)
 		vq->last_avail_idx = vq->used->idx;
 	}
 
-	vq->log_guest_addr = addr->log_guest_addr;
+	vq->log_guest_addr = msg->payload.addr.log_guest_addr;
 
 	LOG_DEBUG(VHOST_CONFIG, "(%d) mapped address desc: %p\n",
 			dev->vid, vq->desc);
@@ -366,10 +405,12 @@ vhost_user_set_vring_addr(struct virtio_net *dev, struct vhost_vring_addr *addr)
 */
 static int
 vhost_user_set_vring_base(struct virtio_net *dev,
-			struct vhost_vring_state *state)
+			VhostUserMsg *msg)
 {
-	dev->virtqueue[state->index]->last_used_idx = state->num;
-	dev->virtqueue[state->index]->last_avail_idx = state->num;
+	dev->virtqueue[msg->payload.state.index]->last_used_idx =
+			msg->payload.state.num;
+	dev->virtqueue[msg->payload.state.index]->last_avail_idx =
+			msg->payload.state.num;
 
 	return 0;
 }
@@ -403,7 +444,7 @@ add_one_guest_page(struct virtio_net *dev, uint64_t guest_phys_addr,
 }
 
 static void
-add_guest_pages(struct virtio_net *dev, struct virtio_memory_region *reg,
+add_guest_pages(struct virtio_net *dev, struct rte_vhost_mem_region *reg,
 		uint64_t page_size)
 {
 	uint64_t reg_size = reg->size;
@@ -422,14 +463,14 @@ add_guest_pages(struct virtio_net *dev, struct virtio_memory_region *reg,
 	reg_size -= size;
 
 	while (reg_size > 0) {
+		size = RTE_MIN(reg_size, page_size);
 		host_phys_addr = rte_mem_virt2phy((void *)(uintptr_t)
 						  host_user_addr);
-		add_one_guest_page(dev, guest_phys_addr, host_phys_addr,
-				   page_size);
+		add_one_guest_page(dev, guest_phys_addr, host_phys_addr, size);
 
-		host_user_addr += page_size;
-		guest_phys_addr += page_size;
-		reg_size -= page_size;
+		host_user_addr += size;
+		guest_phys_addr += size;
+		reg_size -= size;
 	}
 }
 
@@ -463,7 +504,7 @@ static int
 vhost_user_set_mem_table(struct virtio_net *dev, struct VhostUserMsg *pmsg)
 {
 	struct VhostUserMemory memory = pmsg->payload.memory;
-	struct virtio_memory_region *reg;
+	struct rte_vhost_mem_region *reg;
 	void *mmap_addr;
 	uint64_t mmap_size;
 	uint64_t mmap_offset;
@@ -471,12 +512,6 @@ vhost_user_set_mem_table(struct virtio_net *dev, struct VhostUserMsg *pmsg)
 	uint32_t i;
 	int fd;
 
-	/* Remove from the data plane. */
-	if (dev->flags & VIRTIO_DEV_RUNNING) {
-		dev->flags &= ~VIRTIO_DEV_RUNNING;
-		notify_ops->destroy_device(dev->vid);
-	}
-
 	if (dev->mem) {
 		free_mem_region(dev);
 		rte_free(dev->mem);
@@ -488,10 +523,17 @@ vhost_user_set_mem_table(struct virtio_net *dev, struct VhostUserMsg *pmsg)
 		dev->max_guest_pages = 8;
 		dev->guest_pages = malloc(dev->max_guest_pages *
 					sizeof(struct guest_page));
+		if (dev->guest_pages == NULL) {
+			RTE_LOG(ERR, VHOST_CONFIG,
+				"(%d) failed to allocate memory "
+				"for dev->guest_pages\n",
+				dev->vid);
+			return -1;
+		}
 	}
 
-	dev->mem = rte_zmalloc("vhost-mem-table", sizeof(struct virtio_memory) +
-		sizeof(struct virtio_memory_region) * memory.nregions, 0);
+	dev->mem = rte_zmalloc("vhost-mem-table", sizeof(struct rte_vhost_memory) +
+		sizeof(struct rte_vhost_mem_region) * memory.nregions, 0);
 	if (dev->mem == NULL) {
 		RTE_LOG(ERR, VHOST_CONFIG,
 			"(%d) failed to allocate memory for dev->mem\n",
@@ -542,7 +584,8 @@ vhost_user_set_mem_table(struct virtio_net *dev, struct VhostUserMsg *pmsg)
 		reg->host_user_addr = (uint64_t)(uintptr_t)mmap_addr +
 				      mmap_offset;
 
-		add_guest_pages(dev, reg, alignment);
+		if (dev->dequeue_zero_copy)
+			add_guest_pages(dev, reg, alignment);
 
 		RTE_LOG(INFO, VHOST_CONFIG,
 			"guest memory region %u, size: 0x%" PRIx64 "\n"
@@ -585,18 +628,17 @@ vq_is_ready(struct vhost_virtqueue *vq)
 static int
 virtio_is_ready(struct virtio_net *dev)
 {
-	struct vhost_virtqueue *rvq, *tvq;
+	struct vhost_virtqueue *vq;
 	uint32_t i;
 
-	for (i = 0; i < dev->virt_qp_nb; i++) {
-		rvq = dev->virtqueue[i * VIRTIO_QNUM + VIRTIO_RXQ];
-		tvq = dev->virtqueue[i * VIRTIO_QNUM + VIRTIO_TXQ];
+	if (dev->nr_vring == 0)
+		return 0;
 
-		if (!vq_is_ready(rvq) || !vq_is_ready(tvq)) {
-			RTE_LOG(INFO, VHOST_CONFIG,
-				"virtio is not ready for processing.\n");
+	for (i = 0; i < dev->nr_vring; i++) {
+		vq = dev->virtqueue[i];
+
+		if (!vq_is_ready(vq))
 			return 0;
-		}
 	}
 
 	RTE_LOG(INFO, VHOST_CONFIG,
@@ -609,7 +651,6 @@ vhost_user_set_vring_call(struct virtio_net *dev, struct VhostUserMsg *pmsg)
 {
 	struct vhost_vring_file file;
 	struct vhost_virtqueue *vq;
-	uint32_t cur_qp_idx;
 
 	file.index = pmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
 	if (pmsg->payload.u64 & VHOST_USER_VRING_NOFD_MASK)
@@ -619,29 +660,13 @@ vhost_user_set_vring_call(struct virtio_net *dev, struct VhostUserMsg *pmsg)
 	RTE_LOG(INFO, VHOST_CONFIG,
 		"vring call idx:%d file:%d\n", file.index, file.fd);
 
-	/*
-	 * FIXME: VHOST_SET_VRING_CALL is the first per-vring message
-	 * we get, so we do vring queue pair allocation here.
-	 */
-	cur_qp_idx = file.index / VIRTIO_QNUM;
-	if (cur_qp_idx + 1 > dev->virt_qp_nb) {
-		if (alloc_vring_queue_pair(dev, cur_qp_idx) < 0)
-			return;
-	}
-
 	vq = dev->virtqueue[file.index];
-	assert(vq != NULL);
-
 	if (vq->callfd >= 0)
 		close(vq->callfd);
 
 	vq->callfd = file.fd;
 }
 
-/*
- * In vhost-user, when we receive kick message, will test whether virtio
- * device is ready for packet processing.
- */
 static void
 vhost_user_set_vring_kick(struct virtio_net *dev, struct VhostUserMsg *pmsg)
 {
@@ -660,11 +685,22 @@ vhost_user_set_vring_kick(struct virtio_net *dev, struct VhostUserMsg *pmsg)
 	if (vq->kickfd >= 0)
 		close(vq->kickfd);
 	vq->kickfd = file.fd;
+}
 
-	if (virtio_is_ready(dev) && !(dev->flags & VIRTIO_DEV_RUNNING)) {
-		if (notify_ops->new_device(dev->vid) == 0)
-			dev->flags |= VIRTIO_DEV_RUNNING;
+static void
+free_zmbufs(struct vhost_virtqueue *vq)
+{
+	struct zcopy_mbuf *zmbuf, *next;
+
+	for (zmbuf = TAILQ_FIRST(&vq->zmbuf_list);
+	     zmbuf != NULL; zmbuf = next) {
+		next = TAILQ_NEXT(zmbuf, next);
+
+		rte_pktmbuf_free(zmbuf->mbuf);
+		TAILQ_REMOVE(&vq->zmbuf_list, zmbuf, next);
 	}
+
+	rte_free(vq->zmbufs);
 }
 
 /*
@@ -672,28 +708,38 @@ vhost_user_set_vring_kick(struct virtio_net *dev, struct VhostUserMsg *pmsg)
 */
 static int
 vhost_user_get_vring_base(struct virtio_net *dev,
-			struct vhost_vring_state *state)
+			VhostUserMsg *msg)
 {
+	struct vhost_virtqueue *vq = dev->virtqueue[msg->payload.state.index];
+
 	/* We have to stop the queue (virtio) if it is running. */
 	if (dev->flags & VIRTIO_DEV_RUNNING) {
 		dev->flags &= ~VIRTIO_DEV_RUNNING;
-		notify_ops->destroy_device(dev->vid);
+		dev->notify_ops->destroy_device(dev->vid);
 	}
 
+	dev->flags &= ~VIRTIO_DEV_READY;
+
 	/* Here we are safe to get the last used index */
-	state->num = dev->virtqueue[state->index]->last_used_idx;
+	msg->payload.state.num = vq->last_used_idx;
 
 	RTE_LOG(INFO, VHOST_CONFIG,
-		"vring base idx:%d file:%d\n", state->index, state->num);
+		"vring base idx:%d file:%d\n", msg->payload.state.index,
+		msg->payload.state.num);
 
 	/*
 	 * Based on current qemu vhost-user implementation, this message is
 	 * sent and only sent in vhost_vring_stop.
 	 * TODO: cleanup the vring, it isn't usable since here.
 	 */
-	if (dev->virtqueue[state->index]->kickfd >= 0)
-		close(dev->virtqueue[state->index]->kickfd);
+	if (vq->kickfd >= 0)
+		close(vq->kickfd);
+
+	vq->kickfd = VIRTIO_UNINITIALIZED_EVENTFD;
 
-	dev->virtqueue[state->index]->kickfd = VIRTIO_UNINITIALIZED_EVENTFD;
+	if (dev->dequeue_zero_copy)
+		free_zmbufs(vq);
+	rte_free(vq->shadow_used_ring);
+	vq->shadow_used_ring = NULL;
 	return 0;
 }
@@ -704,18 +750,19 @@ vhost_user_get_vring_base(struct virtio_net *dev,
 */
 static int
 vhost_user_set_vring_enable(struct virtio_net *dev,
-			struct vhost_vring_state *state)
+			VhostUserMsg *msg)
 {
-	int enable = (int)state->num;
+	int enable = (int)msg->payload.state.num;
 
 	RTE_LOG(INFO, VHOST_CONFIG,
 		"set queue enable: %d to qp idx: %d\n",
-		enable, state->index);
+		enable, msg->payload.state.index);
 
-	if (notify_ops->vring_state_changed)
-		notify_ops->vring_state_changed(dev->vid, state->index, enable);
+	if (dev->notify_ops->vring_state_changed)
+		dev->notify_ops->vring_state_changed(dev->vid,
+				msg->payload.state.index, enable);
 
-	dev->virtqueue[state->index]->enabled = enable;
+	dev->virtqueue[msg->payload.state.index]->enabled = enable;
 
 	return 0;
 }
@@ -811,6 +858,22 @@ vhost_user_send_rarp(struct virtio_net *dev, struct VhostUserMsg *msg)
 	return 0;
 }
 
+static int
+vhost_user_net_set_mtu(struct virtio_net *dev, struct VhostUserMsg *msg)
+{
+	if (msg->payload.u64 < VIRTIO_MIN_MTU ||
+			msg->payload.u64 > VIRTIO_MAX_MTU) {
+		RTE_LOG(ERR, VHOST_CONFIG, "Invalid MTU size (%"PRIu64")\n",
+				msg->payload.u64);
+
+		return -1;
+	}
+
+	dev->mtu = msg->payload.u64;
+
+	return 0;
+}
+
 /* return bytes# of read on success or negative val on failure. */
 static int
 read_vhost_message(int sockfd, struct VhostUserMsg *msg)
@@ -850,6 +913,7 @@ send_vhost_message(int sockfd, struct VhostUserMsg *msg)
 		return 0;
 
 	msg->flags &= ~VHOST_USER_VERSION_MASK;
+	msg->flags &= ~VHOST_USER_NEED_REPLY;
 	msg->flags |= VHOST_USER_VERSION;
 	msg->flags |= VHOST_USER_REPLY_MASK;
 
@@ -859,6 +923,44 @@ send_vhost_message(int sockfd, struct VhostUserMsg *msg)
 	return ret;
 }
 
+/*
+ * Allocate a queue pair if it hasn't been allocated yet
+ */
+static int
+vhost_user_check_and_alloc_queue_pair(struct virtio_net *dev, VhostUserMsg *msg)
+{
+	uint16_t vring_idx;
+
+	switch (msg->request) {
+	case VHOST_USER_SET_VRING_KICK:
+	case VHOST_USER_SET_VRING_CALL:
+	case VHOST_USER_SET_VRING_ERR:
+		vring_idx = msg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
+		break;
+	case VHOST_USER_SET_VRING_NUM:
+	case VHOST_USER_SET_VRING_BASE:
+	case VHOST_USER_SET_VRING_ENABLE:
+		vring_idx = msg->payload.state.index;
+		break;
+	case VHOST_USER_SET_VRING_ADDR:
+		vring_idx = msg->payload.addr.index;
+		break;
+	default:
+		return 0;
+	}
+
+	if (vring_idx >= VHOST_MAX_VRING) {
+		RTE_LOG(ERR, VHOST_CONFIG,
+			"invalid vring index: %u\n", vring_idx);
+		return -1;
+	}
+
+	if (dev->virtqueue[vring_idx])
+		return 0;
+
+	return alloc_vring_queue(dev, vring_idx);
+}
+
 int
 vhost_user_msg_handler(int vid, int fd)
 {
@@ -870,6 +972,16 @@ vhost_user_msg_handler(int vid, int fd)
 	if (dev == NULL)
 		return -1;
 
+	if (!dev->notify_ops) {
+		dev->notify_ops = vhost_driver_callback_get(dev->ifname);
+		if (!dev->notify_ops) {
+			RTE_LOG(ERR, VHOST_CONFIG,
+				"failed to get callback ops for driver %s\n",
+				dev->ifname);
+			return -1;
+		}
+	}
+
 	ret = read_vhost_message(fd, &msg);
 	if (ret <= 0 || msg.request >= VHOST_USER_MAX) {
 		if (ret < 0)
@@ -885,11 +997,20 @@ vhost_user_msg_handler(int vid, int fd)
 		return -1;
 	}
 
+	ret = 0;
 	RTE_LOG(INFO, VHOST_CONFIG, "read message %s\n",
 		vhost_message_str[msg.request]);
+
+	ret = vhost_user_check_and_alloc_queue_pair(dev, &msg);
+	if (ret < 0) {
+		RTE_LOG(ERR, VHOST_CONFIG,
+			"failed to alloc queue\n");
+		return -1;
+	}
+
 	switch (msg.request) {
 	case VHOST_USER_GET_FEATURES:
-		msg.payload.u64 = vhost_user_get_features();
+		msg.payload.u64 = vhost_user_get_features(dev);
 		msg.size = sizeof(msg.payload.u64);
 		send_vhost_message(fd, &msg);
 		break;
@@ -914,7 +1035,7 @@ vhost_user_msg_handler(int vid, int fd)
 		break;
 
 	case VHOST_USER_SET_MEM_TABLE:
-		vhost_user_set_mem_table(dev, &msg);
+		ret = vhost_user_set_mem_table(dev, &msg);
 		break;
 
 	case VHOST_USER_SET_LOG_BASE:
@@ -930,17 +1051,17 @@ vhost_user_msg_handler(int vid, int fd)
 		break;
 
 	case VHOST_USER_SET_VRING_NUM:
-		vhost_user_set_vring_num(dev, &msg.payload.state);
+		vhost_user_set_vring_num(dev, &msg);
 		break;
 	case VHOST_USER_SET_VRING_ADDR:
-		vhost_user_set_vring_addr(dev, &msg.payload.addr);
+		vhost_user_set_vring_addr(dev, &msg);
 		break;
 	case VHOST_USER_SET_VRING_BASE:
-		vhost_user_set_vring_base(dev, &msg.payload.state);
+		vhost_user_set_vring_base(dev, &msg);
 		break;
 	case VHOST_USER_GET_VRING_BASE:
-		ret = vhost_user_get_vring_base(dev, &msg.payload.state);
+		vhost_user_get_vring_base(dev, &msg);
 		msg.size = sizeof(msg.payload.state);
 		send_vhost_message(fd, &msg);
 		break;
@@ -965,16 +1086,41 @@ vhost_user_msg_handler(int vid, int fd)
 		break;
 
 	case VHOST_USER_SET_VRING_ENABLE:
-		vhost_user_set_vring_enable(dev, &msg.payload.state);
+		vhost_user_set_vring_enable(dev, &msg);
 		break;
 
 	case VHOST_USER_SEND_RARP:
 		vhost_user_send_rarp(dev, &msg);
 		break;
 
+	case VHOST_USER_NET_SET_MTU:
+		ret = vhost_user_net_set_mtu(dev, &msg);
+		break;
+
 	default:
+		ret = -1;
 		break;
 	}
 
+	if (msg.flags & VHOST_USER_NEED_REPLY) {
+		msg.payload.u64 = !!ret;
+		msg.size = sizeof(msg.payload.u64);
+		send_vhost_message(fd, &msg);
+	}
+
+	if (!(dev->flags & VIRTIO_DEV_RUNNING) && virtio_is_ready(dev)) {
+		dev->flags |= VIRTIO_DEV_READY;
+
+		if (!(dev->flags & VIRTIO_DEV_RUNNING)) {
+			if (dev->dequeue_zero_copy) {
+				RTE_LOG(INFO, VHOST_CONFIG,
+						"dequeue zero copy is enabled\n");
+			}
+
+			if (dev->notify_ops->new_device(dev->vid) == 0)
+				dev->flags |= VIRTIO_DEV_RUNNING;
+		}
+	}
+
 	return 0;
 }
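
For orientation only, not part of the patch: a minimal sketch of the application-facing side of this rework, assuming the public rte_vhost API of the same DPDK release. Callbacks are registered per socket path, which is what lets vhost_user_msg_handler() above resolve dev->notify_ops from dev->ifname instead of using the old global notify_ops, and rte_vhost_mtu_get() surfaces the value validated by vhost_user_net_set_mtu(). The socket path and the callback bodies are illustrative placeholders.

#include <stdio.h>
#include <unistd.h>
#include <rte_eal.h>
#include <rte_vhost.h>

static int
new_device(int vid)
{
	uint16_t mtu = 0;

	/* Only meaningful once VIRTIO_NET_F_MTU has been negotiated. */
	if (rte_vhost_mtu_get(vid, &mtu) == 0)
		printf("vhost device %d ready, MTU %u\n", vid, mtu);
	return 0;
}

static void
destroy_device(int vid)
{
	printf("vhost device %d removed\n", vid);
}

/* Registered per socket path; the library looks it up via dev->ifname. */
static const struct vhost_device_ops ops = {
	.new_device = new_device,
	.destroy_device = destroy_device,
};

int
main(int argc, char **argv)
{
	const char *path = "/tmp/vhost-user0";	/* placeholder socket path */

	if (rte_eal_init(argc, argv) < 0)
		return 1;
	if (rte_vhost_driver_register(path, 0) < 0)
		return 1;
	if (rte_vhost_driver_callback_register(path, &ops) < 0)
		return 1;
	if (rte_vhost_driver_start(path) < 0)
		return 1;

	/* The vhost-user connection is serviced by a library thread. */
	for (;;)
		sleep(1);
}

One process can serve several vhost-user sockets with different callbacks this way, which is the design motivation for the per-path lookup introduced by the patch.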