{
struct orig_region_map *region;
unsigned int idx;
- uint64_t alignment;
if (!dev || !dev->mem)
return;
region = orig_region(dev->mem, dev->mem->nregions);
for (idx = 0; idx < dev->mem->nregions; idx++) {
if (region[idx].mapped_address) {
- alignment = region[idx].blksz;
- munmap((void *)(uintptr_t)
- RTE_ALIGN_FLOOR(
- region[idx].mapped_address, alignment),
- RTE_ALIGN_CEIL(
- region[idx].mapped_size, alignment));
+ munmap((void *)(uintptr_t)region[idx].mapped_address,
+ region[idx].mapped_size);
close(region[idx].fd);
}
}
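
The direct munmap() above is safe only because the map path below now stores mapped_size already rounded up to the hugepage size. A minimal sketch of the rounding involved, assuming power-of-two alignments as RTE_ALIGN_FLOOR/RTE_ALIGN_CEIL require in rte_common.h:

#include <stdint.h>

/* Power-of-two rounding, equivalent to RTE_ALIGN_FLOOR/RTE_ALIGN_CEIL:
 * with a 2 MB hugepage, a 5 MB region is mapped (and later unmapped)
 * as 6 MB. */
static inline uint64_t
align_floor(uint64_t v, uint64_t align)
{
	return v & ~(align - 1);
}

static inline uint64_t
align_ceil(uint64_t v, uint64_t align)
{
	return align_floor(v + align - 1, align);
}
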
/* This is ugly */
mapped_size = memory.regions[idx].memory_size +
memory.regions[idx].mmap_offset;
+
+ /* mmap() without the MAP_ANONYMOUS flag must be called with a
+ * length argument aligned to the hugepage size on older longterm
+ * Linux kernels, such as 2.6.32 and 3.2.72, or mmap() will fail
+ * with EINVAL.
+ *
+ * To avoid that failure, keep the length aligned here.
+ */
+ alignment = get_blk_size(pmsg->fds[idx]);
+ mapped_size = RTE_ALIGN_CEIL(mapped_size, alignment);
+
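
get_blk_size() is not shown in this hunk; presumably it returns the hugepage size backing the fd. A minimal sketch, assuming fstat()'s st_blksize reports the hugepage size for a hugetlbfs-backed descriptor (the error handling here is ours):

#include <stdint.h>
#include <sys/stat.h>

static uint64_t
get_blk_size(int fd)
{
	struct stat st;

	/* On hugetlbfs, st_blksize equals the hugepage size. */
	if (fstat(fd, &st) == -1)
		return 0;	/* caller must treat 0 as an error */
	return (uint64_t)st.st_blksize;
}
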
mapped_address = (uint64_t)(uintptr_t)mmap(NULL,
	mapped_size,
	PROT_READ | PROT_WRITE, MAP_SHARED,
	pmsg->fds[idx],
	0);
RTE_LOG(INFO, VHOST_CONFIG,
- "mapped region %d fd:%d to %p sz:0x%"PRIx64" off:0x%"PRIx64"\n",
+ "mapped region %d fd:%d to:%p sz:0x%"PRIx64" "
+ "off:0x%"PRIx64" align:0x%"PRIx64"\n",
idx, pmsg->fds[idx], (void *)(uintptr_t)mapped_address,
- mapped_size, memory.regions[idx].mmap_offset);
+ mapped_size, memory.regions[idx].mmap_offset,
+ alignment);
if (mapped_address == (uint64_t)(uintptr_t)MAP_FAILED) {
RTE_LOG(ERR, VHOST_CONFIG,
pregion_orig[idx].mapped_address = mapped_address;
pregion_orig[idx].mapped_size = mapped_size;
- pregion_orig[idx].blksz = get_blk_size(pmsg->fds[idx]);
+ pregion_orig[idx].blksz = alignment;
pregion_orig[idx].fd = pmsg->fds[idx];
mapped_address += memory.regions[idx].mmap_offset;
err_mmap:
while (idx--) {
- alignment = pregion_orig[idx].blksz;
- munmap((void *)(uintptr_t)RTE_ALIGN_FLOOR(
- pregion_orig[idx].mapped_address, alignment),
- RTE_ALIGN_CEIL(pregion_orig[idx].mapped_size,
- alignment));
+ munmap((void *)(uintptr_t)pregion_orig[idx].mapped_address,
+ pregion_orig[idx].mapped_size);
close(pregion_orig[idx].fd);
}
free(dev->mem);
* sent and only sent in vhost_vring_stop.
* TODO: clean up the vring; it isn't usable from this point on.
*/
- if (dev->virtqueue[state->index + VIRTIO_RXQ]->kickfd >= 0) {
- close(dev->virtqueue[state->index + VIRTIO_RXQ]->kickfd);
- dev->virtqueue[state->index + VIRTIO_RXQ]->kickfd = -1;
+ if (dev->virtqueue[state->index]->kickfd >= 0) {
+ close(dev->virtqueue[state->index]->kickfd);
+ dev->virtqueue[state->index]->kickfd = -1;
}
- if (dev->virtqueue[state->index + VIRTIO_TXQ]->kickfd >= 0) {
- close(dev->virtqueue[state->index + VIRTIO_TXQ]->kickfd);
- dev->virtqueue[state->index + VIRTIO_TXQ]->kickfd = -1;
+
+ return 0;
+}
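
Only one kickfd is closed per message now: with multiqueue, GET_VRING_BASE arrives once per vring rather than once per queue pair. A sketch of the index layout this assumes, where each queue pair owns two consecutive vrings (VIRTIO_QNUM, VIRTIO_RXQ and VIRTIO_TXQ as used elsewhere in this file):

#include <stdint.h>

#define VIRTIO_RXQ	0
#define VIRTIO_TXQ	1
#define VIRTIO_QNUM	2

/* Vring index <-> queue pair mapping assumed by this file:
 * qp 0 owns vrings 0 (RX) and 1 (TX), qp 1 owns vrings 2 and 3, ... */
static inline uint16_t
qp_of_vring(uint16_t vring_idx)
{
	return vring_idx / VIRTIO_QNUM;
}

static inline uint16_t
vring_of_qp(uint16_t qp_idx, int is_tx)
{
	return qp_idx * VIRTIO_QNUM + (is_tx ? VIRTIO_TXQ : VIRTIO_RXQ);
}
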
+
+/*
+ * When the virtio queues are ready to work, QEMU sends this message
+ * to enable the virtio queue pair.
+ */
+int
+user_set_vring_enable(struct vhost_device_ctx ctx,
+ struct vhost_vring_state *state)
+{
+ struct virtio_net *dev = get_device(ctx);
+ uint16_t base_idx = state->index;
+ int enable = (int)state->num;
+
+ RTE_LOG(INFO, VHOST_CONFIG,
+ "set queue enable: %d to qp idx: %d\n",
+ enable, state->index);
+
+ if (notify_ops->vring_state_changed) {
+ notify_ops->vring_state_changed(dev, base_idx / VIRTIO_QNUM,
+ enable);
}
+ dev->virtqueue[base_idx + VIRTIO_RXQ]->enabled = enable;
+ dev->virtqueue[base_idx + VIRTIO_TXQ]->enabled = enable;
+
return 0;
}
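
For completeness, a hedged example of an application-side vring_state_changed callback matching the call above; the function name and body are ours, and the signature is inferred from the call site:

#include <stdio.h>
#include <stdint.h>

struct virtio_net;	/* opaque here; defined by the vhost library */

static int
app_vring_state_changed(struct virtio_net *dev, uint16_t queue_id, int enable)
{
	(void)dev;
	/* e.g. start or stop polling this queue pair in the datapath */
	printf("queue pair %u %s\n", queue_id,
	       enable ? "enabled" : "disabled");
	return 0;
}
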