struct guest_page {
uint64_t guest_phys_addr;
- uint64_t host_phys_addr;
+ uint64_t host_iova;
uint64_t size;
};
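/*
 * gpa_to_first_hpa(): both the binary-search hit path and the linear-scan
 * fallback below translate a guest physical address to the host IOVA of
 * the containing page, clamping *hpa_size when the requested range spills
 * past the end of that page.
 */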
if (gpa + gpa_size <=
page->guest_phys_addr + page->size) {
return gpa - page->guest_phys_addr +
- page->host_phys_addr;
+ page->host_iova;
} else if (gpa < page->guest_phys_addr +
page->size) {
*hpa_size = page->guest_phys_addr +
page->size - gpa;
return gpa - page->guest_phys_addr +
- page->host_phys_addr;
+ page->host_iova;
}
}
} else {
if (gpa + gpa_size <=
page->guest_phys_addr + page->size) {
return gpa - page->guest_phys_addr +
- page->host_phys_addr;
+ page->host_iova;
} else if (gpa < page->guest_phys_addr +
page->size) {
*hpa_size = page->guest_phys_addr +
page->size - gpa;
return gpa - page->guest_phys_addr +
- page->host_phys_addr;
+ page->host_iova;
}
}
}
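/*
 * add_one_guest_page() appends one guest-physical to host-IOVA mapping,
 * merging it into the previous entry when the host IOVAs are contiguous.
 */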
static int
add_one_guest_page(struct virtio_net *dev, uint64_t guest_phys_addr,
- uint64_t host_phys_addr, uint64_t size)
+ uint64_t host_iova, uint64_t size)
{
struct guest_page *page, *last_page;
struct guest_page *old_pages;
if (dev->nr_guest_pages > 0) {
last_page = &dev->guest_pages[dev->nr_guest_pages - 1];
/* merge if the two pages are contiguous */
- if (host_phys_addr == last_page->host_phys_addr +
+ if (host_iova == last_page->host_iova +
last_page->size) {
last_page->size += size;
return 0;
}
}
page = &dev->guest_pages[dev->nr_guest_pages++];
page->guest_phys_addr = guest_phys_addr;
- page->host_phys_addr = host_phys_addr;
+ page->host_iova = host_iova;
page->size = size;
return 0;
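/*
 * Caller side (likely add_guest_pages() in vhost_user.c): a guest memory
 * region is split at host page granularity and each chunk's host virtual
 * address is translated to an IOVA with rte_mem_virt2iova().
 */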
uint64_t reg_size = reg->size;
uint64_t host_user_addr = reg->host_user_addr;
uint64_t guest_phys_addr = reg->guest_phys_addr;
- uint64_t host_phys_addr;
+ uint64_t host_iova;
uint64_t size;
- host_phys_addr = rte_mem_virt2iova((void *)(uintptr_t)host_user_addr);
+ host_iova = rte_mem_virt2iova((void *)(uintptr_t)host_user_addr);
size = page_size - (guest_phys_addr & (page_size - 1));
size = RTE_MIN(size, reg_size);
- if (add_one_guest_page(dev, guest_phys_addr, host_phys_addr, size) < 0)
+ if (add_one_guest_page(dev, guest_phys_addr, host_iova, size) < 0)
return -1;
host_user_addr += size;
while (reg_size > 0) {
size = RTE_MIN(reg_size, page_size);
- host_phys_addr = rte_mem_virt2iova((void *)(uintptr_t)
+ host_iova = rte_mem_virt2iova((void *)(uintptr_t)
host_user_addr);
- if (add_one_guest_page(dev, guest_phys_addr, host_phys_addr,
+ if (add_one_guest_page(dev, guest_phys_addr, host_iova,
size) < 0)
return -1;
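/*
 * Debug dump of the guest page array: the per-page log line follows the
 * field rename from host_phys_addr to host_iova.
 */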
dev->ifname, i);
VHOST_LOG_CONFIG(INFO, "(%s)\tguest_phys_addr: %" PRIx64 "\n",
dev->ifname, page->guest_phys_addr);
- VHOST_LOG_CONFIG(INFO, "(%s)\thost_phys_addr : %" PRIx64 "\n",
- dev->ifname, page->host_phys_addr);
+ VHOST_LOG_CONFIG(INFO, "(%s)\thost_iova : %" PRIx64 "\n",
+ dev->ifname, page->host_iova);
VHOST_LOG_CONFIG(INFO, "(%s)\tsize : %" PRIx64 "\n",
dev->ifname, page->size);
}
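/*
 * Async (DMA-accelerated) datapath: the local "hpa" pointer becomes
 * "host_iova"; each copy segment is resolved with gpa_to_first_hpa() and
 * queued through async_iter_add_iovec() using the host IOVA.
 */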
struct vhost_async *async = vq->async;
uint64_t mapped_len;
uint32_t buf_offset = 0;
- void *hpa;
+ void *host_iova;
while (cpy_len) {
- hpa = (void *)(uintptr_t)gpa_to_first_hpa(dev,
+ host_iova = (void *)(uintptr_t)gpa_to_first_hpa(dev,
buf_iova + buf_offset, cpy_len, &mapped_len);
- if (unlikely(!hpa)) {
- VHOST_LOG_DATA(ERR, "(%s) %s: failed to get hpa.\n", dev->ifname, __func__);
+ if (unlikely(!host_iova)) {
+ VHOST_LOG_DATA(ERR, "(%s) %s: failed to get host iova.\n",
+ dev->ifname, __func__);
return -1;
}
if (unlikely(async_iter_add_iovec(dev, async,
(void *)(uintptr_t)rte_pktmbuf_iova_offset(m,
mbuf_offset),
- hpa, (size_t)mapped_len)))
+ host_iova, (size_t)mapped_len)))
return -1;
cpy_len -= (uint32_t)mapped_len;