/* Called with iotlb_lock read-locked */
uint64_t
__vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
- uint64_t iova, uint64_t size, uint8_t perm)
+ uint64_t iova, uint64_t *size, uint8_t perm)
{
uint64_t vva, tmp_size;
- if (unlikely(!size))
+ if (unlikely(!*size))
return 0;
- tmp_size = size;
+ tmp_size = *size;
vva = vhost_user_iotlb_cache_find(vq, iova, &tmp_size, perm);
- if (tmp_size == size)
+ if (tmp_size == *size)
return vva;
iova += tmp_size;
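This is the core contract change: the translation size is now passed by pointer, so vhost_user_iotlb_cache_find() can report how many bytes starting at iova are actually covered, and a host address is only returned when the whole requested range translated. Below is a minimal sketch, not part of the patch, of how a caller can consume partial translations chunk by chunk. It assumes the helper shrinks *len to the contiguously mapped length on success, which is the contract the later patches in this series rely on; copy_from_guest() is a hypothetical name.

	/*
	 * Hypothetical illustration: walk a guest buffer in chunks,
	 * retranslating wherever the host mapping stops being contiguous.
	 * Assumes *len is shrunk to the length actually mapped.
	 */
	static int
	copy_from_guest(struct virtio_net *dev, struct vhost_virtqueue *vq,
			uint64_t iova, uint64_t len, char *dst)
	{
		while (len) {
			uint64_t chunk = len;
			uint64_t vva = vhost_iova_to_vva(dev, vq, iova,
						&chunk, VHOST_ACCESS_RO);

			if (unlikely(!vva || !chunk))
				return -1; /* unmapped or IOTLB miss pending */

			rte_memcpy(dst, (void *)(uintptr_t)vva, chunk);
			dst += chunk;
			iova += chunk;
			len -= chunk;
		}

		return 0;
	}

The next hunk applies the same full-coverage requirement to the ring addresses themselves when an IOMMU is in use: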
int
vring_translate(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
- uint64_t size;
+ uint64_t req_size, size;
if (!(dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)))
goto out;
- size = sizeof(struct vring_desc) * vq->size;
+ req_size = sizeof(struct vring_desc) * vq->size;
+ size = req_size;
vq->desc = (struct vring_desc *)(uintptr_t)vhost_iova_to_vva(dev, vq,
vq->ring_addrs.desc_user_addr,
- size, VHOST_ACCESS_RW);
- if (!vq->desc)
+ &size, VHOST_ACCESS_RW);
+ if (!vq->desc || size != req_size)
return -1;
- size = sizeof(struct vring_avail);
- size += sizeof(uint16_t) * vq->size;
+ req_size = sizeof(struct vring_avail);
+ req_size += sizeof(uint16_t) * vq->size;
+ if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))
+ req_size += sizeof(uint16_t);
+ size = req_size;
vq->avail = (struct vring_avail *)(uintptr_t)vhost_iova_to_vva(dev, vq,
vq->ring_addrs.avail_user_addr,
- size, VHOST_ACCESS_RW);
- if (!vq->avail)
+ &size, VHOST_ACCESS_RW);
+ if (!vq->avail || size != req_size)
return -1;
- size = sizeof(struct vring_used);
- size += sizeof(struct vring_used_elem) * vq->size;
+ req_size = sizeof(struct vring_used);
+ req_size += sizeof(struct vring_used_elem) * vq->size;
+ if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))
+ req_size += sizeof(uint16_t);
+ size = req_size;
vq->used = (struct vring_used *)(uintptr_t)vhost_iova_to_vva(dev, vq,
vq->ring_addrs.used_user_addr,
- size, VHOST_ACCESS_RW);
- if (!vq->used)
+ &size, VHOST_ACCESS_RW);
+ if (!vq->used || size != req_size)
return -1;
out:
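vring_translate() now keeps the requested size (req_size) separate from the size the translation actually covered, and rejects any ring that is not fully mapped as one virtually contiguous block. It also grows the requested sizes to cover the trailing event-index word that VIRTIO_RING_F_EVENT_IDX appends to each ring (used_event after the avail ring, avail_event after the used ring); previously those two bytes fell outside the validated range. A self-contained sketch of the arithmetic, using local stand-ins for the split-ring layouts from the VIRTIO 1.0 spec:

	#include <inttypes.h>
	#include <stdio.h>

	/* Local equivalents of the split-ring layouts (VIRTIO 1.0). */
	struct vring_desc {
		uint64_t addr;
		uint32_t len;
		uint16_t flags;
		uint16_t next;
	};
	struct vring_avail {
		uint16_t flags;
		uint16_t idx;
		uint16_t ring[];
	};
	struct vring_used_elem {
		uint32_t id;
		uint32_t len;
	};
	struct vring_used {
		uint16_t flags;
		uint16_t idx;
		struct vring_used_elem ring[];
	};

	static void
	compute_ring_sizes(uint16_t qsz, int event_idx)
	{
		uint64_t desc = sizeof(struct vring_desc) * qsz;
		uint64_t avail = sizeof(struct vring_avail)
				+ sizeof(uint16_t) * qsz
				+ (event_idx ? sizeof(uint16_t) : 0);
		uint64_t used = sizeof(struct vring_used)
				+ sizeof(struct vring_used_elem) * qsz
				+ (event_idx ? sizeof(uint16_t) : 0);

		printf("desc=%" PRIu64 " avail=%" PRIu64 " used=%" PRIu64 "\n",
		       desc, avail, used);
	}

	int
	main(void)
	{
		compute_ring_sizes(256, 1); /* desc=4096 avail=518 used=2054 */
		return 0;
	}

The header declarations change to match: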
void vhost_backend_cleanup(struct virtio_net *dev);
uint64_t __vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
- uint64_t iova, uint64_t size, uint8_t perm);
+ uint64_t iova, uint64_t *len, uint8_t perm);
int vring_translate(struct virtio_net *dev, struct vhost_virtqueue *vq);
void vring_invalidate(struct virtio_net *dev, struct vhost_virtqueue *vq);
static __rte_always_inline uint64_t
vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
- uint64_t iova, uint64_t size, uint8_t perm)
+ uint64_t iova, uint64_t *len, uint8_t perm)
{
if (!(dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)))
return rte_vhost_gpa_to_vva(dev->mem, iova);
- return __vhost_iova_to_vva(dev, vq, iova, size, perm);
+ return __vhost_iova_to_vva(dev, vq, iova, len, perm);
}
#define vhost_used_event(vr) \
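From here on, the patch touches every datapath call site in virtio_net.c. Each one follows the same guard pattern: save the descriptor length you need, let the translation shrink it, and treat any shrink as a failed translation. An illustrative helper capturing that pattern; map_one_desc() is a hypothetical name, not part of the patch:

	/*
	 * Translate one descriptor buffer and fail unless the whole
	 * requested range was mapped virtually contiguously.
	 */
	static __rte_always_inline uint64_t
	map_one_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
		     const struct vring_desc *desc, uint8_t perm)
	{
		uint64_t dlen = desc->len;	/* length we need */
		uint64_t vva = vhost_iova_to_vva(dev, vq, desc->addr,
						 &dlen, perm);

		/* A shrunken dlen means the buffer is only partially
		 * mapped; using it anyway would read or write past the
		 * end of the translated region. */
		if (unlikely(!vva || dlen != desc->len))
			return 0;

		return vva;
	}

The first call site is the Rx path (copy_mbuf_to_desc()), which also keeps its existing virtio-net header length check: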
uint32_t desc_avail, desc_offset;
uint32_t mbuf_avail, mbuf_offset;
uint32_t cpy_len;
+ uint64_t dlen;
struct vring_desc *desc;
uint64_t desc_addr;
/* A counter to avoid desc dead loop chain */
int error = 0;
desc = &descs[desc_idx];
+ dlen = desc->len;
desc_addr = vhost_iova_to_vva(dev, vq, desc->addr,
- desc->len, VHOST_ACCESS_RW);
+ &dlen, VHOST_ACCESS_RW);
/*
* Checking of 'desc_addr' placed outside of 'unlikely' macro to avoid
* performance issue with some versions of gcc (4.8.4 and 5.3.0) which
* otherwise stores offset on the stack instead of in a register.
*/
- if (unlikely(desc->len < dev->vhost_hlen) || !desc_addr) {
+ if (unlikely(dlen != desc->len || desc->len < dev->vhost_hlen) ||
+ !desc_addr) {
error = -1;
goto out;
}
}
desc = &descs[desc->next];
+ dlen = desc->len;
desc_addr = vhost_iova_to_vva(dev, vq, desc->addr,
- desc->len,
+ &dlen,
VHOST_ACCESS_RW);
- if (unlikely(!desc_addr)) {
+ if (unlikely(!desc_addr || dlen != desc->len)) {
error = -1;
goto out;
}
int err;
if (vq->desc[desc_idx].flags & VRING_DESC_F_INDIRECT) {
+ uint64_t dlen = vq->desc[desc_idx].len;
descs = (struct vring_desc *)(uintptr_t)
vhost_iova_to_vva(dev,
vq, vq->desc[desc_idx].addr,
- vq->desc[desc_idx].len,
- VHOST_ACCESS_RO);
- if (unlikely(!descs)) {
+ &dlen, VHOST_ACCESS_RO);
+ if (unlikely(!descs ||
+ dlen != vq->desc[desc_idx].len)) {
count = i;
break;
}
uint16_t idx = vq->avail->ring[avail_idx & (vq->size - 1)];
uint32_t vec_id = *vec_idx;
uint32_t len = 0;
+ uint64_t dlen;
struct vring_desc *descs = vq->desc;
*desc_chain_head = idx;
if (vq->desc[idx].flags & VRING_DESC_F_INDIRECT) {
+ dlen = vq->desc[idx].len;
descs = (struct vring_desc *)(uintptr_t)
vhost_iova_to_vva(dev, vq, vq->desc[idx].addr,
- vq->desc[idx].len,
+ &dlen,
VHOST_ACCESS_RO);
- if (unlikely(!descs))
+ if (unlikely(!descs || dlen != vq->desc[idx].len))
return -1;
idx = 0;
uint32_t mbuf_offset, mbuf_avail;
uint32_t desc_offset, desc_avail;
uint32_t cpy_len;
+ uint64_t dlen;
uint64_t hdr_addr, hdr_phys_addr;
struct rte_mbuf *hdr_mbuf;
struct batch_copy_elem *batch_copy = vq->batch_copy_elems;
goto out;
}
+ dlen = buf_vec[vec_idx].buf_len;
desc_addr = vhost_iova_to_vva(dev, vq, buf_vec[vec_idx].buf_addr,
- buf_vec[vec_idx].buf_len,
- VHOST_ACCESS_RW);
- if (buf_vec[vec_idx].buf_len < dev->vhost_hlen || !desc_addr) {
+ &dlen, VHOST_ACCESS_RW);
+ if (dlen != buf_vec[vec_idx].buf_len ||
+ buf_vec[vec_idx].buf_len < dev->vhost_hlen ||
+ !desc_addr) {
error = -1;
goto out;
}
/* done with current desc buf, get the next one */
if (desc_avail == 0) {
vec_idx++;
+ dlen = buf_vec[vec_idx].buf_len;
desc_addr =
vhost_iova_to_vva(dev, vq,
buf_vec[vec_idx].buf_addr,
- buf_vec[vec_idx].buf_len,
+ &dlen,
VHOST_ACCESS_RW);
- if (unlikely(!desc_addr)) {
+ if (unlikely(!desc_addr ||
+ dlen != buf_vec[vec_idx].buf_len)) {
error = -1;
goto out;
}
uint32_t desc_avail, desc_offset;
uint32_t mbuf_avail, mbuf_offset;
uint32_t cpy_len;
+ uint64_t dlen;
struct rte_mbuf *cur = m, *prev = m;
struct virtio_net_hdr *hdr = NULL;
/* A counter to avoid desc dead loop chain */
goto out;
}
+ dlen = desc->len;
desc_addr = vhost_iova_to_vva(dev,
vq, desc->addr,
- desc->len,
+ &dlen,
VHOST_ACCESS_RO);
- if (unlikely(!desc_addr)) {
+ if (unlikely(!desc_addr || dlen != desc->len)) {
error = -1;
goto out;
}
goto out;
}
+ dlen = desc->len;
desc_addr = vhost_iova_to_vva(dev,
vq, desc->addr,
- desc->len,
+ &dlen,
VHOST_ACCESS_RO);
- if (unlikely(!desc_addr)) {
+ if (unlikely(!desc_addr || dlen != desc->len)) {
error = -1;
goto out;
}
goto out;
}
+ dlen = desc->len;
desc_addr = vhost_iova_to_vva(dev,
vq, desc->addr,
- desc->len,
- VHOST_ACCESS_RO);
- if (unlikely(!desc_addr)) {
+ &dlen, VHOST_ACCESS_RO);
+ if (unlikely(!desc_addr || dlen != desc->len)) {
error = -1;
goto out;
}
for (i = 0; i < count; i++) {
struct vring_desc *desc;
uint16_t sz, idx;
+ uint64_t dlen;
int err;
if (likely(i + 1 < count))
rte_prefetch0(&vq->desc[desc_indexes[i + 1]]);
if (vq->desc[desc_indexes[i]].flags & VRING_DESC_F_INDIRECT) {
+ dlen = vq->desc[desc_indexes[i]].len;
desc = (struct vring_desc *)(uintptr_t)
vhost_iova_to_vva(dev, vq,
vq->desc[desc_indexes[i]].addr,
- vq->desc[desc_indexes[i]].len,
+ &dlen,
VHOST_ACCESS_RO);
- if (unlikely(!desc))
+ if (unlikely(!desc ||
+ dlen != vq->desc[desc_indexes[i]].len))
break;
rte_prefetch0(desc);
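The indirect-table call sites are the most safety-critical of the series: descs is walked with plain array indexing in host virtual address space, so the table must be both fully mapped and virtually contiguous, which is exactly what the dlen comparison enforces. Follow-up patches in this series go a step further and fall back to copying a non-contiguous indirect table into a linear buffer before parsing it. A sketch of that idea under the same assumed *len-shrinking contract; linearize_ind_table() is a hypothetical name and the error handling is illustrative only:

	static struct vring_desc *
	linearize_ind_table(struct virtio_net *dev, struct vhost_virtqueue *vq,
			    uint64_t desc_addr, uint64_t desc_len)
	{
		struct vring_desc *idesc;
		uint64_t src = desc_addr, remaining = desc_len;
		char *dst;

		/* A valid table is a whole number of descriptors. */
		if (unlikely(!desc_len || desc_len % sizeof(struct vring_desc)))
			return NULL;

		idesc = rte_malloc(NULL, desc_len, 0);
		if (unlikely(!idesc))
			return NULL;

		dst = (char *)idesc;
		while (remaining) {
			uint64_t chunk = remaining;
			uint64_t vva = vhost_iova_to_vva(dev, vq, src,
						&chunk, VHOST_ACCESS_RO);

			if (unlikely(!vva || !chunk)) {
				rte_free(idesc);
				return NULL;
			}

			rte_memcpy(dst, (void *)(uintptr_t)vva, chunk);
			src += chunk;
			dst += chunk;
			remaining -= chunk;
		}

		return idesc;
	}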