 	while (idx_m != idx) {
 		/* avail entry copy */
 		desc_id = vq->avail->ring[idx_m & (vq->size - 1)];
+		if (unlikely(desc_id >= vq->size))
+			return -1;
+
 		s_vring->avail->ring[idx_m & (vq->size - 1)] = desc_id;
 		desc_ring = vq->desc;
 		/* check if the buf addr is within the guest memory */
 		do {
+			if (unlikely(desc_id >= vq->size))
+				goto fail;
 			desc = desc_ring[desc_id];
 			perm = desc.flags & VRING_DESC_F_WRITE ?
 				VHOST_ACCESS_WO : VHOST_ACCESS_RO;
 			if (invalid_desc_check(dev, vq, desc.addr, desc.len,
-						perm)) {
-				if (unlikely(idesc))
-					free_ind_table(idesc);
-				return -1;
-			}
+						perm))
+				goto fail;
 			desc_id = desc.next;
 		} while (desc.flags & VRING_DESC_F_NEXT);
 		vhost_avail_event(vq) = idx;
 	return ret;
+
+fail:
+	if (unlikely(idesc))
+		free_ind_table(idesc);
+	return -1;
 }
 int __rte_experimental
 		desc_id = vq->used->ring[idx & (vq->size - 1)].id;
 		desc_ring = vq->desc;
+		if (unlikely(desc_id >= vq->size))
+			return -1;
+
 		if (vq->desc[desc_id].flags & VRING_DESC_F_INDIRECT) {
 			dlen = vq->desc[desc_id].len;
 			desc_ring = (struct vring_desc *)(uintptr_t)
 		/* dirty page logging for DMA writeable buffer */
 		do {
+			if (unlikely(desc_id >= vq->size))
+				goto fail;
 			desc = desc_ring[desc_id];
 			if (desc.flags & VRING_DESC_F_WRITE)
 				vhost_log_write(dev, desc.addr, desc.len);
 		vring_used_event(s_vring) = idx_m;
 	return ret;
+
+fail:
+	if (unlikely(idesc))
+		free_ind_table(idesc);
+	return -1;
 }
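
Both hunks apply the same pattern: a descriptor index read from guest-writable memory is checked against the ring size before it is used to index the descriptor array, and every failure inside the chain walk funnels through a single fail: label so the temporary indirect-table copy (idesc) is always freed. Below is a minimal, self-contained sketch of that pattern under simplified assumptions; the names and types (demo_desc, walk_chain, DEMO_F_NEXT) are invented for illustration and are not the DPDK vring definitions.

/*
 * Illustrative sketch only (not DPDK code): simplified stand-ins for the
 * descriptor-chain walk patched above.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define DEMO_F_NEXT 0x1	/* chain continues at .next */

struct demo_desc {
	uint32_t len;
	uint16_t flags;
	uint16_t next;	/* guest-controlled link to the next descriptor */
};

/*
 * Walk a chain inside a ring of 'size' descriptors. 'scratch' models a
 * temporary indirect-table copy that must be freed on every exit path,
 * which the single 'fail' label keeps in one place.
 */
static int
walk_chain(const struct demo_desc *ring, uint16_t size, uint16_t desc_id,
		void *scratch)
{
	struct demo_desc desc;

	do {
		/* Guest-controlled index: validate before dereferencing. */
		if (desc_id >= size)
			goto fail;
		desc = ring[desc_id];
		/* ... per-descriptor work (permission check, logging) ... */
		desc_id = desc.next;
	} while (desc.flags & DEMO_F_NEXT);

	free(scratch);
	return 0;

fail:
	free(scratch);
	return -1;
}

int
main(void)
{
	/* One descriptor whose 'next' points outside the 4-entry ring. */
	struct demo_desc ring[4] = {
		{ .len = 64, .flags = DEMO_F_NEXT, .next = 9 },
	};

	/* Prints -1: the bad index is rejected instead of being read. */
	printf("%d\n", walk_chain(ring, 4, 0, NULL));
	return 0;
}

Routing the new in-loop index check and the existing invalid_desc_check() failure through one label is also why the -/+ hunk above replaces the inline free-and-return with goto fail: the cleanup for idesc now lives in exactly one place per function.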