This patch uses the new rte_vhost_va_from_guest_pa() API
to ensure the entire descriptor buffer is mapped contiguously
in the application virtual address space.
It does not handle buffers that are discontiguous in host virtual
address space; in that case it only returns an error.
This issue has been assigned CVE-2018-1059.
Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
(1 << VIRTIO_CRYPTO_SERVICE_MAC) | \
(1 << VIRTIO_NET_F_CTRL_VQ))
(1 << VIRTIO_CRYPTO_SERVICE_MAC) | \
(1 << VIRTIO_NET_F_CTRL_VQ))
/*
 * Map a guest physical address (a) to an application virtual address,
 * cast to pointer type t. l points to the requested length in bytes;
 * on return it holds the length that is actually mapped contiguously,
 * so callers must verify *l is unchanged before using the pointer
 * (CVE-2018-1059 hardening). Arguments are parenthesized to avoid
 * precedence surprises when callers pass expressions.
 */
#define GPA_TO_VVA(t, m, a, l) \
	((t)(uintptr_t)rte_vhost_va_from_guest_pa((m), (a), (l)))
static int
cipher_algo_transform(uint32_t virtio_cipher_algo)
static int
cipher_algo_transform(uint32_t virtio_cipher_algo)
reach_inhdr(struct vring_desc *head, struct rte_vhost_memory *mem,
struct vring_desc *desc)
{
reach_inhdr(struct vring_desc *head, struct rte_vhost_memory *mem,
struct vring_desc *desc)
{
+ uint64_t dlen;
+ struct virtio_crypto_inhdr *inhdr;
+
while (desc->flags & VRING_DESC_F_NEXT)
desc = &head[desc->next];
while (desc->flags & VRING_DESC_F_NEXT)
desc = &head[desc->next];
- return GPA_TO_VVA(struct virtio_crypto_inhdr *, mem, desc->addr);
+ dlen = desc->len;
+ inhdr = GPA_TO_VVA(struct virtio_crypto_inhdr *, mem, desc->addr, &dlen);
+ if (unlikely(dlen != desc->len))
+ return NULL;
+
+ return inhdr;
}
static __rte_always_inline int
}
static __rte_always_inline int
uint8_t *data = dst_data;
uint8_t *src;
int left = size;
uint8_t *data = dst_data;
uint8_t *src;
int left = size;
rte_prefetch0(&head[desc->next]);
to_copy = RTE_MIN(desc->len, (uint32_t)left);
rte_prefetch0(&head[desc->next]);
to_copy = RTE_MIN(desc->len, (uint32_t)left);
- src = GPA_TO_VVA(uint8_t *, mem, desc->addr);
+ dlen = desc->len;
+ src = GPA_TO_VVA(uint8_t *, mem, desc->addr, &dlen);
+ if (unlikely(!src || dlen != desc->len)) {
+ VC_LOG_ERR("Failed to map descriptor");
+ return -1;
+ }
+
rte_memcpy((uint8_t *)data, src, to_copy);
left -= to_copy;
rte_memcpy((uint8_t *)data, src, to_copy);
left -= to_copy;
desc = &head[desc->next];
rte_prefetch0(&head[desc->next]);
to_copy = RTE_MIN(desc->len, (uint32_t)left);
desc = &head[desc->next];
rte_prefetch0(&head[desc->next]);
to_copy = RTE_MIN(desc->len, (uint32_t)left);
- src = GPA_TO_VVA(uint8_t *, mem, desc->addr);
+ dlen = desc->len;
+ src = GPA_TO_VVA(uint8_t *, mem, desc->addr, &dlen);
+ if (unlikely(!src || dlen != desc->len)) {
+ VC_LOG_ERR("Failed to map descriptor");
+ return -1;
+ }
+
rte_memcpy(data + size - left, src, to_copy);
left -= to_copy;
}
rte_memcpy(data + size - left, src, to_copy);
left -= to_copy;
}
struct vring_desc **cur_desc, uint32_t size)
{
void *data;
struct vring_desc **cur_desc, uint32_t size)
{
void *data;
+ uint64_t dlen = (*cur_desc)->len;
- data = GPA_TO_VVA(void *, mem, (*cur_desc)->addr);
- if (unlikely(!data)) {
- VC_LOG_ERR("Failed to get object");
+ data = GPA_TO_VVA(void *, mem, (*cur_desc)->addr, &dlen);
+ if (unlikely(!data || dlen != (*cur_desc)->len)) {
+ VC_LOG_ERR("Failed to map object");
int left = vc_req->wb_len;
uint32_t to_write;
uint8_t *src_data = mbuf->buf_addr, *dst;
int left = vc_req->wb_len;
uint32_t to_write;
uint8_t *src_data = mbuf->buf_addr, *dst;
rte_prefetch0(&head[desc->next]);
to_write = RTE_MIN(desc->len, (uint32_t)left);
rte_prefetch0(&head[desc->next]);
to_write = RTE_MIN(desc->len, (uint32_t)left);
- dst = GPA_TO_VVA(uint8_t *, mem, desc->addr);
+ dlen = desc->len;
+ dst = GPA_TO_VVA(uint8_t *, mem, desc->addr, &dlen);
+ if (unlikely(!dst || dlen != desc->len)) {
+ VC_LOG_ERR("Failed to map descriptor");
+ return -1;
+ }
+
rte_memcpy(dst, src_data, to_write);
left -= to_write;
src_data += to_write;
rte_memcpy(dst, src_data, to_write);
left -= to_write;
src_data += to_write;
desc = &head[desc->next];
rte_prefetch0(&head[desc->next]);
to_write = RTE_MIN(desc->len, (uint32_t)left);
desc = &head[desc->next];
rte_prefetch0(&head[desc->next]);
to_write = RTE_MIN(desc->len, (uint32_t)left);
- dst = GPA_TO_VVA(uint8_t *, mem, desc->addr);
+ dlen = desc->len;
+ dst = GPA_TO_VVA(uint8_t *, mem, desc->addr, &dlen);
+ if (unlikely(!dst || dlen != desc->len)) {
+ VC_LOG_ERR("Failed to map descriptor");
+ return -1;
+ }
+
rte_memcpy(dst, src_data, to_write);
left -= to_write;
src_data += to_write;
rte_memcpy(dst, src_data, to_write);
left -= to_write;
src_data += to_write;
struct virtio_crypto_inhdr *inhdr;
struct vring_desc *desc = NULL;
uint64_t session_id;
struct virtio_crypto_inhdr *inhdr;
struct vring_desc *desc = NULL;
uint64_t session_id;
int err = 0;
vc_req->desc_idx = desc_idx;
if (likely(head->flags & VRING_DESC_F_INDIRECT)) {
int err = 0;
vc_req->desc_idx = desc_idx;
if (likely(head->flags & VRING_DESC_F_INDIRECT)) {
- head = GPA_TO_VVA(struct vring_desc *, mem, head->addr);
- if (unlikely(!head))
- return 0;
+ dlen = head->len;
+ desc = GPA_TO_VVA(struct vring_desc *, mem, head->addr, &dlen);
+ if (unlikely(!desc || dlen != head->len))
+ return -1;
+ } else {
+ desc = head;
vc_req->mem = mem;
vc_req->head = head;
vc_req->vq = vq;
vc_req->mem = mem;
vc_req->head = head;
vc_req->vq = vq;