}
static __rte_always_inline struct vring_desc *
-find_write_desc(struct vring_desc *head, struct vring_desc *desc)
+find_write_desc(struct vring_desc *head, struct vring_desc *desc,
+ uint32_t *nb_descs)
{
if (desc->flags & VRING_DESC_F_WRITE)
return desc;
while (desc->flags & VRING_DESC_F_NEXT) {
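+ /* each next-link consumes one ring slot; exhausting the budget
+ * means the guest supplied a looping chain */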
+ if (unlikely(*nb_descs == 0))
+ return NULL;
+ (*nb_descs)--;
+
desc = &head[desc->next];
if (desc->flags & VRING_DESC_F_WRITE)
return desc;
}
static struct virtio_crypto_inhdr *
-reach_inhdr(struct vhost_crypto_data_req *vc_req, struct vring_desc *desc)
+reach_inhdr(struct vhost_crypto_data_req *vc_req, struct vring_desc *desc,
+ uint32_t *nb_descs)
{
uint64_t dlen;
struct virtio_crypto_inhdr *inhdr;
- while (desc->flags & VRING_DESC_F_NEXT)
+ while (desc->flags & VRING_DESC_F_NEXT) {
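+ /* the inhdr lives in the tail descriptor; bound the walk so a
+ * circular chain cannot spin vhost forever */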
+ if (unlikely(*nb_descs == 0))
+ return NULL;
+ (*nb_descs)--;
desc = &vc_req->head[desc->next];
+ }
dlen = desc->len;
inhdr = IOVA_TO_VVA(struct virtio_crypto_inhdr *, vc_req, desc->addr,
static __rte_always_inline int
move_desc(struct vring_desc *head, struct vring_desc **cur_desc,
- uint32_t size)
+ uint32_t size, uint32_t *nb_descs)
{
struct vring_desc *desc = *cur_desc;
- int left = size;
-
- rte_prefetch0(&head[desc->next]);
- left -= desc->len;
+ int left = size - desc->len;
while ((desc->flags & VRING_DESC_F_NEXT) && left > 0) {
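+ /* charge one descriptor from the budget before following next */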
+ if (unlikely(*nb_descs == 0))
+ return -1;
+ (*nb_descs)--;
+
desc = &head[desc->next];
rte_prefetch0(&head[desc->next]);
left -= desc->len;
if (unlikely(left > 0))
return -1;
- *cur_desc = &head[desc->next];
+ if (unlikely(*nb_descs == 0))
+ *cur_desc = NULL;
+ else
+ *cur_desc = &head[desc->next];
return 0;
}
static int
copy_data(void *dst_data, struct vhost_crypto_data_req *vc_req,
- struct vring_desc **cur_desc, uint32_t size)
+ struct vring_desc **cur_desc, uint32_t size, uint32_t *nb_descs)
{
struct vring_desc *desc = *cur_desc;
uint64_t remain, addr, dlen, len;
uint8_t *src;
int left = size;
- rte_prefetch0(&vc_req->head[desc->next]);
to_copy = RTE_MIN(desc->len, (uint32_t)left);
dlen = to_copy;
src = IOVA_TO_VVA(uint8_t *, vc_req, desc->addr, &dlen,
left -= to_copy;
while ((desc->flags & VRING_DESC_F_NEXT) && left > 0) {
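+ /* refuse to follow more links than the ring can hold */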
+ if (unlikely(*nb_descs == 0)) {
+ VC_LOG_ERR("Invalid descriptors");
+ return -1;
+ }
+ (*nb_descs)--;
+
desc = &vc_req->head[desc->next];
rte_prefetch0(&vc_req->head[desc->next]);
to_copy = RTE_MIN(desc->len, (uint32_t)left);
return -1;
}
- *cur_desc = &vc_req->head[desc->next];
+ if (unlikely(*nb_descs == 0))
+ *cur_desc = NULL;
+ else
+ *cur_desc = &vc_req->head[desc->next];
return 0;
}
struct vhost_crypto_writeback_data **end_wb_data,
uint8_t *src,
uint32_t offset,
- uint64_t write_back_len)
+ uint64_t write_back_len,
+ uint32_t *nb_descs)
{
struct vhost_crypto_writeback_data *wb_data, *head;
struct vring_desc *desc = *cur_desc;
offset -= desc->len;
while (write_back_len) {
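+ /* the write-back chain is bounded by the same per-request budget */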
+ if (unlikely(*nb_descs == 0)) {
+ VC_LOG_ERR("Invalid descriptors");
+ goto error_exit;
+ }
+ (*nb_descs)--;
+
desc = &vc_req->head[desc->next];
if (unlikely(!(desc->flags & VRING_DESC_F_WRITE))) {
VC_LOG_ERR("incorrect descriptor");
wb_data->next = NULL;
}
- *cur_desc = &vc_req->head[desc->next];
+ if (unlikely(*nb_descs == 0))
+ *cur_desc = NULL;
+ else
+ *cur_desc = &vc_req->head[desc->next];
*end_wb_data = wb_data;
prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
struct vhost_crypto_data_req *vc_req,
struct virtio_crypto_cipher_data_req *cipher,
- struct vring_desc *cur_desc)
+ struct vring_desc *cur_desc,
+ uint32_t *nb_descs)
{
struct vring_desc *desc = cur_desc;
struct vhost_crypto_writeback_data *ewb = NULL;
/* prepare */
/* iv */
- if (unlikely(copy_data(iv_data, vc_req, &desc,
- cipher->para.iv_len) < 0)) {
+ if (unlikely(copy_data(iv_data, vc_req, &desc, cipher->para.iv_len,
+ nb_descs) < 0)) {
ret = VIRTIO_CRYPTO_BADMSG;
goto error_exit;
}
}
if (unlikely(move_desc(vc_req->head, &desc,
- cipher->para.src_data_len) < 0)) {
+ cipher->para.src_data_len, nb_descs) < 0)) {
VC_LOG_ERR("Incorrect descriptor");
ret = VIRTIO_CRYPTO_ERR;
goto error_exit;
goto error_exit;
}
if (unlikely(copy_data(rte_pktmbuf_mtod(m_src, uint8_t *),
- vc_req, &desc, cipher->para.src_data_len)
- < 0)) {
+ vc_req, &desc, cipher->para.src_data_len,
+ nb_descs) < 0)) {
ret = VIRTIO_CRYPTO_BADMSG;
goto error_exit;
}
}
/* dst */
- desc = find_write_desc(vc_req->head, desc);
+ desc = find_write_desc(vc_req->head, desc, nb_descs);
if (unlikely(!desc)) {
VC_LOG_ERR("Cannot find write location");
ret = VIRTIO_CRYPTO_BADMSG;
}
if (unlikely(move_desc(vc_req->head, &desc,
- cipher->para.dst_data_len) < 0)) {
+ cipher->para.dst_data_len, nb_descs) < 0)) {
VC_LOG_ERR("Incorrect descriptor");
ret = VIRTIO_CRYPTO_ERR;
goto error_exit;
case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE:
vc_req->wb = prepare_write_back_data(vc_req, &desc, &ewb,
rte_pktmbuf_mtod(m_src, uint8_t *), 0,
- cipher->para.dst_data_len);
+ cipher->para.dst_data_len, nb_descs);
if (unlikely(vc_req->wb == NULL)) {
ret = VIRTIO_CRYPTO_ERR;
goto error_exit;
prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
struct vhost_crypto_data_req *vc_req,
struct virtio_crypto_alg_chain_data_req *chain,
- struct vring_desc *cur_desc)
+ struct vring_desc *cur_desc,
+ uint32_t *nb_descs)
{
struct vring_desc *desc = cur_desc, *digest_desc;
struct vhost_crypto_writeback_data *ewb = NULL, *ewb2 = NULL;
/* prepare */
/* iv */
if (unlikely(copy_data(iv_data, vc_req, &desc,
- chain->para.iv_len) < 0)) {
+ chain->para.iv_len, nb_descs) < 0)) {
ret = VIRTIO_CRYPTO_BADMSG;
goto error_exit;
}
}
if (unlikely(move_desc(vc_req->head, &desc,
- chain->para.src_data_len) < 0)) {
+ chain->para.src_data_len, nb_descs) < 0)) {
VC_LOG_ERR("Incorrect descriptor");
ret = VIRTIO_CRYPTO_ERR;
goto error_exit;
goto error_exit;
}
if (unlikely(copy_data(rte_pktmbuf_mtod(m_src, uint8_t *),
- vc_req, &desc, chain->para.src_data_len)) < 0) {
+ vc_req, &desc, chain->para.src_data_len,
+ nb_descs) < 0)) {
ret = VIRTIO_CRYPTO_BADMSG;
goto error_exit;
}
}
/* dst */
- desc = find_write_desc(vc_req->head, desc);
+ desc = find_write_desc(vc_req->head, desc, nb_descs);
if (unlikely(!desc)) {
VC_LOG_ERR("Cannot find write location");
ret = VIRTIO_CRYPTO_BADMSG;
}
if (unlikely(move_desc(vc_req->head, &desc,
- chain->para.dst_data_len) < 0)) {
+ chain->para.dst_data_len, nb_descs) < 0)) {
VC_LOG_ERR("Incorrect descriptor");
ret = VIRTIO_CRYPTO_ERR;
goto error_exit;
}
if (unlikely(move_desc(vc_req->head, &desc,
- chain->para.hash_result_len) < 0)) {
+ chain->para.hash_result_len, nb_descs) < 0)) {
VC_LOG_ERR("Incorrect descriptor");
ret = VIRTIO_CRYPTO_ERR;
goto error_exit;
rte_pktmbuf_mtod(m_src, uint8_t *),
chain->para.cipher_start_src_offset,
chain->para.dst_data_len -
- chain->para.cipher_start_src_offset);
+ chain->para.cipher_start_src_offset, nb_descs);
if (unlikely(vc_req->wb == NULL)) {
ret = VIRTIO_CRYPTO_ERR;
goto error_exit;
/** create a wb_data for digest */
ewb->next = prepare_write_back_data(vc_req, &desc, &ewb2,
- digest_addr, 0, chain->para.hash_result_len);
+ digest_addr, 0, chain->para.hash_result_len,
+ nb_descs);
if (unlikely(ewb->next == NULL)) {
ret = VIRTIO_CRYPTO_ERR;
goto error_exit;
}
if (unlikely(copy_data(digest_addr, vc_req, &digest_desc,
- chain->para.hash_result_len)) < 0) {
+ chain->para.hash_result_len, nb_descs) < 0)) {
ret = VIRTIO_CRYPTO_BADMSG;
goto error_exit;
}
struct vring_desc *desc = NULL;
uint64_t session_id;
uint64_t dlen;
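+ /* descriptor budget: no valid chain is longer than the ring itself */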
+ uint32_t nb_descs = vq->size;
int err = 0;
vc_req->desc_idx = desc_idx;
if (likely(head->flags & VRING_DESC_F_INDIRECT)) {
dlen = head->len;
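+ /* an indirect table of dlen bytes holds at most
+ * dlen / sizeof(struct vring_desc) entries */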
+ nb_descs = dlen / sizeof(struct vring_desc);
+ /* an indirect table larger than the ring is invalid; drop it */
+ if (unlikely(nb_descs > vq->size))
+ return -1;
desc = IOVA_TO_VVA(struct vring_desc *, vc_req, head->addr,
&dlen, VHOST_ACCESS_RO);
if (unlikely(!desc || dlen != head->len))
goto error_exit;
case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE:
req = &tmp_req;
- if (unlikely(copy_data(req, vc_req, &desc, sizeof(*req))
- < 0)) {
+ if (unlikely(copy_data(req, vc_req, &desc, sizeof(*req),
+ &nb_descs) < 0)) {
err = VIRTIO_CRYPTO_BADMSG;
VC_LOG_ERR("Invalid descriptor");
goto error_exit;
}
} else {
if (unlikely(move_desc(vc_req->head, &desc,
- sizeof(*req)) < 0)) {
+ sizeof(*req), &nb_descs) < 0)) {
VC_LOG_ERR("Incorrect descriptor");
goto error_exit;
}
break;
case VIRTIO_CRYPTO_SYM_OP_CIPHER:
err = prepare_sym_cipher_op(vcrypto, op, vc_req,
- &req->u.sym_req.u.cipher, desc);
+ &req->u.sym_req.u.cipher, desc,
+ &nb_descs);
break;
case VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING:
err = prepare_sym_chain_op(vcrypto, op, vc_req,
- &req->u.sym_req.u.chain, desc);
+ &req->u.sym_req.u.chain, desc,
+ &nb_descs);
break;
}
if (unlikely(err != 0)) {
error_exit:
- inhdr = reach_inhdr(vc_req, desc);
+ inhdr = reach_inhdr(vc_req, desc, &nb_descs);
if (likely(inhdr != NULL))
inhdr->status = (uint8_t)err;