vhost/crypto: fix possible dead loop
author     Fan Zhang <roy.fan.zhang@intel.com>
           Fri, 4 Jan 2019 11:22:45 +0000 (11:22 +0000)
committer  Ferruh Yigit <ferruh.yigit@intel.com>
           Mon, 14 Jan 2019 16:44:29 +0000 (17:44 +0100)
This patch fixes a possible infinite loop caused by an incorrect
descriptor chain created by the driver: a chain whose next fields
form a cycle would otherwise be walked forever. Every chain walk is
now bounded by the number of descriptors in the virtqueue (or in the
indirect descriptor table), and the request is rejected once that
budget is exhausted.

Fixes: 3bb595ecd682 ("vhost/crypto: add request handler")
Cc: stable@dpdk.org
Signed-off-by: Fan Zhang <roy.fan.zhang@intel.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
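
For illustration only (not part of the patch): a minimal standalone C
sketch of the bounding technique applied below. Every walk of a
descriptor chain spends from a budget initialized to the number of
descriptors available, so a chain whose next fields form a cycle is
rejected instead of being followed forever. struct simple_desc,
walk_chain() and the table in main() are hypothetical stand-ins, not
DPDK code.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the vring descriptor and its flags. */
#define DESC_F_NEXT 1u

struct simple_desc {
	uint16_t flags;
	uint16_t next;
};

/*
 * Walk a descriptor chain, bounded by *nb_descs. Returns the index of
 * the last descriptor, or -1 if the walk takes more hops than there
 * are descriptors -- which can only happen if the chain is cyclic.
 */
static int
walk_chain(struct simple_desc *table, uint16_t idx, uint32_t *nb_descs)
{
	while (table[idx].flags & DESC_F_NEXT) {
		if (*nb_descs == 0)
			return -1;	/* budget exhausted: cyclic chain */
		(*nb_descs)--;
		idx = table[idx].next;
	}
	return idx;
}

int
main(void)
{
	/* Two descriptors that point at each other: a dead loop
	 * before the fix, a clean rejection after it. */
	struct simple_desc table[2] = {
		{ .flags = DESC_F_NEXT, .next = 1 },
		{ .flags = DESC_F_NEXT, .next = 0 },
	};
	uint32_t nb_descs = 2;	/* budget = number of descriptors */

	if (walk_chain(table, 0, &nb_descs) < 0)
		printf("malformed chain rejected\n");
	return 0;
}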
diff --git a/lib/librte_vhost/vhost_crypto.c b/lib/librte_vhost/vhost_crypto.c
index 598196f..e12458c 100644
--- a/lib/librte_vhost/vhost_crypto.c
+++ b/lib/librte_vhost/vhost_crypto.c
@@ -467,12 +467,17 @@ vhost_crypto_msg_post_handler(int vid, void *msg)
 }
 
 static __rte_always_inline struct vring_desc *
-find_write_desc(struct vring_desc *head, struct vring_desc *desc)
+find_write_desc(struct vring_desc *head, struct vring_desc *desc,
+               uint32_t *nb_descs)
 {
        if (desc->flags & VRING_DESC_F_WRITE)
                return desc;
 
        while (desc->flags & VRING_DESC_F_NEXT) {
+               if (unlikely(*nb_descs == 0))
+                       return NULL;
+               (*nb_descs)--;
+
                desc = &head[desc->next];
                if (desc->flags & VRING_DESC_F_WRITE)
                        return desc;
@@ -482,13 +487,18 @@ find_write_desc(struct vring_desc *head, struct vring_desc *desc)
 }
 
 static struct virtio_crypto_inhdr *
-reach_inhdr(struct vhost_crypto_data_req *vc_req, struct vring_desc *desc)
+reach_inhdr(struct vhost_crypto_data_req *vc_req, struct vring_desc *desc,
+               uint32_t *nb_descs)
 {
        uint64_t dlen;
        struct virtio_crypto_inhdr *inhdr;
 
-       while (desc->flags & VRING_DESC_F_NEXT)
+       while (desc->flags & VRING_DESC_F_NEXT) {
+               if (unlikely(*nb_descs == 0))
+                       return NULL;
+               (*nb_descs)--;
                desc = &vc_req->head[desc->next];
+       }
 
        dlen = desc->len;
        inhdr = IOVA_TO_VVA(struct virtio_crypto_inhdr *, vc_req, desc->addr,
@@ -501,15 +511,16 @@ reach_inhdr(struct vhost_crypto_data_req *vc_req, struct vring_desc *desc)
 
 static __rte_always_inline int
 move_desc(struct vring_desc *head, struct vring_desc **cur_desc,
-               uint32_t size)
+               uint32_t size, uint32_t *nb_descs)
 {
        struct vring_desc *desc = *cur_desc;
-       int left = size;
-
-       rte_prefetch0(&head[desc->next]);
-       left -= desc->len;
+       int left = size - desc->len;
 
        while ((desc->flags & VRING_DESC_F_NEXT) && left > 0) {
+               (*nb_descs)--;
+               if (unlikely(*nb_descs == 0))
+                       return -1;
+
                desc = &head[desc->next];
                rte_prefetch0(&head[desc->next]);
                left -= desc->len;
@@ -518,7 +529,10 @@ move_desc(struct vring_desc *head, struct vring_desc **cur_desc,
        if (unlikely(left > 0))
                return -1;
 
-       *cur_desc = &head[desc->next];
+       if (unlikely(*nb_descs == 0))
+               *cur_desc = NULL;
+       else
+               *cur_desc = &head[desc->next];
        return 0;
 }
 
@@ -540,7 +554,7 @@ get_data_ptr(struct vhost_crypto_data_req *vc_req, struct vring_desc *cur_desc,
 
 static int
 copy_data(void *dst_data, struct vhost_crypto_data_req *vc_req,
-               struct vring_desc **cur_desc, uint32_t size)
+               struct vring_desc **cur_desc, uint32_t size, uint32_t *nb_descs)
 {
        struct vring_desc *desc = *cur_desc;
        uint64_t remain, addr, dlen, len;
@@ -549,7 +563,6 @@ copy_data(void *dst_data, struct vhost_crypto_data_req *vc_req,
        uint8_t *src;
        int left = size;
 
-       rte_prefetch0(&vc_req->head[desc->next]);
        to_copy = RTE_MIN(desc->len, (uint32_t)left);
        dlen = to_copy;
        src = IOVA_TO_VVA(uint8_t *, vc_req, desc->addr, &dlen,
@@ -583,6 +596,12 @@ copy_data(void *dst_data, struct vhost_crypto_data_req *vc_req,
        left -= to_copy;
 
        while ((desc->flags & VRING_DESC_F_NEXT) && left > 0) {
+               if (unlikely(*nb_descs == 0)) {
+                       VC_LOG_ERR("Invalid descriptors");
+                       return -1;
+               }
+               (*nb_descs)--;
+
                desc = &vc_req->head[desc->next];
                rte_prefetch0(&vc_req->head[desc->next]);
                to_copy = RTE_MIN(desc->len, (uint32_t)left);
@@ -625,7 +644,10 @@ copy_data(void *dst_data, struct vhost_crypto_data_req *vc_req,
                return -1;
        }
 
-       *cur_desc = &vc_req->head[desc->next];
+       if (unlikely(*nb_descs == 0))
+               *cur_desc = NULL;
+       else
+               *cur_desc = &vc_req->head[desc->next];
 
        return 0;
 }
@@ -685,7 +707,8 @@ prepare_write_back_data(struct vhost_crypto_data_req *vc_req,
                struct vhost_crypto_writeback_data **end_wb_data,
                uint8_t *src,
                uint32_t offset,
-               uint64_t write_back_len)
+               uint64_t write_back_len,
+               uint32_t *nb_descs)
 {
        struct vhost_crypto_writeback_data *wb_data, *head;
        struct vring_desc *desc = *cur_desc;
@@ -732,6 +755,12 @@ prepare_write_back_data(struct vhost_crypto_data_req *vc_req,
                offset -= desc->len;
 
        while (write_back_len) {
+               if (unlikely(*nb_descs == 0)) {
+                       VC_LOG_ERR("Invalid descriptors");
+                       goto error_exit;
+               }
+               (*nb_descs)--;
+
                desc = &vc_req->head[desc->next];
                if (unlikely(!(desc->flags & VRING_DESC_F_WRITE))) {
                        VC_LOG_ERR("incorrect descriptor");
@@ -771,7 +800,10 @@ prepare_write_back_data(struct vhost_crypto_data_req *vc_req,
                        wb_data->next = NULL;
        }
 
-       *cur_desc = &vc_req->head[desc->next];
+       if (unlikely(*nb_descs == 0))
+               *cur_desc = NULL;
+       else
+               *cur_desc = &vc_req->head[desc->next];
 
        *end_wb_data = wb_data;
 
@@ -788,7 +820,8 @@ static uint8_t
 prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
                struct vhost_crypto_data_req *vc_req,
                struct virtio_crypto_cipher_data_req *cipher,
-               struct vring_desc *cur_desc)
+               struct vring_desc *cur_desc,
+               uint32_t *nb_descs)
 {
        struct vring_desc *desc = cur_desc;
        struct vhost_crypto_writeback_data *ewb = NULL;
@@ -798,8 +831,8 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 
        /* prepare */
        /* iv */
-       if (unlikely(copy_data(iv_data, vc_req, &desc,
-                       cipher->para.iv_len) < 0)) {
+       if (unlikely(copy_data(iv_data, vc_req, &desc, cipher->para.iv_len,
+                       nb_descs) < 0)) {
                ret = VIRTIO_CRYPTO_BADMSG;
                goto error_exit;
        }
@@ -819,7 +852,7 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
                }
 
                if (unlikely(move_desc(vc_req->head, &desc,
-                               cipher->para.src_data_len) < 0)) {
+                               cipher->para.src_data_len, nb_descs) < 0)) {
                        VC_LOG_ERR("Incorrect descriptor");
                        ret = VIRTIO_CRYPTO_ERR;
                        goto error_exit;
@@ -836,8 +869,8 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
                        goto error_exit;
                }
                if (unlikely(copy_data(rte_pktmbuf_mtod(m_src, uint8_t *),
-                               vc_req, &desc, cipher->para.src_data_len)
-                               < 0)) {
+                               vc_req, &desc, cipher->para.src_data_len,
+                               nb_descs) < 0)) {
                        ret = VIRTIO_CRYPTO_BADMSG;
                        goto error_exit;
                }
@@ -848,7 +881,7 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
        }
 
        /* dst */
-       desc = find_write_desc(vc_req->head, desc);
+       desc = find_write_desc(vc_req->head, desc, nb_descs);
        if (unlikely(!desc)) {
                VC_LOG_ERR("Cannot find write location");
                ret = VIRTIO_CRYPTO_BADMSG;
@@ -867,7 +900,7 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
                }
 
                if (unlikely(move_desc(vc_req->head, &desc,
-                               cipher->para.dst_data_len) < 0)) {
+                               cipher->para.dst_data_len, nb_descs) < 0)) {
                        VC_LOG_ERR("Incorrect descriptor");
                        ret = VIRTIO_CRYPTO_ERR;
                        goto error_exit;
@@ -878,7 +911,7 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
        case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE:
                vc_req->wb = prepare_write_back_data(vc_req, &desc, &ewb,
                                rte_pktmbuf_mtod(m_src, uint8_t *), 0,
-                               cipher->para.dst_data_len);
+                               cipher->para.dst_data_len, nb_descs);
                if (unlikely(vc_req->wb == NULL)) {
                        ret = VIRTIO_CRYPTO_ERR;
                        goto error_exit;
@@ -920,7 +953,8 @@ static uint8_t
 prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
                struct vhost_crypto_data_req *vc_req,
                struct virtio_crypto_alg_chain_data_req *chain,
-               struct vring_desc *cur_desc)
+               struct vring_desc *cur_desc,
+               uint32_t *nb_descs)
 {
        struct vring_desc *desc = cur_desc, *digest_desc;
        struct vhost_crypto_writeback_data *ewb = NULL, *ewb2 = NULL;
@@ -933,7 +967,7 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
        /* prepare */
        /* iv */
        if (unlikely(copy_data(iv_data, vc_req, &desc,
-                       chain->para.iv_len) < 0)) {
+                       chain->para.iv_len, nb_descs) < 0)) {
                ret = VIRTIO_CRYPTO_BADMSG;
                goto error_exit;
        }
@@ -954,7 +988,7 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
                }
 
                if (unlikely(move_desc(vc_req->head, &desc,
-                               chain->para.src_data_len) < 0)) {
+                               chain->para.src_data_len, nb_descs) < 0)) {
                        VC_LOG_ERR("Incorrect descriptor");
                        ret = VIRTIO_CRYPTO_ERR;
                        goto error_exit;
@@ -970,7 +1004,8 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
                        goto error_exit;
                }
                if (unlikely(copy_data(rte_pktmbuf_mtod(m_src, uint8_t *),
-                               vc_req, &desc, chain->para.src_data_len)) < 0) {
+                               vc_req, &desc, chain->para.src_data_len,
+                               nb_descs)) < 0) {
                        ret = VIRTIO_CRYPTO_BADMSG;
                        goto error_exit;
                }
@@ -982,7 +1017,7 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
        }
 
        /* dst */
-       desc = find_write_desc(vc_req->head, desc);
+       desc = find_write_desc(vc_req->head, desc, nb_descs);
        if (unlikely(!desc)) {
                VC_LOG_ERR("Cannot find write location");
                ret = VIRTIO_CRYPTO_BADMSG;
@@ -1001,7 +1036,7 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
                }
 
                if (unlikely(move_desc(vc_req->head, &desc,
-                               chain->para.dst_data_len) < 0)) {
+                               chain->para.dst_data_len, nb_descs) < 0)) {
                        VC_LOG_ERR("Incorrect descriptor");
                        ret = VIRTIO_CRYPTO_ERR;
                        goto error_exit;
@@ -1018,7 +1053,7 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
                }
 
                if (unlikely(move_desc(vc_req->head, &desc,
-                               chain->para.hash_result_len) < 0)) {
+                               chain->para.hash_result_len, nb_descs) < 0)) {
                        VC_LOG_ERR("Incorrect descriptor");
                        ret = VIRTIO_CRYPTO_ERR;
                        goto error_exit;
@@ -1030,7 +1065,7 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
                                rte_pktmbuf_mtod(m_src, uint8_t *),
                                chain->para.cipher_start_src_offset,
                                chain->para.dst_data_len -
-                               chain->para.cipher_start_src_offset);
+                               chain->para.cipher_start_src_offset, nb_descs);
                if (unlikely(vc_req->wb == NULL)) {
                        ret = VIRTIO_CRYPTO_ERR;
                        goto error_exit;
@@ -1043,14 +1078,15 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 
                /** create a wb_data for digest */
                ewb->next = prepare_write_back_data(vc_req, &desc, &ewb2,
-                               digest_addr, 0, chain->para.hash_result_len);
+                               digest_addr, 0, chain->para.hash_result_len,
+                               nb_descs);
                if (unlikely(ewb->next == NULL)) {
                        ret = VIRTIO_CRYPTO_ERR;
                        goto error_exit;
                }
 
                if (unlikely(copy_data(digest_addr, vc_req, &digest_desc,
-                               chain->para.hash_result_len)) < 0) {
+                               chain->para.hash_result_len, nb_descs)) < 0) {
                        ret = VIRTIO_CRYPTO_BADMSG;
                        goto error_exit;
                }
@@ -1109,6 +1145,7 @@ vhost_crypto_process_one_req(struct vhost_crypto *vcrypto,
        struct vring_desc *desc = NULL;
        uint64_t session_id;
        uint64_t dlen;
+       uint32_t nb_descs = vq->size;
        int err = 0;
 
        vc_req->desc_idx = desc_idx;
@@ -1117,6 +1154,10 @@ vhost_crypto_process_one_req(struct vhost_crypto *vcrypto,
 
        if (likely(head->flags & VRING_DESC_F_INDIRECT)) {
                dlen = head->len;
+               nb_descs = dlen / sizeof(struct vring_desc);
+               /* drop invalid descriptors */
+               if (unlikely(nb_descs > vq->size))
+                       return -1;
                desc = IOVA_TO_VVA(struct vring_desc *, vc_req, head->addr,
                                &dlen, VHOST_ACCESS_RO);
                if (unlikely(!desc || dlen != head->len))
@@ -1139,8 +1180,8 @@ vhost_crypto_process_one_req(struct vhost_crypto *vcrypto,
                        goto error_exit;
                case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE:
                        req = &tmp_req;
-                       if (unlikely(copy_data(req, vc_req, &desc, sizeof(*req))
-                                       < 0)) {
+                       if (unlikely(copy_data(req, vc_req, &desc, sizeof(*req),
+                                       &nb_descs) < 0)) {
                                err = VIRTIO_CRYPTO_BADMSG;
                                VC_LOG_ERR("Invalid descriptor");
                                goto error_exit;
@@ -1153,7 +1194,7 @@ vhost_crypto_process_one_req(struct vhost_crypto *vcrypto,
                }
        } else {
                if (unlikely(move_desc(vc_req->head, &desc,
-                               sizeof(*req)) < 0)) {
+                               sizeof(*req), &nb_descs) < 0)) {
                        VC_LOG_ERR("Incorrect descriptor");
                        goto error_exit;
                }
@@ -1194,11 +1235,13 @@ vhost_crypto_process_one_req(struct vhost_crypto *vcrypto,
                        break;
                case VIRTIO_CRYPTO_SYM_OP_CIPHER:
                        err = prepare_sym_cipher_op(vcrypto, op, vc_req,
-                                       &req->u.sym_req.u.cipher, desc);
+                                       &req->u.sym_req.u.cipher, desc,
+                                       &nb_descs);
                        break;
                case VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING:
                        err = prepare_sym_chain_op(vcrypto, op, vc_req,
-                                       &req->u.sym_req.u.chain, desc);
+                                       &req->u.sym_req.u.chain, desc,
+                                       &nb_descs);
                        break;
                }
                if (unlikely(err != 0)) {
@@ -1216,7 +1259,7 @@ vhost_crypto_process_one_req(struct vhost_crypto *vcrypto,
 
 error_exit:
 
-       inhdr = reach_inhdr(vc_req, desc);
+       inhdr = reach_inhdr(vc_req, desc, &nb_descs);
        if (likely(inhdr != NULL))
                inhdr->status = (uint8_t)err;
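
A note on how the budget is sized, shown as a hedged sketch: for a
direct chain the patch starts nb_descs at vq->size, while for an
indirect chain it derives the budget from the table length and drops
tables that claim more entries than the ring itself holds. The helper
name indirect_budget() is hypothetical; the descriptor layout follows
the virtio specification.

#include <stdint.h>

struct vring_desc {		/* descriptor layout per the virtio spec */
	uint64_t addr;
	uint32_t len;
	uint16_t flags;
	uint16_t next;
};

/*
 * Hypothetical helper mirroring the bound chosen in
 * vhost_crypto_process_one_req(): an indirect table of dlen bytes
 * yields a walk budget of dlen / sizeof(struct vring_desc), and a
 * table larger than the ring is rejected outright.
 */
static int
indirect_budget(uint64_t dlen, uint32_t vq_size, uint32_t *nb_descs)
{
	*nb_descs = (uint32_t)(dlen / sizeof(struct vring_desc));
	if (*nb_descs > vq_size)
		return -1;	/* drop invalid descriptors */
	return 0;
}

int
main(void)
{
	uint32_t budget;

	/* A 1024-byte indirect table holds 64 16-byte descriptors;
	 * with a 256-entry ring it is accepted with a budget of 64. */
	return (indirect_budget(1024, 256, &budget) == 0 && budget == 64)
			? 0 : 1;
}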