X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=lib%2Flibrte_vhost%2Fvhost_crypto.c;h=6689c52df2393f266d0ec44a706d0ab17646a71b;hb=e863fe3a13da89787fdf3b5c590101a3c0f10af6;hp=620a1df3dc3d68eab022293e51a34b664a9081fb;hpb=d4cc4c65dfb46d4262479daecbf5d3281dc68e6a;p=dpdk.git diff --git a/lib/librte_vhost/vhost_crypto.c b/lib/librte_vhost/vhost_crypto.c index 620a1df3dc..6689c52df2 100644 --- a/lib/librte_vhost/vhost_crypto.c +++ b/lib/librte_vhost/vhost_crypto.c @@ -35,126 +35,126 @@ #define VC_LOG_DBG(fmt, args...) #endif -#define VIRTIO_CRYPTO_FEATURES ((1 << VIRTIO_F_NOTIFY_ON_EMPTY) | \ - (1 << VIRTIO_RING_F_INDIRECT_DESC) | \ - (1 << VIRTIO_RING_F_EVENT_IDX) | \ - (1 << VIRTIO_CRYPTO_SERVICE_CIPHER) | \ - (1 << VIRTIO_CRYPTO_SERVICE_MAC) | \ - (1 << VIRTIO_NET_F_CTRL_VQ)) - -#define GPA_TO_VVA(t, m, a, l) ((t)(uintptr_t)rte_vhost_va_from_guest_pa(m, a, l)) +#define VIRTIO_CRYPTO_FEATURES ((1ULL << VIRTIO_F_NOTIFY_ON_EMPTY) | \ + (1ULL << VIRTIO_RING_F_INDIRECT_DESC) | \ + (1ULL << VIRTIO_RING_F_EVENT_IDX) | \ + (1ULL << VIRTIO_NET_F_CTRL_VQ) | \ + (1ULL << VIRTIO_F_VERSION_1) | \ + (1ULL << VHOST_USER_F_PROTOCOL_FEATURES)) + +#define IOVA_TO_VVA(t, r, a, l, p) \ + ((t)(uintptr_t)vhost_iova_to_vva(r->dev, r->vq, a, l, p)) + +/* + * vhost_crypto_desc is used to copy original vring_desc to the local buffer + * before processing (except the next index). The copy result will be an + * array of vhost_crypto_desc elements that follows the sequence of original + * vring_desc.next is arranged. + */ +#define vhost_crypto_desc vring_desc static int -cipher_algo_transform(uint32_t virtio_cipher_algo) +cipher_algo_transform(uint32_t virtio_cipher_algo, + enum rte_crypto_cipher_algorithm *algo) { - int ret; - switch (virtio_cipher_algo) { case VIRTIO_CRYPTO_CIPHER_AES_CBC: - ret = RTE_CRYPTO_CIPHER_AES_CBC; + *algo = RTE_CRYPTO_CIPHER_AES_CBC; break; case VIRTIO_CRYPTO_CIPHER_AES_CTR: - ret = RTE_CRYPTO_CIPHER_AES_CTR; + *algo = RTE_CRYPTO_CIPHER_AES_CTR; break; case VIRTIO_CRYPTO_CIPHER_DES_ECB: - ret = -VIRTIO_CRYPTO_NOTSUPP; + *algo = -VIRTIO_CRYPTO_NOTSUPP; break; case VIRTIO_CRYPTO_CIPHER_DES_CBC: - ret = RTE_CRYPTO_CIPHER_DES_CBC; + *algo = RTE_CRYPTO_CIPHER_DES_CBC; break; case VIRTIO_CRYPTO_CIPHER_3DES_ECB: - ret = RTE_CRYPTO_CIPHER_3DES_ECB; + *algo = RTE_CRYPTO_CIPHER_3DES_ECB; break; case VIRTIO_CRYPTO_CIPHER_3DES_CBC: - ret = RTE_CRYPTO_CIPHER_3DES_CBC; + *algo = RTE_CRYPTO_CIPHER_3DES_CBC; break; case VIRTIO_CRYPTO_CIPHER_3DES_CTR: - ret = RTE_CRYPTO_CIPHER_3DES_CTR; + *algo = RTE_CRYPTO_CIPHER_3DES_CTR; break; case VIRTIO_CRYPTO_CIPHER_KASUMI_F8: - ret = RTE_CRYPTO_CIPHER_KASUMI_F8; + *algo = RTE_CRYPTO_CIPHER_KASUMI_F8; break; case VIRTIO_CRYPTO_CIPHER_SNOW3G_UEA2: - ret = RTE_CRYPTO_CIPHER_SNOW3G_UEA2; + *algo = RTE_CRYPTO_CIPHER_SNOW3G_UEA2; break; case VIRTIO_CRYPTO_CIPHER_AES_F8: - ret = RTE_CRYPTO_CIPHER_AES_F8; + *algo = RTE_CRYPTO_CIPHER_AES_F8; break; case VIRTIO_CRYPTO_CIPHER_AES_XTS: - ret = RTE_CRYPTO_CIPHER_AES_XTS; + *algo = RTE_CRYPTO_CIPHER_AES_XTS; break; case VIRTIO_CRYPTO_CIPHER_ZUC_EEA3: - ret = RTE_CRYPTO_CIPHER_ZUC_EEA3; + *algo = RTE_CRYPTO_CIPHER_ZUC_EEA3; break; default: - ret = -VIRTIO_CRYPTO_BADMSG; + return -VIRTIO_CRYPTO_BADMSG; break; } - return ret; + return 0; } static int -auth_algo_transform(uint32_t virtio_auth_algo) +auth_algo_transform(uint32_t virtio_auth_algo, + enum rte_crypto_auth_algorithm *algo) { - int ret; - switch (virtio_auth_algo) { - case VIRTIO_CRYPTO_NO_MAC: - ret = RTE_CRYPTO_AUTH_NULL; + *algo = RTE_CRYPTO_AUTH_NULL; break; 
case VIRTIO_CRYPTO_MAC_HMAC_MD5: - ret = RTE_CRYPTO_AUTH_MD5_HMAC; + *algo = RTE_CRYPTO_AUTH_MD5_HMAC; break; case VIRTIO_CRYPTO_MAC_HMAC_SHA1: - ret = RTE_CRYPTO_AUTH_SHA1_HMAC; + *algo = RTE_CRYPTO_AUTH_SHA1_HMAC; break; case VIRTIO_CRYPTO_MAC_HMAC_SHA_224: - ret = RTE_CRYPTO_AUTH_SHA224_HMAC; + *algo = RTE_CRYPTO_AUTH_SHA224_HMAC; break; case VIRTIO_CRYPTO_MAC_HMAC_SHA_256: - ret = RTE_CRYPTO_AUTH_SHA256_HMAC; + *algo = RTE_CRYPTO_AUTH_SHA256_HMAC; break; case VIRTIO_CRYPTO_MAC_HMAC_SHA_384: - ret = RTE_CRYPTO_AUTH_SHA384_HMAC; + *algo = RTE_CRYPTO_AUTH_SHA384_HMAC; break; case VIRTIO_CRYPTO_MAC_HMAC_SHA_512: - ret = RTE_CRYPTO_AUTH_SHA512_HMAC; - break; - case VIRTIO_CRYPTO_MAC_CMAC_3DES: - ret = -VIRTIO_CRYPTO_NOTSUPP; + *algo = RTE_CRYPTO_AUTH_SHA512_HMAC; break; case VIRTIO_CRYPTO_MAC_CMAC_AES: - ret = RTE_CRYPTO_AUTH_AES_CMAC; + *algo = RTE_CRYPTO_AUTH_AES_CMAC; break; case VIRTIO_CRYPTO_MAC_KASUMI_F9: - ret = RTE_CRYPTO_AUTH_KASUMI_F9; + *algo = RTE_CRYPTO_AUTH_KASUMI_F9; break; case VIRTIO_CRYPTO_MAC_SNOW3G_UIA2: - ret = RTE_CRYPTO_AUTH_SNOW3G_UIA2; + *algo = RTE_CRYPTO_AUTH_SNOW3G_UIA2; break; case VIRTIO_CRYPTO_MAC_GMAC_AES: - ret = RTE_CRYPTO_AUTH_AES_GMAC; - break; - case VIRTIO_CRYPTO_MAC_GMAC_TWOFISH: - ret = -VIRTIO_CRYPTO_NOTSUPP; + *algo = RTE_CRYPTO_AUTH_AES_GMAC; break; case VIRTIO_CRYPTO_MAC_CBCMAC_AES: - ret = RTE_CRYPTO_AUTH_AES_CBC_MAC; - break; - case VIRTIO_CRYPTO_MAC_CBCMAC_KASUMI_F9: - ret = -VIRTIO_CRYPTO_NOTSUPP; + *algo = RTE_CRYPTO_AUTH_AES_CBC_MAC; break; case VIRTIO_CRYPTO_MAC_XCBC_AES: - ret = RTE_CRYPTO_AUTH_AES_XCBC_MAC; + *algo = RTE_CRYPTO_AUTH_AES_XCBC_MAC; break; + case VIRTIO_CRYPTO_MAC_CMAC_3DES: + case VIRTIO_CRYPTO_MAC_GMAC_TWOFISH: + case VIRTIO_CRYPTO_MAC_CBCMAC_KASUMI_F9: + return -VIRTIO_CRYPTO_NOTSUPP; default: - ret = -VIRTIO_CRYPTO_BADMSG; - break; + return -VIRTIO_CRYPTO_BADMSG; } - return ret; + return 0; } static int get_iv_len(enum rte_crypto_cipher_algorithm algo) @@ -197,6 +197,8 @@ struct vhost_crypto { struct rte_hash *session_map; struct rte_mempool *mbuf_pool; struct rte_mempool *sess_pool; + struct rte_mempool *sess_priv_pool; + struct rte_mempool *wb_pool; /** DPDK cryptodev ID */ uint8_t cid; @@ -214,13 +216,20 @@ struct vhost_crypto { uint8_t option; } __rte_cache_aligned; +struct vhost_crypto_writeback_data { + uint8_t *src; + uint8_t *dst; + uint64_t len; + struct vhost_crypto_writeback_data *next; +}; + struct vhost_crypto_data_req { struct vring_desc *head; - struct rte_vhost_memory *mem; + struct virtio_net *dev; struct virtio_crypto_inhdr *inhdr; struct vhost_virtqueue *vq; - struct vring_desc *wb_desc; - uint16_t wb_len; + struct vhost_crypto_writeback_data *wb; + struct rte_mempool *wb_pool; uint16_t desc_idx; uint16_t len; uint16_t zero_copy; @@ -232,12 +241,16 @@ transform_cipher_param(struct rte_crypto_sym_xform *xform, { int ret; - ret = cipher_algo_transform(param->cipher_algo); + ret = cipher_algo_transform(param->cipher_algo, &xform->cipher.algo); if (unlikely(ret < 0)) return ret; + if (param->cipher_key_len > VHOST_USER_CRYPTO_MAX_CIPHER_KEY_LENGTH) { + VC_LOG_DBG("Invalid cipher key length\n"); + return -VIRTIO_CRYPTO_BADMSG; + } + xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER; - xform->cipher.algo = (uint32_t)ret; xform->cipher.key.length = param->cipher_key_len; if (xform->cipher.key.length > 0) xform->cipher.key.data = param->cipher_key_buf; @@ -283,11 +296,17 @@ transform_chain_param(struct rte_crypto_sym_xform *xforms, } /* cipher */ - ret = cipher_algo_transform(param->cipher_algo); + ret = 
cipher_algo_transform(param->cipher_algo, + &xform_cipher->cipher.algo); if (unlikely(ret < 0)) return ret; + + if (param->cipher_key_len > VHOST_USER_CRYPTO_MAX_CIPHER_KEY_LENGTH) { + VC_LOG_DBG("Invalid cipher key length\n"); + return -VIRTIO_CRYPTO_BADMSG; + } + xform_cipher->type = RTE_CRYPTO_SYM_XFORM_CIPHER; - xform_cipher->cipher.algo = (uint32_t)ret; xform_cipher->cipher.key.length = param->cipher_key_len; xform_cipher->cipher.key.data = param->cipher_key_buf; ret = get_iv_len(xform_cipher->cipher.algo); @@ -298,10 +317,15 @@ transform_chain_param(struct rte_crypto_sym_xform *xforms, /* auth */ xform_auth->type = RTE_CRYPTO_SYM_XFORM_AUTH; - ret = auth_algo_transform(param->hash_algo); + ret = auth_algo_transform(param->hash_algo, &xform_auth->auth.algo); if (unlikely(ret < 0)) return ret; - xform_auth->auth.algo = (uint32_t)ret; + + if (param->auth_key_len > VHOST_USER_CRYPTO_MAX_HMAC_KEY_LENGTH) { + VC_LOG_DBG("Invalid auth key length\n"); + return -VIRTIO_CRYPTO_BADMSG; + } + xform_auth->auth.digest_length = param->digest_len; xform_auth->auth.key.length = param->auth_key_len; xform_auth->auth.key.data = param->auth_key_buf; @@ -360,7 +384,7 @@ vhost_crypto_create_sess(struct vhost_crypto *vcrypto, } if (rte_cryptodev_sym_session_init(vcrypto->cid, session, &xform1, - vcrypto->sess_pool) < 0) { + vcrypto->sess_priv_pool) < 0) { VC_LOG_ERR("Failed to initialize session"); sess_param->session_id = -VIRTIO_CRYPTO_ERR; return; @@ -424,259 +448,441 @@ vhost_crypto_close_sess(struct vhost_crypto *vcrypto, uint64_t session_id) return 0; } -static int -vhost_crypto_msg_post_handler(int vid, void *msg, uint32_t *require_reply) +static enum rte_vhost_msg_result +vhost_crypto_msg_post_handler(int vid, void *msg) { struct virtio_net *dev = get_device(vid); struct vhost_crypto *vcrypto; VhostUserMsg *vmsg = msg; - int ret = 0; + enum rte_vhost_msg_result ret = RTE_VHOST_MSG_RESULT_OK; - if (dev == NULL || require_reply == NULL) { + if (dev == NULL) { VC_LOG_ERR("Invalid vid %i", vid); - return -EINVAL; + return RTE_VHOST_MSG_RESULT_ERR; } vcrypto = dev->extern_data; if (vcrypto == NULL) { VC_LOG_ERR("Cannot find required data, is it initialized?"); - return -ENOENT; + return RTE_VHOST_MSG_RESULT_ERR; } - *require_reply = 0; - - if (vmsg->request.master == VHOST_USER_CRYPTO_CREATE_SESS) { + switch (vmsg->request.master) { + case VHOST_USER_CRYPTO_CREATE_SESS: vhost_crypto_create_sess(vcrypto, &vmsg->payload.crypto_session); - *require_reply = 1; - } else if (vmsg->request.master == VHOST_USER_CRYPTO_CLOSE_SESS) - ret = vhost_crypto_close_sess(vcrypto, vmsg->payload.u64); - else - ret = -EINVAL; + vmsg->fd_num = 0; + ret = RTE_VHOST_MSG_RESULT_REPLY; + break; + case VHOST_USER_CRYPTO_CLOSE_SESS: + if (vhost_crypto_close_sess(vcrypto, vmsg->payload.u64)) + ret = RTE_VHOST_MSG_RESULT_ERR; + break; + default: + ret = RTE_VHOST_MSG_RESULT_NOT_HANDLED; + break; + } return ret; } -static __rte_always_inline struct vring_desc * -find_write_desc(struct vring_desc *head, struct vring_desc *desc) +static __rte_always_inline struct vhost_crypto_desc * +find_write_desc(struct vhost_crypto_desc *head, struct vhost_crypto_desc *desc, + uint32_t max_n_descs) { - if (desc->flags & VRING_DESC_F_WRITE) - return desc; + if (desc < head) + return NULL; - while (desc->flags & VRING_DESC_F_NEXT) { - desc = &head[desc->next]; + while (desc - head < (int)max_n_descs) { if (desc->flags & VRING_DESC_F_WRITE) return desc; + desc++; } return NULL; } -static struct virtio_crypto_inhdr * -reach_inhdr(struct vring_desc 
*head, struct rte_vhost_memory *mem, - struct vring_desc *desc) +static __rte_always_inline struct virtio_crypto_inhdr * +reach_inhdr(struct vhost_crypto_data_req *vc_req, + struct vhost_crypto_desc *head, + uint32_t max_n_descs) { - uint64_t dlen; struct virtio_crypto_inhdr *inhdr; + struct vhost_crypto_desc *last = head + (max_n_descs - 1); + uint64_t dlen = last->len; - while (desc->flags & VRING_DESC_F_NEXT) - desc = &head[desc->next]; + if (unlikely(dlen != sizeof(*inhdr))) + return NULL; - dlen = desc->len; - inhdr = GPA_TO_VVA(struct virtio_crypto_inhdr *, mem, desc->addr, &dlen); - if (unlikely(dlen != desc->len)) + inhdr = IOVA_TO_VVA(struct virtio_crypto_inhdr *, vc_req, last->addr, + &dlen, VHOST_ACCESS_WO); + if (unlikely(!inhdr || dlen != last->len)) return NULL; return inhdr; } static __rte_always_inline int -move_desc(struct vring_desc *head, struct vring_desc **cur_desc, - uint32_t size) +move_desc(struct vhost_crypto_desc *head, + struct vhost_crypto_desc **cur_desc, + uint32_t size, uint32_t max_n_descs) { - struct vring_desc *desc = *cur_desc; - int left = size; + struct vhost_crypto_desc *desc = *cur_desc; + int left = size - desc->len; - rte_prefetch0(&head[desc->next]); - left -= desc->len; - - while ((desc->flags & VRING_DESC_F_NEXT) && left > 0) { - desc = &head[desc->next]; - rte_prefetch0(&head[desc->next]); + while (desc->flags & VRING_DESC_F_NEXT && left > 0 && + desc >= head && + desc - head < (int)max_n_descs) { + desc++; left -= desc->len; } - if (unlikely(left < 0)) { - VC_LOG_ERR("Incorrect virtio descriptor"); + if (unlikely(left > 0)) return -1; - } - *cur_desc = &head[desc->next]; + if (unlikely(head - desc == (int)max_n_descs)) + *cur_desc = NULL; + else + *cur_desc = desc + 1; + return 0; } -static int -copy_data(void *dst_data, struct vring_desc *head, struct rte_vhost_memory *mem, - struct vring_desc **cur_desc, uint32_t size) +static __rte_always_inline void * +get_data_ptr(struct vhost_crypto_data_req *vc_req, + struct vhost_crypto_desc *cur_desc, + uint8_t perm) +{ + void *data; + uint64_t dlen = cur_desc->len; + + data = IOVA_TO_VVA(void *, vc_req, cur_desc->addr, &dlen, perm); + if (unlikely(!data || dlen != cur_desc->len)) { + VC_LOG_ERR("Failed to map object"); + return NULL; + } + + return data; +} + +static __rte_always_inline int +copy_data(void *dst_data, struct vhost_crypto_data_req *vc_req, + struct vhost_crypto_desc *head, + struct vhost_crypto_desc **cur_desc, + uint32_t size, uint32_t max_n_descs) { - struct vring_desc *desc = *cur_desc; + struct vhost_crypto_desc *desc = *cur_desc; + uint64_t remain, addr, dlen, len; uint32_t to_copy; uint8_t *data = dst_data; uint8_t *src; int left = size; - uint64_t dlen; - rte_prefetch0(&head[desc->next]); to_copy = RTE_MIN(desc->len, (uint32_t)left); - dlen = desc->len; - src = GPA_TO_VVA(uint8_t *, mem, desc->addr, &dlen); - if (unlikely(!src || dlen != desc->len)) { - VC_LOG_ERR("Failed to map descriptor"); + dlen = to_copy; + src = IOVA_TO_VVA(uint8_t *, vc_req, desc->addr, &dlen, + VHOST_ACCESS_RO); + if (unlikely(!src || !dlen)) return -1; + + rte_memcpy((uint8_t *)data, src, dlen); + data += dlen; + + if (unlikely(dlen < to_copy)) { + remain = to_copy - dlen; + addr = desc->addr + dlen; + + while (remain) { + len = remain; + src = IOVA_TO_VVA(uint8_t *, vc_req, addr, &len, + VHOST_ACCESS_RO); + if (unlikely(!src || !len)) { + VC_LOG_ERR("Failed to map descriptor"); + return -1; + } + + rte_memcpy(data, src, len); + addr += len; + remain -= len; + data += len; + } } - rte_memcpy((uint8_t 
*)data, src, to_copy); left -= to_copy; - while ((desc->flags & VRING_DESC_F_NEXT) && left > 0) { - desc = &head[desc->next]; - rte_prefetch0(&head[desc->next]); + while (desc >= head && desc - head < (int)max_n_descs && left) { + desc++; to_copy = RTE_MIN(desc->len, (uint32_t)left); - dlen = desc->len; - src = GPA_TO_VVA(uint8_t *, mem, desc->addr, &dlen); - if (unlikely(!src || dlen != desc->len)) { + dlen = to_copy; + src = IOVA_TO_VVA(uint8_t *, vc_req, desc->addr, &dlen, + VHOST_ACCESS_RO); + if (unlikely(!src || !dlen)) { VC_LOG_ERR("Failed to map descriptor"); return -1; } - rte_memcpy(data + size - left, src, to_copy); + rte_memcpy(data, src, dlen); + data += dlen; + + if (unlikely(dlen < to_copy)) { + remain = to_copy - dlen; + addr = desc->addr + dlen; + + while (remain) { + len = remain; + src = IOVA_TO_VVA(uint8_t *, vc_req, addr, &len, + VHOST_ACCESS_RO); + if (unlikely(!src || !len)) { + VC_LOG_ERR("Failed to map descriptor"); + return -1; + } + + rte_memcpy(data, src, len); + addr += len; + remain -= len; + data += len; + } + } + left -= to_copy; } - if (unlikely(left < 0)) { + if (unlikely(left > 0)) { VC_LOG_ERR("Incorrect virtio descriptor"); return -1; } - *cur_desc = &head[desc->next]; + if (unlikely(desc - head == (int)max_n_descs)) + *cur_desc = NULL; + else + *cur_desc = desc + 1; return 0; } -static __rte_always_inline void * -get_data_ptr(struct vring_desc *head, struct rte_vhost_memory *mem, - struct vring_desc **cur_desc, uint32_t size) +static void +write_back_data(struct vhost_crypto_data_req *vc_req) { - void *data; - uint64_t dlen = (*cur_desc)->len; - - data = GPA_TO_VVA(void *, mem, (*cur_desc)->addr, &dlen); - if (unlikely(!data || dlen != (*cur_desc)->len)) { - VC_LOG_ERR("Failed to map object"); - return NULL; + struct vhost_crypto_writeback_data *wb_data = vc_req->wb, *wb_last; + + while (wb_data) { + rte_memcpy(wb_data->dst, wb_data->src, wb_data->len); + memset(wb_data->src, 0, wb_data->len); + wb_last = wb_data; + wb_data = wb_data->next; + rte_mempool_put(vc_req->wb_pool, wb_last); } +} - if (unlikely(move_desc(head, cur_desc, size) < 0)) - return NULL; +static void +free_wb_data(struct vhost_crypto_writeback_data *wb_data, + struct rte_mempool *mp) +{ + while (wb_data->next != NULL) + free_wb_data(wb_data->next, mp); - return data; + rte_mempool_put(mp, wb_data); } -static int -write_back_data(struct rte_crypto_op *op, struct vhost_crypto_data_req *vc_req) +/** + * The function will allocate a vhost_crypto_writeback_data linked list + * containing the source and destination data pointers for the write back + * operation after dequeued from Cryptodev PMD queues. + * + * @param vc_req + * The vhost crypto data request pointer + * @param cur_desc + * The pointer of the current in use descriptor pointer. The content of + * cur_desc is expected to be updated after the function execution. + * @param end_wb_data + * The last write back data element to be returned. It is used only in cipher + * and hash chain operations. + * @param src + * The source data pointer + * @param offset + * The offset to both source and destination data. For source data the offset + * is the number of bytes between src and start point of cipher operation. For + * destination data the offset is the number of bytes from *cur_desc->addr + * to the point where the src will be written to. + * @param write_back_len + * The size of the write back length. + * @return + * The pointer to the start of the write back data linked list. 
+ */ +static __rte_always_inline struct vhost_crypto_writeback_data * +prepare_write_back_data(struct vhost_crypto_data_req *vc_req, + struct vhost_crypto_desc *head_desc, + struct vhost_crypto_desc **cur_desc, + struct vhost_crypto_writeback_data **end_wb_data, + uint8_t *src, + uint32_t offset, + uint64_t write_back_len, + uint32_t max_n_descs) { - struct rte_mbuf *mbuf = op->sym->m_dst; - struct vring_desc *head = vc_req->head; - struct rte_vhost_memory *mem = vc_req->mem; - struct vring_desc *desc = vc_req->wb_desc; - int left = vc_req->wb_len; - uint32_t to_write; - uint8_t *src_data = mbuf->buf_addr, *dst; + struct vhost_crypto_writeback_data *wb_data, *head; + struct vhost_crypto_desc *desc = *cur_desc; uint64_t dlen; + uint8_t *dst; + int ret; - rte_prefetch0(&head[desc->next]); - to_write = RTE_MIN(desc->len, (uint32_t)left); - dlen = desc->len; - dst = GPA_TO_VVA(uint8_t *, mem, desc->addr, &dlen); - if (unlikely(!dst || dlen != desc->len)) { - VC_LOG_ERR("Failed to map descriptor"); - return -1; + ret = rte_mempool_get(vc_req->wb_pool, (void **)&head); + if (unlikely(ret < 0)) { + VC_LOG_ERR("no memory"); + goto error_exit; } - rte_memcpy(dst, src_data, to_write); - left -= to_write; - src_data += to_write; + wb_data = head; - while ((desc->flags & VRING_DESC_F_NEXT) && left > 0) { - desc = &head[desc->next]; - rte_prefetch0(&head[desc->next]); - to_write = RTE_MIN(desc->len, (uint32_t)left); + if (likely(desc->len > offset)) { + wb_data->src = src + offset; dlen = desc->len; - dst = GPA_TO_VVA(uint8_t *, mem, desc->addr, &dlen); + dst = IOVA_TO_VVA(uint8_t *, vc_req, desc->addr, + &dlen, VHOST_ACCESS_RW); if (unlikely(!dst || dlen != desc->len)) { VC_LOG_ERR("Failed to map descriptor"); - return -1; + goto error_exit; } - rte_memcpy(dst, src_data, to_write); - left -= to_write; - src_data += to_write; - } + wb_data->dst = dst + offset; + wb_data->len = RTE_MIN(dlen - offset, write_back_len); + write_back_len -= wb_data->len; + src += offset + wb_data->len; + offset = 0; + + if (unlikely(write_back_len)) { + ret = rte_mempool_get(vc_req->wb_pool, + (void **)&(wb_data->next)); + if (unlikely(ret < 0)) { + VC_LOG_ERR("no memory"); + goto error_exit; + } - if (unlikely(left < 0)) { - VC_LOG_ERR("Incorrect virtio descriptor"); - return -1; + wb_data = wb_data->next; + } else + wb_data->next = NULL; + } else + offset -= desc->len; + + while (write_back_len && + desc >= head_desc && + desc - head_desc < (int)max_n_descs) { + desc++; + if (unlikely(!(desc->flags & VRING_DESC_F_WRITE))) { + VC_LOG_ERR("incorrect descriptor"); + goto error_exit; + } + + if (desc->len <= offset) { + offset -= desc->len; + continue; + } + + dlen = desc->len; + dst = IOVA_TO_VVA(uint8_t *, vc_req, desc->addr, &dlen, + VHOST_ACCESS_RW) + offset; + if (unlikely(dst == NULL || dlen != desc->len)) { + VC_LOG_ERR("Failed to map descriptor"); + goto error_exit; + } + + wb_data->src = src + offset; + wb_data->dst = dst; + wb_data->len = RTE_MIN(desc->len - offset, write_back_len); + write_back_len -= wb_data->len; + src += wb_data->len; + offset = 0; + + if (write_back_len) { + ret = rte_mempool_get(vc_req->wb_pool, + (void **)&(wb_data->next)); + if (unlikely(ret < 0)) { + VC_LOG_ERR("no memory"); + goto error_exit; + } + + wb_data = wb_data->next; + } else + wb_data->next = NULL; } - return 0; + if (unlikely(desc - head_desc == (int)max_n_descs)) + *cur_desc = NULL; + else + *cur_desc = desc + 1; + + *end_wb_data = wb_data; + + return head; + +error_exit: + if (head) + free_wb_data(head, vc_req->wb_pool); + + 
return NULL; } -static uint8_t +static __rte_always_inline uint8_t +vhost_crypto_check_cipher_request(struct virtio_crypto_cipher_data_req *req) +{ + if (likely((req->para.iv_len <= VHOST_CRYPTO_MAX_IV_LEN) && + (req->para.src_data_len <= RTE_MBUF_DEFAULT_BUF_SIZE) && + (req->para.dst_data_len >= req->para.src_data_len) && + (req->para.dst_data_len <= RTE_MBUF_DEFAULT_BUF_SIZE))) + return VIRTIO_CRYPTO_OK; + return VIRTIO_CRYPTO_BADMSG; +} + +static __rte_always_inline uint8_t prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op, struct vhost_crypto_data_req *vc_req, struct virtio_crypto_cipher_data_req *cipher, - struct vring_desc *cur_desc) + struct vhost_crypto_desc *head, + uint32_t max_n_descs) { - struct vring_desc *head = vc_req->head; - struct vring_desc *desc = cur_desc; - struct rte_vhost_memory *mem = vc_req->mem; + struct vhost_crypto_desc *desc = head; + struct vhost_crypto_writeback_data *ewb = NULL; struct rte_mbuf *m_src = op->sym->m_src, *m_dst = op->sym->m_dst; uint8_t *iv_data = rte_crypto_op_ctod_offset(op, uint8_t *, IV_OFFSET); - uint8_t ret = 0; + uint8_t ret = vhost_crypto_check_cipher_request(cipher); + + if (unlikely(ret != VIRTIO_CRYPTO_OK)) + goto error_exit; /* prepare */ /* iv */ - if (unlikely(copy_data(iv_data, head, mem, &desc, - cipher->para.iv_len) < 0)) { + if (unlikely(copy_data(iv_data, vc_req, head, &desc, + cipher->para.iv_len, max_n_descs))) { ret = VIRTIO_CRYPTO_BADMSG; goto error_exit; } - m_src->data_len = cipher->para.src_data_len; - switch (vcrypto->option) { case RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE: + m_src->data_len = cipher->para.src_data_len; m_src->buf_iova = gpa_to_hpa(vcrypto->dev, desc->addr, cipher->para.src_data_len); - m_src->buf_addr = get_data_ptr(head, mem, &desc, - cipher->para.src_data_len); + m_src->buf_addr = get_data_ptr(vc_req, desc, VHOST_ACCESS_RO); if (unlikely(m_src->buf_iova == 0 || m_src->buf_addr == NULL)) { VC_LOG_ERR("zero_copy may fail due to cross page data"); ret = VIRTIO_CRYPTO_ERR; goto error_exit; } - break; - case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE: - if (unlikely(cipher->para.src_data_len > - RTE_MBUF_DEFAULT_BUF_SIZE)) { - VC_LOG_ERR("Not enough space to do data copy"); + + if (unlikely(move_desc(head, &desc, cipher->para.src_data_len, + max_n_descs) < 0)) { + VC_LOG_ERR("Incorrect descriptor"); ret = VIRTIO_CRYPTO_ERR; goto error_exit; } - if (unlikely(copy_data(rte_pktmbuf_mtod(m_src, uint8_t *), head, - mem, &desc, cipher->para.src_data_len)) - < 0) { + + break; + case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE: + vc_req->wb_pool = vcrypto->wb_pool; + m_src->data_len = cipher->para.src_data_len; + if (unlikely(copy_data(rte_pktmbuf_mtod(m_src, uint8_t *), + vc_req, head, &desc, cipher->para.src_data_len, + max_n_descs) < 0)) { ret = VIRTIO_CRYPTO_BADMSG; goto error_exit; } @@ -687,7 +893,7 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op, } /* dst */ - desc = find_write_desc(head, desc); + desc = find_write_desc(head, desc, max_n_descs); if (unlikely(!desc)) { VC_LOG_ERR("Cannot find write location"); ret = VIRTIO_CRYPTO_BADMSG; @@ -698,23 +904,31 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op, case RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE: m_dst->buf_iova = gpa_to_hpa(vcrypto->dev, desc->addr, cipher->para.dst_data_len); - m_dst->buf_addr = get_data_ptr(head, mem, &desc, - cipher->para.dst_data_len); + m_dst->buf_addr = get_data_ptr(vc_req, desc, VHOST_ACCESS_RW); if (unlikely(m_dst->buf_iova == 0 || m_dst->buf_addr == NULL)) { 
VC_LOG_ERR("zero_copy may fail due to cross page data"); ret = VIRTIO_CRYPTO_ERR; goto error_exit; } + if (unlikely(move_desc(head, &desc, cipher->para.dst_data_len, + max_n_descs) < 0)) { + VC_LOG_ERR("Incorrect descriptor"); + ret = VIRTIO_CRYPTO_ERR; + goto error_exit; + } + m_dst->data_len = cipher->para.dst_data_len; break; case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE: - vc_req->wb_desc = desc; - vc_req->wb_len = cipher->para.dst_data_len; - if (unlikely(move_desc(head, &desc, vc_req->wb_len) < 0)) { + vc_req->wb = prepare_write_back_data(vc_req, head, &desc, &ewb, + rte_pktmbuf_mtod(m_src, uint8_t *), 0, + cipher->para.dst_data_len, max_n_descs); + if (unlikely(vc_req->wb == NULL)) { ret = VIRTIO_CRYPTO_ERR; goto error_exit; } + break; default: ret = VIRTIO_CRYPTO_BADMSG; @@ -728,7 +942,7 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op, op->sym->cipher.data.offset = 0; op->sym->cipher.data.length = cipher->para.src_data_len; - vc_req->inhdr = get_data_ptr(head, mem, &desc, INHDR_LEN); + vc_req->inhdr = get_data_ptr(vc_req, desc, VHOST_ACCESS_WO); if (unlikely(vc_req->inhdr == NULL)) { ret = VIRTIO_CRYPTO_BADMSG; goto error_exit; @@ -740,60 +954,93 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op, return 0; error_exit: + if (vc_req->wb) + free_wb_data(vc_req->wb, vc_req->wb_pool); + vc_req->len = INHDR_LEN; return ret; } -static uint8_t +static __rte_always_inline uint8_t +vhost_crypto_check_chain_request(struct virtio_crypto_alg_chain_data_req *req) +{ + if (likely((req->para.iv_len <= VHOST_CRYPTO_MAX_IV_LEN) && + (req->para.src_data_len <= VHOST_CRYPTO_MAX_DATA_SIZE) && + (req->para.dst_data_len >= req->para.src_data_len) && + (req->para.dst_data_len <= VHOST_CRYPTO_MAX_DATA_SIZE) && + (req->para.cipher_start_src_offset < + VHOST_CRYPTO_MAX_DATA_SIZE) && + (req->para.len_to_cipher <= VHOST_CRYPTO_MAX_DATA_SIZE) && + (req->para.hash_start_src_offset < + VHOST_CRYPTO_MAX_DATA_SIZE) && + (req->para.len_to_hash <= VHOST_CRYPTO_MAX_DATA_SIZE) && + (req->para.cipher_start_src_offset + req->para.len_to_cipher <= + req->para.src_data_len) && + (req->para.hash_start_src_offset + req->para.len_to_hash <= + req->para.src_data_len) && + (req->para.dst_data_len + req->para.hash_result_len <= + VHOST_CRYPTO_MAX_DATA_SIZE))) + return VIRTIO_CRYPTO_OK; + return VIRTIO_CRYPTO_BADMSG; +} + +static __rte_always_inline uint8_t prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op, struct vhost_crypto_data_req *vc_req, struct virtio_crypto_alg_chain_data_req *chain, - struct vring_desc *cur_desc) + struct vhost_crypto_desc *head, + uint32_t max_n_descs) { - struct vring_desc *head = vc_req->head; - struct vring_desc *desc = cur_desc; - struct rte_vhost_memory *mem = vc_req->mem; + struct vhost_crypto_desc *desc = head, *digest_desc; + struct vhost_crypto_writeback_data *ewb = NULL, *ewb2 = NULL; struct rte_mbuf *m_src = op->sym->m_src, *m_dst = op->sym->m_dst; uint8_t *iv_data = rte_crypto_op_ctod_offset(op, uint8_t *, IV_OFFSET); uint32_t digest_offset; void *digest_addr; - uint8_t ret = 0; + uint8_t ret = vhost_crypto_check_chain_request(chain); + + if (unlikely(ret != VIRTIO_CRYPTO_OK)) + goto error_exit; /* prepare */ /* iv */ - if (unlikely(copy_data(iv_data, head, mem, &desc, - chain->para.iv_len) < 0)) { + if (unlikely(copy_data(iv_data, vc_req, head, &desc, + chain->para.iv_len, max_n_descs) < 0)) { ret = VIRTIO_CRYPTO_BADMSG; goto error_exit; } - m_src->data_len = chain->para.src_data_len; - m_dst->data_len = 
chain->para.dst_data_len; - switch (vcrypto->option) { case RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE: + m_src->data_len = chain->para.src_data_len; + m_dst->data_len = chain->para.dst_data_len; + m_src->buf_iova = gpa_to_hpa(vcrypto->dev, desc->addr, chain->para.src_data_len); - m_src->buf_addr = get_data_ptr(head, mem, &desc, - chain->para.src_data_len); + m_src->buf_addr = get_data_ptr(vc_req, desc, VHOST_ACCESS_RO); if (unlikely(m_src->buf_iova == 0 || m_src->buf_addr == NULL)) { VC_LOG_ERR("zero_copy may fail due to cross page data"); ret = VIRTIO_CRYPTO_ERR; goto error_exit; } - break; - case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE: - if (unlikely(chain->para.src_data_len > - RTE_MBUF_DEFAULT_BUF_SIZE)) { - VC_LOG_ERR("Not enough space to do data copy"); + + if (unlikely(move_desc(head, &desc, chain->para.src_data_len, + max_n_descs) < 0)) { + VC_LOG_ERR("Incorrect descriptor"); ret = VIRTIO_CRYPTO_ERR; goto error_exit; } - if (unlikely(copy_data(rte_pktmbuf_mtod(m_src, uint8_t *), head, - mem, &desc, chain->para.src_data_len)) < 0) { + break; + case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE: + vc_req->wb_pool = vcrypto->wb_pool; + m_src->data_len = chain->para.src_data_len; + if (unlikely(copy_data(rte_pktmbuf_mtod(m_src, uint8_t *), + vc_req, head, &desc, chain->para.src_data_len, + max_n_descs) < 0)) { ret = VIRTIO_CRYPTO_BADMSG; goto error_exit; } + break; default: ret = VIRTIO_CRYPTO_BADMSG; @@ -801,7 +1048,7 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op, } /* dst */ - desc = find_write_desc(head, desc); + desc = find_write_desc(head, desc, max_n_descs); if (unlikely(!desc)) { VC_LOG_ERR("Cannot find write location"); ret = VIRTIO_CRYPTO_BADMSG; @@ -812,52 +1059,75 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op, case RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE: m_dst->buf_iova = gpa_to_hpa(vcrypto->dev, desc->addr, chain->para.dst_data_len); - m_dst->buf_addr = get_data_ptr(head, mem, &desc, - chain->para.dst_data_len); + m_dst->buf_addr = get_data_ptr(vc_req, desc, VHOST_ACCESS_RW); if (unlikely(m_dst->buf_iova == 0 || m_dst->buf_addr == NULL)) { VC_LOG_ERR("zero_copy may fail due to cross page data"); ret = VIRTIO_CRYPTO_ERR; goto error_exit; } + if (unlikely(move_desc(vc_req->head, &desc, + chain->para.dst_data_len, max_n_descs) < 0)) { + VC_LOG_ERR("Incorrect descriptor"); + ret = VIRTIO_CRYPTO_ERR; + goto error_exit; + } + op->sym->auth.digest.phys_addr = gpa_to_hpa(vcrypto->dev, desc->addr, chain->para.hash_result_len); - op->sym->auth.digest.data = get_data_ptr(head, mem, &desc, - chain->para.hash_result_len); + op->sym->auth.digest.data = get_data_ptr(vc_req, desc, + VHOST_ACCESS_RW); if (unlikely(op->sym->auth.digest.phys_addr == 0)) { VC_LOG_ERR("zero_copy may fail due to cross page data"); ret = VIRTIO_CRYPTO_ERR; goto error_exit; } + + if (unlikely(move_desc(head, &desc, + chain->para.hash_result_len, + max_n_descs) < 0)) { + VC_LOG_ERR("Incorrect descriptor"); + ret = VIRTIO_CRYPTO_ERR; + goto error_exit; + } + break; case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE: - digest_offset = m_dst->data_len; - digest_addr = rte_pktmbuf_mtod_offset(m_dst, void *, - digest_offset); + vc_req->wb = prepare_write_back_data(vc_req, head, &desc, &ewb, + rte_pktmbuf_mtod(m_src, uint8_t *), + chain->para.cipher_start_src_offset, + chain->para.dst_data_len - + chain->para.cipher_start_src_offset, + max_n_descs); + if (unlikely(vc_req->wb == NULL)) { + ret = VIRTIO_CRYPTO_ERR; + goto error_exit; + } - vc_req->wb_desc = desc; - vc_req->wb_len = 
m_dst->data_len + chain->para.hash_result_len; + digest_desc = desc; + digest_offset = m_src->data_len; + digest_addr = rte_pktmbuf_mtod_offset(m_src, void *, + digest_offset); - if (unlikely(move_desc(head, &desc, - chain->para.dst_data_len) < 0)) { - ret = VIRTIO_CRYPTO_BADMSG; + /** create a wb_data for digest */ + ewb->next = prepare_write_back_data(vc_req, head, &desc, + &ewb2, digest_addr, 0, + chain->para.hash_result_len, max_n_descs); + if (unlikely(ewb->next == NULL)) { + ret = VIRTIO_CRYPTO_ERR; goto error_exit; } - if (unlikely(copy_data(digest_addr, head, mem, &desc, - chain->para.hash_result_len)) < 0) { + if (unlikely(copy_data(digest_addr, vc_req, head, &digest_desc, + chain->para.hash_result_len, + max_n_descs) < 0)) { ret = VIRTIO_CRYPTO_BADMSG; goto error_exit; } op->sym->auth.digest.data = digest_addr; - op->sym->auth.digest.phys_addr = rte_pktmbuf_iova_offset(m_dst, + op->sym->auth.digest.phys_addr = rte_pktmbuf_iova_offset(m_src, digest_offset); - if (unlikely(move_desc(head, &desc, - chain->para.hash_result_len) < 0)) { - ret = VIRTIO_CRYPTO_ERR; - goto error_exit; - } break; default: ret = VIRTIO_CRYPTO_BADMSG; @@ -865,7 +1135,7 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op, } /* record inhdr */ - vc_req->inhdr = get_data_ptr(head, mem, &desc, INHDR_LEN); + vc_req->inhdr = get_data_ptr(vc_req, desc, VHOST_ACCESS_WO); if (unlikely(vc_req->inhdr == NULL)) { ret = VIRTIO_CRYPTO_BADMSG; goto error_exit; @@ -888,6 +1158,8 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op, return 0; error_exit: + if (vc_req->wb) + free_wb_data(vc_req->wb, vc_req->wb_pool); vc_req->len = INHDR_LEN; return ret; } @@ -898,49 +1170,103 @@ error_exit: static __rte_always_inline int vhost_crypto_process_one_req(struct vhost_crypto *vcrypto, struct vhost_virtqueue *vq, struct rte_crypto_op *op, - struct vring_desc *head, uint16_t desc_idx, - struct rte_vhost_memory *mem) + struct vring_desc *head, struct vhost_crypto_desc *descs, + uint16_t desc_idx) { - struct vhost_crypto_data_req *vc_req = RTE_PTR_ADD(op->sym->m_src, - sizeof(struct rte_mbuf)); + struct vhost_crypto_data_req *vc_req = rte_mbuf_to_priv(op->sym->m_src); struct rte_cryptodev_sym_session *session; - struct virtio_crypto_op_data_req *req; + struct virtio_crypto_op_data_req req; struct virtio_crypto_inhdr *inhdr; - struct vring_desc *desc = NULL; + struct vhost_crypto_desc *desc = descs; + struct vring_desc *src_desc; uint64_t session_id; uint64_t dlen; - int err = 0; + uint32_t nb_descs = 0, max_n_descs, i; + int err; vc_req->desc_idx = desc_idx; + vc_req->dev = vcrypto->dev; + vc_req->vq = vq; + + if (unlikely((head->flags & VRING_DESC_F_INDIRECT) == 0)) { + VC_LOG_ERR("Invalid descriptor"); + return -1; + } + + dlen = head->len; + src_desc = IOVA_TO_VVA(struct vring_desc *, vc_req, head->addr, + &dlen, VHOST_ACCESS_RO); + if (unlikely(!src_desc || dlen != head->len)) { + VC_LOG_ERR("Invalid descriptor"); + return -1; + } + head = src_desc; - if (likely(head->flags & VRING_DESC_F_INDIRECT)) { - dlen = head->len; - desc = GPA_TO_VVA(struct vring_desc *, mem, head->addr, &dlen); - if (unlikely(!desc || dlen != head->len)) + nb_descs = max_n_descs = dlen / sizeof(struct vring_desc); + if (unlikely(nb_descs > VHOST_CRYPTO_MAX_N_DESC || nb_descs == 0)) { + err = VIRTIO_CRYPTO_ERR; + VC_LOG_ERR("Cannot process num of descriptors %u", nb_descs); + if (nb_descs > 0) { + struct vring_desc *inhdr_desc = head; + while (inhdr_desc->flags & VRING_DESC_F_NEXT) { + if 
(inhdr_desc->next >= max_n_descs) + return -1; + inhdr_desc = &head[inhdr_desc->next]; + } + if (inhdr_desc->len != sizeof(*inhdr)) + return -1; + inhdr = IOVA_TO_VVA(struct virtio_crypto_inhdr *, + vc_req, inhdr_desc->addr, &dlen, + VHOST_ACCESS_WO); + if (unlikely(!inhdr || dlen != inhdr_desc->len)) + return -1; + inhdr->status = VIRTIO_CRYPTO_ERR; return -1; - desc_idx = 0; - head = desc; - } else { - desc = head; + } } - vc_req->mem = mem; - vc_req->head = head; - vc_req->vq = vq; + /* copy descriptors to local variable */ + for (i = 0; i < max_n_descs; i++) { + desc->addr = src_desc->addr; + desc->len = src_desc->len; + desc->flags = src_desc->flags; + desc++; + if (unlikely((src_desc->flags & VRING_DESC_F_NEXT) == 0)) + break; + if (unlikely(src_desc->next >= max_n_descs)) { + err = VIRTIO_CRYPTO_BADMSG; + VC_LOG_ERR("Invalid descriptor"); + goto error_exit; + } + src_desc = &head[src_desc->next]; + } + vc_req->head = head; vc_req->zero_copy = vcrypto->option; - req = get_data_ptr(head, mem, &desc, sizeof(*req)); - if (unlikely(req == NULL)) { - err = VIRTIO_CRYPTO_ERR; + nb_descs = desc - descs; + desc = descs; + + if (unlikely(desc->len < sizeof(req))) { + err = VIRTIO_CRYPTO_BADMSG; VC_LOG_ERR("Invalid descriptor"); goto error_exit; } - switch (req->header.opcode) { + if (unlikely(copy_data(&req, vc_req, descs, &desc, sizeof(req), + max_n_descs) < 0)) { + err = VIRTIO_CRYPTO_BADMSG; + VC_LOG_ERR("Invalid descriptor"); + goto error_exit; + } + + /* desc is advanced by 1 now */ + max_n_descs -= 1; + + switch (req.header.opcode) { case VIRTIO_CRYPTO_CIPHER_ENCRYPT: case VIRTIO_CRYPTO_CIPHER_DECRYPT: - session_id = req->header.session_id; + session_id = req.header.session_id; /* one branch to avoid unnecessary table lookup */ if (vcrypto->cache_session_id != session_id) { @@ -966,17 +1292,19 @@ vhost_crypto_process_one_req(struct vhost_crypto *vcrypto, goto error_exit; } - switch (req->u.sym_req.op_type) { + switch (req.u.sym_req.op_type) { case VIRTIO_CRYPTO_SYM_OP_NONE: err = VIRTIO_CRYPTO_NOTSUPP; break; case VIRTIO_CRYPTO_SYM_OP_CIPHER: err = prepare_sym_cipher_op(vcrypto, op, vc_req, - &req->u.sym_req.u.cipher, desc); + &req.u.sym_req.u.cipher, desc, + max_n_descs); break; case VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING: err = prepare_sym_chain_op(vcrypto, op, vc_req, - &req->u.sym_req.u.chain, desc); + &req.u.sym_req.u.chain, desc, + max_n_descs); break; } if (unlikely(err != 0)) { @@ -985,8 +1313,9 @@ vhost_crypto_process_one_req(struct vhost_crypto *vcrypto, } break; default: + err = VIRTIO_CRYPTO_ERR; VC_LOG_ERR("Unsupported symmetric crypto request type %u", - req->header.opcode); + req.header.opcode); goto error_exit; } @@ -994,7 +1323,7 @@ vhost_crypto_process_one_req(struct vhost_crypto *vcrypto, error_exit: - inhdr = reach_inhdr(head, mem, desc); + inhdr = reach_inhdr(vc_req, descs, max_n_descs); if (likely(inhdr != NULL)) inhdr->status = (uint8_t)err; @@ -1007,37 +1336,34 @@ vhost_crypto_finalize_one_request(struct rte_crypto_op *op, { struct rte_mbuf *m_src = op->sym->m_src; struct rte_mbuf *m_dst = op->sym->m_dst; - struct vhost_crypto_data_req *vc_req = RTE_PTR_ADD(m_src, - sizeof(struct rte_mbuf)); - uint16_t desc_idx; - int ret = 0; + struct vhost_crypto_data_req *vc_req = rte_mbuf_to_priv(m_src); + struct vhost_virtqueue *vq = vc_req->vq; + uint16_t used_idx = vc_req->desc_idx, desc_idx; if (unlikely(!vc_req)) { VC_LOG_ERR("Failed to retrieve vc_req"); return NULL; } - if (old_vq && (vc_req->vq != old_vq)) - return vc_req->vq; - - desc_idx = vc_req->desc_idx; + if 
(old_vq && (vq != old_vq)) + return vq; if (unlikely(op->status != RTE_CRYPTO_OP_STATUS_SUCCESS)) vc_req->inhdr->status = VIRTIO_CRYPTO_ERR; else { - if (vc_req->zero_copy == 0) { - ret = write_back_data(op, vc_req); - if (unlikely(ret != 0)) - vc_req->inhdr->status = VIRTIO_CRYPTO_ERR; - } + if (vc_req->zero_copy == 0) + write_back_data(vc_req); } - vc_req->vq->used->ring[desc_idx].id = desc_idx; - vc_req->vq->used->ring[desc_idx].len = vc_req->len; + desc_idx = vq->avail->ring[used_idx]; + vq->used->ring[desc_idx].id = vq->avail->ring[desc_idx]; + vq->used->ring[desc_idx].len = vc_req->len; - rte_mempool_put(m_dst->pool, (void *)m_dst); rte_mempool_put(m_src->pool, (void *)m_src); + if (m_dst) + rte_mempool_put(m_dst->pool, (void *)m_dst); + return vc_req->vq; } @@ -1073,9 +1399,32 @@ vhost_crypto_complete_one_vm_requests(struct rte_crypto_op **ops, return processed; } -int __rte_experimental +int +rte_vhost_crypto_driver_start(const char *path) +{ + uint64_t protocol_features; + int ret; + + ret = rte_vhost_driver_set_features(path, VIRTIO_CRYPTO_FEATURES); + if (ret) + return -1; + + ret = rte_vhost_driver_get_protocol_features(path, &protocol_features); + if (ret) + return -1; + protocol_features |= (1ULL << VHOST_USER_PROTOCOL_F_CONFIG); + ret = rte_vhost_driver_set_protocol_features(path, protocol_features); + if (ret) + return -1; + + return rte_vhost_driver_start(path); +} + +int rte_vhost_crypto_create(int vid, uint8_t cryptodev_id, - struct rte_mempool *sess_pool, int socket_id) + struct rte_mempool *sess_pool, + struct rte_mempool *sess_priv_pool, + int socket_id) { struct virtio_net *dev = get_device(vid); struct rte_hash_parameters params = {0}; @@ -1088,13 +1437,6 @@ rte_vhost_crypto_create(int vid, uint8_t cryptodev_id, return -EINVAL; } - ret = rte_vhost_driver_set_features(dev->ifname, - VIRTIO_CRYPTO_FEATURES); - if (ret < 0) { - VC_LOG_ERR("Error setting features"); - return -1; - } - vcrypto = rte_zmalloc_socket(NULL, sizeof(*vcrypto), RTE_CACHE_LINE_SIZE, socket_id); if (!vcrypto) { @@ -1103,6 +1445,7 @@ rte_vhost_crypto_create(int vid, uint8_t cryptodev_id, } vcrypto->sess_pool = sess_pool; + vcrypto->sess_priv_pool = sess_priv_pool; vcrypto->cid = cryptodev_id; vcrypto->cache_session_id = UINT64_MAX; vcrypto->last_session_id = 1; @@ -1126,7 +1469,7 @@ rte_vhost_crypto_create(int vid, uint8_t cryptodev_id, vcrypto->mbuf_pool = rte_pktmbuf_pool_create(name, VHOST_CRYPTO_MBUF_POOL_SIZE, 512, sizeof(struct vhost_crypto_data_req), - RTE_MBUF_DEFAULT_DATAROOM * 2 + RTE_PKTMBUF_HEADROOM, + VHOST_CRYPTO_MAX_DATA_SIZE + RTE_PKTMBUF_HEADROOM, rte_socket_id()); if (!vcrypto->mbuf_pool) { VC_LOG_ERR("Failed to creath mbuf pool"); @@ -1134,6 +1477,18 @@ rte_vhost_crypto_create(int vid, uint8_t cryptodev_id, goto error_exit; } + snprintf(name, 127, "WB_POOL_VM_%u", (uint32_t)vid); + vcrypto->wb_pool = rte_mempool_create(name, + VHOST_CRYPTO_MBUF_POOL_SIZE, + sizeof(struct vhost_crypto_writeback_data), + 128, 0, NULL, NULL, NULL, NULL, + rte_socket_id(), 0); + if (!vcrypto->wb_pool) { + VC_LOG_ERR("Failed to creath mempool"); + ret = -ENOMEM; + goto error_exit; + } + dev->extern_data = vcrypto; dev->extern_ops.pre_msg_handle = NULL; dev->extern_ops.post_msg_handle = vhost_crypto_msg_post_handler; @@ -1151,7 +1506,7 @@ error_exit: return ret; } -int __rte_experimental +int rte_vhost_crypto_free(int vid) { struct virtio_net *dev = get_device(vid); @@ -1170,6 +1525,7 @@ rte_vhost_crypto_free(int vid) rte_hash_free(vcrypto->session_map); rte_mempool_free(vcrypto->mbuf_pool); + 
rte_mempool_free(vcrypto->wb_pool); rte_free(vcrypto); dev->extern_data = NULL; @@ -1179,7 +1535,7 @@ rte_vhost_crypto_free(int vid) return 0; } -int __rte_experimental +int rte_vhost_crypto_set_zero_copy(int vid, enum rte_vhost_crypto_zero_copy option) { struct virtio_net *dev = get_device(vid); @@ -1205,49 +1561,66 @@ rte_vhost_crypto_set_zero_copy(int vid, enum rte_vhost_crypto_zero_copy option) if (vcrypto->option == (uint8_t)option) return 0; - if (!(rte_mempool_full(vcrypto->mbuf_pool))) { + if (!(rte_mempool_full(vcrypto->mbuf_pool)) || + !(rte_mempool_full(vcrypto->wb_pool))) { VC_LOG_ERR("Cannot update zero copy as mempool is not full"); return -EINVAL; } + if (option == RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE) { + char name[128]; + + snprintf(name, 127, "WB_POOL_VM_%u", (uint32_t)vid); + vcrypto->wb_pool = rte_mempool_create(name, + VHOST_CRYPTO_MBUF_POOL_SIZE, + sizeof(struct vhost_crypto_writeback_data), + 128, 0, NULL, NULL, NULL, NULL, + rte_socket_id(), 0); + if (!vcrypto->wb_pool) { + VC_LOG_ERR("Failed to creath mbuf pool"); + return -ENOMEM; + } + } else { + rte_mempool_free(vcrypto->wb_pool); + vcrypto->wb_pool = NULL; + } + vcrypto->option = (uint8_t)option; return 0; } -uint16_t __rte_experimental +uint16_t rte_vhost_crypto_fetch_requests(int vid, uint32_t qid, struct rte_crypto_op **ops, uint16_t nb_ops) { struct rte_mbuf *mbufs[VHOST_CRYPTO_MAX_BURST_SIZE * 2]; + struct vhost_crypto_desc descs[VHOST_CRYPTO_MAX_N_DESC]; struct virtio_net *dev = get_device(vid); - struct rte_vhost_memory *mem; struct vhost_crypto *vcrypto; struct vhost_virtqueue *vq; uint16_t avail_idx; uint16_t start_idx; - uint16_t required; uint16_t count; - uint16_t i; + uint16_t i = 0; if (unlikely(dev == NULL)) { VC_LOG_ERR("Invalid vid %i", vid); - return -EINVAL; + return 0; } if (unlikely(qid >= VHOST_MAX_QUEUE_PAIRS)) { VC_LOG_ERR("Invalid qid %u", qid); - return -EINVAL; + return 0; } vcrypto = (struct vhost_crypto *)dev->extern_data; if (unlikely(vcrypto == NULL)) { VC_LOG_ERR("Cannot find required data, is it initialized?"); - return -ENOENT; + return 0; } vq = dev->virtqueue[qid]; - mem = dev->mem; avail_idx = *((volatile uint16_t *)&vq->avail->idx); start_idx = vq->last_used_idx; @@ -1261,27 +1634,66 @@ rte_vhost_crypto_fetch_requests(int vid, uint32_t qid, /* for zero copy, we need 2 empty mbufs for src and dst, otherwise * we need only 1 mbuf as src and dst */ - required = count * 2; - if (unlikely(rte_mempool_get_bulk(vcrypto->mbuf_pool, (void **)mbufs, - required) < 0)) { - VC_LOG_ERR("Insufficient memory"); - return -ENOMEM; - } + switch (vcrypto->option) { + case RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE: + if (unlikely(rte_mempool_get_bulk(vcrypto->mbuf_pool, + (void **)mbufs, count * 2) < 0)) { + VC_LOG_ERR("Insufficient memory"); + return 0; + } - for (i = 0; i < count; i++) { - uint16_t used_idx = (start_idx + i) & (vq->size - 1); - uint16_t desc_idx = vq->avail->ring[used_idx]; - struct vring_desc *head = &vq->desc[desc_idx]; - struct rte_crypto_op *op = ops[i]; + for (i = 0; i < count; i++) { + uint16_t used_idx = (start_idx + i) & (vq->size - 1); + uint16_t desc_idx = vq->avail->ring[used_idx]; + struct vring_desc *head = &vq->desc[desc_idx]; + struct rte_crypto_op *op = ops[i]; - op->sym->m_src = mbufs[i * 2]; - op->sym->m_dst = mbufs[i * 2 + 1]; - op->sym->m_src->data_off = 0; - op->sym->m_dst->data_off = 0; + op->sym->m_src = mbufs[i * 2]; + op->sym->m_dst = mbufs[i * 2 + 1]; + op->sym->m_src->data_off = 0; + op->sym->m_dst->data_off = 0; + + if 
(unlikely(vhost_crypto_process_one_req(vcrypto, vq, + op, head, descs, used_idx) < 0)) + break; + } + + if (unlikely(i < count)) + rte_mempool_put_bulk(vcrypto->mbuf_pool, + (void **)&mbufs[i * 2], + (count - i) * 2); + + break; + + case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE: + if (unlikely(rte_mempool_get_bulk(vcrypto->mbuf_pool, + (void **)mbufs, count) < 0)) { + VC_LOG_ERR("Insufficient memory"); + return 0; + } + + for (i = 0; i < count; i++) { + uint16_t used_idx = (start_idx + i) & (vq->size - 1); + uint16_t desc_idx = vq->avail->ring[used_idx]; + struct vring_desc *head = &vq->desc[desc_idx]; + struct rte_crypto_op *op = ops[i]; + + op->sym->m_src = mbufs[i]; + op->sym->m_dst = NULL; + op->sym->m_src->data_off = 0; + + if (unlikely(vhost_crypto_process_one_req(vcrypto, vq, + op, head, descs, desc_idx) < 0)) + break; + } + + if (unlikely(i < count)) + rte_mempool_put_bulk(vcrypto->mbuf_pool, + (void **)&mbufs[i], + count - i); + + break; - if (unlikely(vhost_crypto_process_one_req(vcrypto, vq, op, head, - desc_idx, mem)) < 0) - break; } vq->last_used_idx += i; @@ -1289,7 +1701,7 @@ rte_vhost_crypto_fetch_requests(int vid, uint32_t qid, return i; } -uint16_t __rte_experimental +uint16_t rte_vhost_crypto_finalize_requests(struct rte_crypto_op **ops, uint16_t nb_ops, int *callfds, uint16_t *nb_callfds) {
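
For reference, the reworked API above — rte_vhost_crypto_driver_start(), the extra sess_priv_pool argument to rte_vhost_crypto_create(), and the fetch/finalize pair that now copies descriptors locally and completes requests through the writeback list — is driven from a host-side polling loop. The sketch below is a minimal illustration only and is not part of this patch: it assumes EAL, a cryptodev 'cid' with queue pair 0, a crypto-op mempool 'cop_pool' and the vhost-user socket are already configured, and that rte_vhost_crypto_create() was called from the new_device() callback; poll_vhost_crypto_once() and BURST_SZ are placeholder names.

/*
 * Illustrative host-side polling sketch (not part of this patch).
 * Error handling is reduced to the minimum needed to avoid op leaks.
 */
#include <sys/eventfd.h>

#include <rte_crypto.h>
#include <rte_cryptodev.h>
#include <rte_mempool.h>
#include <rte_vhost_crypto.h>

#define BURST_SZ	32	/* placeholder burst size */

static void
poll_vhost_crypto_once(int vid, uint8_t cid, struct rte_mempool *cop_pool)
{
	struct rte_crypto_op *ops[BURST_SZ];
	struct rte_crypto_op *ops_deq[BURST_SZ];
	int callfds[BURST_SZ];
	uint16_t fetched, enqd, deqd, nb_callfds = 0, i;

	if (rte_crypto_op_bulk_alloc(cop_pool, RTE_CRYPTO_OP_TYPE_SYMMETRIC,
			ops, BURST_SZ) == 0)
		return;

	/* translate virtio-crypto descriptors of vring 0 into crypto ops */
	fetched = rte_vhost_crypto_fetch_requests(vid, 0, ops, BURST_SZ);
	if (fetched < BURST_SZ)
		rte_mempool_put_bulk(cop_pool, (void **)&ops[fetched],
				BURST_SZ - fetched);

	/* hand the prepared ops to the cryptodev PMD */
	enqd = rte_cryptodev_enqueue_burst(cid, 0, ops, fetched);
	if (enqd < fetched)
		/* real code must also free the mbufs attached to these ops */
		rte_mempool_put_bulk(cop_pool, (void **)&ops[enqd],
				fetched - enqd);

	/* collect finished ops; finalize writes data and used entries back */
	deqd = rte_cryptodev_dequeue_burst(cid, 0, ops_deq, BURST_SZ);
	deqd = rte_vhost_crypto_finalize_requests(ops_deq, deqd, callfds,
			&nb_callfds);

	/* kick the guest once per vring that received completions */
	for (i = 0; i < nb_callfds; i++)
		eventfd_write(callfds[i], (eventfd_t)1);

	rte_mempool_put_bulk(cop_pool, (void **)ops_deq, deqd);
}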