/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017-2018 Intel Corporation
 */
#include <rte_malloc.h>
#include <rte_hash.h>
#include <rte_jhash.h>
#include <rte_mbuf.h>
#include <rte_cryptodev.h>

#include "rte_vhost_crypto.h"
#include "vhost.h"
#include "vhost_user.h"
#include "virtio_crypto.h"
#define INHDR_LEN (sizeof(struct virtio_crypto_inhdr))
#define IV_OFFSET (sizeof(struct rte_crypto_op) + \
		sizeof(struct rte_crypto_sym_op))
#ifdef RTE_LIBRTE_VHOST_DEBUG
#define VC_LOG_ERR(fmt, args...) \
	RTE_LOG(ERR, USER1, "[%s] %s() line %u: " fmt "\n", \
		"Vhost-Crypto", __func__, __LINE__, ## args)
#define VC_LOG_INFO(fmt, args...) \
	RTE_LOG(INFO, USER1, "[%s] %s() line %u: " fmt "\n", \
		"Vhost-Crypto", __func__, __LINE__, ## args)

#define VC_LOG_DBG(fmt, args...) \
	RTE_LOG(DEBUG, USER1, "[%s] %s() line %u: " fmt "\n", \
		"Vhost-Crypto", __func__, __LINE__, ## args)
#else
#define VC_LOG_ERR(fmt, args...) \
	RTE_LOG(ERR, USER1, "[VHOST-Crypto]: " fmt "\n", ## args)
#define VC_LOG_INFO(fmt, args...) \
	RTE_LOG(INFO, USER1, "[VHOST-Crypto]: " fmt "\n", ## args)
#define VC_LOG_DBG(fmt, args...)
#endif
#define VIRTIO_CRYPTO_FEATURES ((1 << VIRTIO_F_NOTIFY_ON_EMPTY) | \
		(1 << VIRTIO_RING_F_INDIRECT_DESC) | \
		(1 << VIRTIO_RING_F_EVENT_IDX) | \
		(1 << VIRTIO_CRYPTO_SERVICE_CIPHER) | \
		(1 << VIRTIO_CRYPTO_SERVICE_MAC) | \
		(1 << VIRTIO_NET_F_CTRL_VQ))
#define IOVA_TO_VVA(t, r, a, l, p) \
	((t)(uintptr_t)vhost_iova_to_vva(r->dev, r->vq, a, l, p))
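
/**
 * Translate a virtio-crypto cipher algorithm identifier into the
 * equivalent DPDK cryptodev enum value, or a negative virtio-crypto
 * error code if the algorithm is unknown or unsupported.
 */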
static int
cipher_algo_transform(uint32_t virtio_cipher_algo)
{
	int ret;

	switch (virtio_cipher_algo) {
	case VIRTIO_CRYPTO_CIPHER_AES_CBC:
		ret = RTE_CRYPTO_CIPHER_AES_CBC;
		break;
	case VIRTIO_CRYPTO_CIPHER_AES_CTR:
		ret = RTE_CRYPTO_CIPHER_AES_CTR;
		break;
	case VIRTIO_CRYPTO_CIPHER_DES_ECB:
		ret = -VIRTIO_CRYPTO_NOTSUPP;
		break;
	case VIRTIO_CRYPTO_CIPHER_DES_CBC:
		ret = RTE_CRYPTO_CIPHER_DES_CBC;
		break;
	case VIRTIO_CRYPTO_CIPHER_3DES_ECB:
		ret = RTE_CRYPTO_CIPHER_3DES_ECB;
		break;
	case VIRTIO_CRYPTO_CIPHER_3DES_CBC:
		ret = RTE_CRYPTO_CIPHER_3DES_CBC;
		break;
	case VIRTIO_CRYPTO_CIPHER_3DES_CTR:
		ret = RTE_CRYPTO_CIPHER_3DES_CTR;
		break;
	case VIRTIO_CRYPTO_CIPHER_KASUMI_F8:
		ret = RTE_CRYPTO_CIPHER_KASUMI_F8;
		break;
	case VIRTIO_CRYPTO_CIPHER_SNOW3G_UEA2:
		ret = RTE_CRYPTO_CIPHER_SNOW3G_UEA2;
		break;
	case VIRTIO_CRYPTO_CIPHER_AES_F8:
		ret = RTE_CRYPTO_CIPHER_AES_F8;
		break;
	case VIRTIO_CRYPTO_CIPHER_AES_XTS:
		ret = RTE_CRYPTO_CIPHER_AES_XTS;
		break;
	case VIRTIO_CRYPTO_CIPHER_ZUC_EEA3:
		ret = RTE_CRYPTO_CIPHER_ZUC_EEA3;
		break;
	default:
		ret = -VIRTIO_CRYPTO_BADMSG;
		break;
	}

	return ret;
}
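
/**
 * Translate a virtio-crypto MAC/hash algorithm identifier into the
 * equivalent DPDK cryptodev auth enum value, or a negative virtio-crypto
 * error code if the algorithm is unknown or unsupported.
 */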
static int
auth_algo_transform(uint32_t virtio_auth_algo)
{
	int ret;

	switch (virtio_auth_algo) {
	case VIRTIO_CRYPTO_NO_MAC:
		ret = RTE_CRYPTO_AUTH_NULL;
		break;
	case VIRTIO_CRYPTO_MAC_HMAC_MD5:
		ret = RTE_CRYPTO_AUTH_MD5_HMAC;
		break;
	case VIRTIO_CRYPTO_MAC_HMAC_SHA1:
		ret = RTE_CRYPTO_AUTH_SHA1_HMAC;
		break;
	case VIRTIO_CRYPTO_MAC_HMAC_SHA_224:
		ret = RTE_CRYPTO_AUTH_SHA224_HMAC;
		break;
	case VIRTIO_CRYPTO_MAC_HMAC_SHA_256:
		ret = RTE_CRYPTO_AUTH_SHA256_HMAC;
		break;
	case VIRTIO_CRYPTO_MAC_HMAC_SHA_384:
		ret = RTE_CRYPTO_AUTH_SHA384_HMAC;
		break;
	case VIRTIO_CRYPTO_MAC_HMAC_SHA_512:
		ret = RTE_CRYPTO_AUTH_SHA512_HMAC;
		break;
	case VIRTIO_CRYPTO_MAC_CMAC_3DES:
		ret = -VIRTIO_CRYPTO_NOTSUPP;
		break;
	case VIRTIO_CRYPTO_MAC_CMAC_AES:
		ret = RTE_CRYPTO_AUTH_AES_CMAC;
		break;
	case VIRTIO_CRYPTO_MAC_KASUMI_F9:
		ret = RTE_CRYPTO_AUTH_KASUMI_F9;
		break;
	case VIRTIO_CRYPTO_MAC_SNOW3G_UIA2:
		ret = RTE_CRYPTO_AUTH_SNOW3G_UIA2;
		break;
	case VIRTIO_CRYPTO_MAC_GMAC_AES:
		ret = RTE_CRYPTO_AUTH_AES_GMAC;
		break;
	case VIRTIO_CRYPTO_MAC_GMAC_TWOFISH:
		ret = -VIRTIO_CRYPTO_NOTSUPP;
		break;
	case VIRTIO_CRYPTO_MAC_CBCMAC_AES:
		ret = RTE_CRYPTO_AUTH_AES_CBC_MAC;
		break;
	case VIRTIO_CRYPTO_MAC_CBCMAC_KASUMI_F9:
		ret = -VIRTIO_CRYPTO_NOTSUPP;
		break;
	case VIRTIO_CRYPTO_MAC_XCBC_AES:
		ret = RTE_CRYPTO_AUTH_AES_XCBC_MAC;
		break;
	default:
		ret = -VIRTIO_CRYPTO_BADMSG;
		break;
	}

	return ret;
}
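
/**
 * Return the IV length in bytes required by the given cipher algorithm,
 * or -1 if the algorithm is not covered yet.
 */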
static int get_iv_len(enum rte_crypto_cipher_algorithm algo)
{
	int len;

	switch (algo) {
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		len = 8;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CTR:
		len = 8;
		break;
	case RTE_CRYPTO_CIPHER_3DES_ECB:
		len = 0;
		break;
	case RTE_CRYPTO_CIPHER_AES_CBC:
		len = 16;
		break;

	/* TODO: add common algos */

	default:
		len = -1;
		break;
	}

	return len;
}
/**
 * vhost_crypto struct is used to maintain a number of virtio_cryptos and
 * one DPDK crypto device that deals with all crypto workloads. It is used
 * internally by this file only.
 */
struct vhost_crypto {
	/** Used to look up DPDK cryptodev session based on VIRTIO crypto
	 *  session ID.
	 */
	struct rte_hash *session_map;
	struct rte_mempool *mbuf_pool;
	struct rte_mempool *sess_pool;
	struct rte_mempool *sess_priv_pool;
	struct rte_mempool *wb_pool;

	/** DPDK cryptodev ID */
	uint8_t cid;

	uint64_t last_session_id;

	uint64_t cache_session_id;
	struct rte_cryptodev_sym_session *cache_session;
	/** socket id for the device */
	int socket_id;

	struct virtio_net *dev;

	uint8_t option;
} __rte_cache_aligned;
struct vhost_crypto_writeback_data {
	uint8_t *src;
	uint8_t *dst;
	uint64_t len;
	struct vhost_crypto_writeback_data *next;
};
struct vhost_crypto_data_req {
	struct vring_desc *head;
	struct virtio_net *dev;
	struct virtio_crypto_inhdr *inhdr;
	struct vhost_virtqueue *vq;
	struct vhost_crypto_writeback_data *wb;
	struct rte_mempool *wb_pool;
	uint16_t desc_idx;
	uint16_t len;
	uint16_t zero_copy;
};
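
/**
 * Fill a DPDK cipher transform from a vhost-user crypto session message.
 */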
static int
transform_cipher_param(struct rte_crypto_sym_xform *xform,
		VhostUserCryptoSessionParam *param)
{
	int ret;

	ret = cipher_algo_transform(param->cipher_algo);
	if (unlikely(ret < 0))
		return ret;

	xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
	xform->cipher.algo = (enum rte_crypto_cipher_algorithm)ret;
	xform->cipher.key.length = param->cipher_key_len;
	if (xform->cipher.key.length > 0)
		xform->cipher.key.data = param->cipher_key_buf;
	if (param->dir == VIRTIO_CRYPTO_OP_ENCRYPT)
		xform->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
	else if (param->dir == VIRTIO_CRYPTO_OP_DECRYPT)
		xform->cipher.op = RTE_CRYPTO_CIPHER_OP_DECRYPT;
	else {
		VC_LOG_DBG("Bad operation type");
		return -VIRTIO_CRYPTO_BADMSG;
	}

	ret = get_iv_len(xform->cipher.algo);
	if (unlikely(ret < 0))
		return ret;
	xform->cipher.iv.length = (uint16_t)ret;
	xform->cipher.iv.offset = IV_OFFSET;

	return 0;
}
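
/**
 * Fill a cipher/auth transform chain from a vhost-user crypto session
 * message, ordering the two transforms according to the requested
 * chaining direction.
 */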
static int
transform_chain_param(struct rte_crypto_sym_xform *xforms,
		VhostUserCryptoSessionParam *param)
{
	struct rte_crypto_sym_xform *xform_cipher, *xform_auth;
	int ret;

	switch (param->chaining_dir) {
	case VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_HASH_THEN_CIPHER:
		xform_auth = xforms;
		xform_cipher = xforms->next;
		xform_cipher->cipher.op = RTE_CRYPTO_CIPHER_OP_DECRYPT;
		xform_auth->auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
		break;
	case VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_CIPHER_THEN_HASH:
		xform_cipher = xforms;
		xform_auth = xforms->next;
		xform_cipher->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
		xform_auth->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
		break;
	default:
		return -VIRTIO_CRYPTO_BADMSG;
	}

	/* cipher */
	ret = cipher_algo_transform(param->cipher_algo);
	if (unlikely(ret < 0))
		return ret;
	xform_cipher->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
	xform_cipher->cipher.algo = (enum rte_crypto_cipher_algorithm)ret;
	xform_cipher->cipher.key.length = param->cipher_key_len;
	xform_cipher->cipher.key.data = param->cipher_key_buf;
	ret = get_iv_len(xform_cipher->cipher.algo);
	if (unlikely(ret < 0))
		return ret;
	xform_cipher->cipher.iv.length = (uint16_t)ret;
	xform_cipher->cipher.iv.offset = IV_OFFSET;

	/* auth */
	xform_auth->type = RTE_CRYPTO_SYM_XFORM_AUTH;
	ret = auth_algo_transform(param->hash_algo);
	if (unlikely(ret < 0))
		return ret;
	xform_auth->auth.algo = (enum rte_crypto_auth_algorithm)ret;
	xform_auth->auth.digest_length = param->digest_len;
	xform_auth->auth.key.length = param->auth_key_len;
	xform_auth->auth.key.data = param->auth_key_buf;

	return 0;
}
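
/**
 * Create and initialize a cryptodev session from a vhost-user message,
 * then record it in the session map. On failure the (negative) error
 * code is returned to the guest through sess_param->session_id.
 */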
static void
vhost_crypto_create_sess(struct vhost_crypto *vcrypto,
		VhostUserCryptoSessionParam *sess_param)
{
	struct rte_crypto_sym_xform xform1 = {0}, xform2 = {0};
	struct rte_cryptodev_sym_session *session;
	int ret;

	switch (sess_param->op_type) {
	case VIRTIO_CRYPTO_SYM_OP_NONE:
	case VIRTIO_CRYPTO_SYM_OP_CIPHER:
		ret = transform_cipher_param(&xform1, sess_param);
		if (unlikely(ret)) {
			VC_LOG_ERR("Error transform session msg (%i)", ret);
			sess_param->session_id = ret;
			return;
		}
		break;
	case VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING:
		if (unlikely(sess_param->hash_mode !=
				VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH)) {
			sess_param->session_id = -VIRTIO_CRYPTO_NOTSUPP;
			VC_LOG_ERR("Error transform session message (%i)",
					-VIRTIO_CRYPTO_NOTSUPP);
			return;
		}

		xform1.next = &xform2;

		ret = transform_chain_param(&xform1, sess_param);
		if (unlikely(ret)) {
			VC_LOG_ERR("Error transform session message (%i)",
					ret);
			sess_param->session_id = ret;
			return;
		}

		break;
	default:
		VC_LOG_ERR("Algorithm not yet supported");
		sess_param->session_id = -VIRTIO_CRYPTO_NOTSUPP;
		return;
	}

	session = rte_cryptodev_sym_session_create(vcrypto->sess_pool);
	if (!session) {
		VC_LOG_ERR("Failed to create session");
		sess_param->session_id = -VIRTIO_CRYPTO_ERR;
		return;
	}

	if (rte_cryptodev_sym_session_init(vcrypto->cid, session, &xform1,
			vcrypto->sess_priv_pool) < 0) {
		VC_LOG_ERR("Failed to initialize session");
		sess_param->session_id = -VIRTIO_CRYPTO_ERR;
		return;
	}

	/* insert the session into the map */
	if (rte_hash_add_key_data(vcrypto->session_map,
			&vcrypto->last_session_id, session) < 0) {
		VC_LOG_ERR("Failed to insert session to hash table");

		if (rte_cryptodev_sym_session_clear(vcrypto->cid, session) < 0)
			VC_LOG_ERR("Failed to clear session");

		if (rte_cryptodev_sym_session_free(session) < 0)
			VC_LOG_ERR("Failed to free session");

		sess_param->session_id = -VIRTIO_CRYPTO_ERR;
		return;
	}

	VC_LOG_INFO("Session %"PRIu64" created for vdev %i.",
			vcrypto->last_session_id, vcrypto->dev->vid);

	sess_param->session_id = vcrypto->last_session_id;
	vcrypto->last_session_id++;
}
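
/**
 * Look up a session by its ID, release the cryptodev resources and
 * remove it from the session map.
 */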
static int
vhost_crypto_close_sess(struct vhost_crypto *vcrypto, uint64_t session_id)
{
	struct rte_cryptodev_sym_session *session;
	uint64_t sess_id = session_id;
	int ret;

	ret = rte_hash_lookup_data(vcrypto->session_map, &sess_id,
			(void **)&session);

	if (unlikely(ret < 0)) {
		VC_LOG_ERR("Failed to delete session %"PRIu64".", session_id);
		return -VIRTIO_CRYPTO_INVSESS;
	}

	if (rte_cryptodev_sym_session_clear(vcrypto->cid, session) < 0) {
		VC_LOG_DBG("Failed to clear session");
		return -VIRTIO_CRYPTO_ERR;
	}

	if (rte_cryptodev_sym_session_free(session) < 0) {
		VC_LOG_DBG("Failed to free session");
		return -VIRTIO_CRYPTO_ERR;
	}

	if (rte_hash_del_key(vcrypto->session_map, &sess_id) < 0) {
		VC_LOG_DBG("Failed to delete session from hash table.");
		return -VIRTIO_CRYPTO_ERR;
	}

	VC_LOG_INFO("Session %"PRIu64" deleted for vdev %i.", sess_id,
			vcrypto->dev->vid);

	return 0;
}
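
/**
 * Post handler hooked into the vhost-user message flow to serve the
 * crypto session create/close requests.
 */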
static enum vh_result
vhost_crypto_msg_post_handler(int vid, void *msg)
{
	struct virtio_net *dev = get_device(vid);
	struct vhost_crypto *vcrypto;
	VhostUserMsg *vmsg = msg;
	enum vh_result ret = VH_RESULT_OK;

	if (dev == NULL) {
		VC_LOG_ERR("Invalid vid %i", vid);
		return VH_RESULT_ERR;
	}

	vcrypto = dev->extern_data;
	if (vcrypto == NULL) {
		VC_LOG_ERR("Cannot find required data, is it initialized?");
		return VH_RESULT_ERR;
	}

	if (vmsg->request.master == VHOST_USER_CRYPTO_CREATE_SESS) {
		vhost_crypto_create_sess(vcrypto,
				&vmsg->payload.crypto_session);
		ret = VH_RESULT_REPLY;
	} else if (vmsg->request.master == VHOST_USER_CRYPTO_CLOSE_SESS) {
		if (vhost_crypto_close_sess(vcrypto, vmsg->payload.u64))
			ret = VH_RESULT_ERR;
	}

	return ret;
}
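
/**
 * Walk the descriptor chain until the first write-only descriptor is
 * found; return NULL if the chain is exhausted or malformed.
 */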
static __rte_always_inline struct vring_desc *
find_write_desc(struct vring_desc *head, struct vring_desc *desc,
		uint32_t *nb_descs, uint32_t vq_size)
{
	if (desc->flags & VRING_DESC_F_WRITE)
		return desc;

	while (desc->flags & VRING_DESC_F_NEXT) {
		if (unlikely(*nb_descs == 0 || desc->next >= vq_size))
			return NULL;
		(*nb_descs)--;

		desc = &head[desc->next];
		if (desc->flags & VRING_DESC_F_WRITE)
			return desc;
	}

	return NULL;
}
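
/**
 * Walk to the last descriptor of the chain and map it as the
 * virtio_crypto_inhdr status header.
 */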
static struct virtio_crypto_inhdr *
reach_inhdr(struct vhost_crypto_data_req *vc_req, struct vring_desc *desc,
		uint32_t *nb_descs, uint32_t vq_size)
{
	uint64_t dlen;
	struct virtio_crypto_inhdr *inhdr;

	while (desc->flags & VRING_DESC_F_NEXT) {
		if (unlikely(*nb_descs == 0 || desc->next >= vq_size))
			return NULL;
		(*nb_descs)--;

		desc = &vc_req->head[desc->next];
	}

	dlen = desc->len;
	inhdr = IOVA_TO_VVA(struct virtio_crypto_inhdr *, vc_req, desc->addr,
			&dlen, VHOST_ACCESS_WO);
	if (unlikely(!inhdr || dlen != desc->len))
		return NULL;

	return inhdr;
}
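
/**
 * Advance *cur_desc over "size" bytes worth of descriptors; returns -1
 * if the chain ends early or a descriptor index is out of range.
 */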
static __rte_always_inline int
move_desc(struct vring_desc *head, struct vring_desc **cur_desc,
		uint32_t size, uint32_t *nb_descs, uint32_t vq_size)
{
	struct vring_desc *desc = *cur_desc;
	int left = size - desc->len;

	while ((desc->flags & VRING_DESC_F_NEXT) && left > 0) {
		(*nb_descs)--;
		if (unlikely(*nb_descs == 0 || desc->next >= vq_size))
			return -1;

		desc = &head[desc->next];
		rte_prefetch0(&head[desc->next]);
		left -= desc->len;
	}

	if (unlikely(left > 0))
		return -1;

	if (unlikely(*nb_descs == 0))
		*cur_desc = NULL;
	else {
		if (unlikely(desc->next >= vq_size))
			return -1;
		*cur_desc = &head[desc->next];
	}

	return 0;
}
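
/**
 * Map a single descriptor's buffer into host virtual address space,
 * requiring the mapping to cover the whole descriptor.
 */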
static __rte_always_inline void *
get_data_ptr(struct vhost_crypto_data_req *vc_req, struct vring_desc *cur_desc,
		uint8_t perm)
{
	void *data;
	uint64_t dlen = cur_desc->len;

	data = IOVA_TO_VVA(void *, vc_req, cur_desc->addr, &dlen, perm);
	if (unlikely(!data || dlen != cur_desc->len)) {
		VC_LOG_ERR("Failed to map object");
		return NULL;
	}

	return data;
}
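
/**
 * Copy "size" bytes from a (possibly physically discontiguous)
 * descriptor chain into the flat buffer dst_data, advancing *cur_desc
 * past the consumed descriptors.
 */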
static int
copy_data(void *dst_data, struct vhost_crypto_data_req *vc_req,
		struct vring_desc **cur_desc, uint32_t size,
		uint32_t *nb_descs, uint32_t vq_size)
{
	struct vring_desc *desc = *cur_desc;
	uint64_t remain, addr, dlen, len;
	uint32_t to_copy;
	uint8_t *data = dst_data;
	uint8_t *src;
	int left = size;

	to_copy = RTE_MIN(desc->len, (uint32_t)left);
	dlen = to_copy;
	src = IOVA_TO_VVA(uint8_t *, vc_req, desc->addr, &dlen,
			VHOST_ACCESS_RO);
	if (unlikely(!src || !dlen))
		return -1;

	rte_memcpy((uint8_t *)data, src, dlen);
	data += dlen;

	if (unlikely(dlen < to_copy)) {
		remain = to_copy - dlen;
		addr = desc->addr + dlen;

		while (remain) {
			len = remain;
			src = IOVA_TO_VVA(uint8_t *, vc_req, addr, &len,
					VHOST_ACCESS_RO);
			if (unlikely(!src || !len)) {
				VC_LOG_ERR("Failed to map descriptor");
				return -1;
			}

			rte_memcpy(data, src, len);
			addr += len;
			remain -= len;
			data += len;
		}
	}

	left -= to_copy;

	while ((desc->flags & VRING_DESC_F_NEXT) && left > 0) {
		if (unlikely(*nb_descs == 0 || desc->next >= vq_size)) {
			VC_LOG_ERR("Invalid descriptors");
			return -1;
		}
		(*nb_descs)--;

		desc = &vc_req->head[desc->next];
		rte_prefetch0(&vc_req->head[desc->next]);
		to_copy = RTE_MIN(desc->len, (uint32_t)left);
		dlen = to_copy;
		src = IOVA_TO_VVA(uint8_t *, vc_req, desc->addr, &dlen,
				VHOST_ACCESS_RO);
		if (unlikely(!src || !dlen)) {
			VC_LOG_ERR("Failed to map descriptor");
			return -1;
		}

		rte_memcpy(data, src, dlen);
		data += dlen;

		if (unlikely(dlen < to_copy)) {
			remain = to_copy - dlen;
			addr = desc->addr + dlen;

			while (remain) {
				len = remain;
				src = IOVA_TO_VVA(uint8_t *, vc_req, addr,
						&len, VHOST_ACCESS_RO);
				if (unlikely(!src || !len)) {
					VC_LOG_ERR("Failed to map descriptor");
					return -1;
				}

				rte_memcpy(data, src, len);
				addr += len;
				remain -= len;
				data += len;
			}
		}

		left -= to_copy;
	}

	if (unlikely(left > 0)) {
		VC_LOG_ERR("Incorrect virtio descriptor");
		return -1;
	}

	if (unlikely(*nb_descs == 0))
		*cur_desc = NULL;
	else {
		if (unlikely(desc->next >= vq_size))
			return -1;
		*cur_desc = &vc_req->head[desc->next];
	}

	return 0;
}
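
/**
 * Flush the write-back list of a finished request: copy each segment
 * back to the guest buffer and return the list elements to the pool.
 */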
static void
write_back_data(struct vhost_crypto_data_req *vc_req)
{
	struct vhost_crypto_writeback_data *wb_data = vc_req->wb, *wb_last;

	while (wb_data) {
		rte_memcpy(wb_data->dst, wb_data->src, wb_data->len);
		wb_last = wb_data;
		wb_data = wb_data->next;
		rte_mempool_put(vc_req->wb_pool, wb_last);
	}
}
static void
free_wb_data(struct vhost_crypto_writeback_data *wb_data,
		struct rte_mempool *mp)
{
	while (wb_data->next != NULL)
		free_wb_data(wb_data->next, mp);

	rte_mempool_put(mp, wb_data);
}
/**
 * The function will allocate a vhost_crypto_writeback_data linked list
 * containing the source and destination data pointers for the write back
 * operation after dequeued from Cryptodev PMD queues.
 *
 * @param vc_req
 *   The vhost crypto data request pointer
 * @param cur_desc
 *   The pointer of the current in use descriptor pointer. The content of
 *   cur_desc is expected to be updated after the function execution.
 * @param end_wb_data
 *   The last write back data element to be returned. It is used only in cipher
 *   and hash chain operations.
 * @param src
 *   The source data pointer
 * @param offset
 *   The offset to both source and destination data. For source data the offset
 *   is the number of bytes between src and start point of cipher operation. For
 *   destination data the offset is the number of bytes from *cur_desc->addr
 *   to the point where the src will be written to.
 * @param write_back_len
 *   The size of the write back length.
 * @return
 *   The pointer to the start of the write back data linked list.
 */
static struct vhost_crypto_writeback_data *
prepare_write_back_data(struct vhost_crypto_data_req *vc_req,
		struct vring_desc **cur_desc,
		struct vhost_crypto_writeback_data **end_wb_data,
		uint8_t *src,
		uint32_t offset,
		uint64_t write_back_len,
		uint32_t *nb_descs, uint32_t vq_size)
{
	struct vhost_crypto_writeback_data *wb_data, *head = NULL;
	struct vring_desc *desc = *cur_desc;
	uint64_t dlen;
	uint8_t *dst;
	int ret;

	ret = rte_mempool_get(vc_req->wb_pool, (void **)&head);
	if (unlikely(ret < 0)) {
		VC_LOG_ERR("no memory");
		goto error_exit;
	}

	wb_data = head;

	if (likely(desc->len > offset)) {
		wb_data->src = src + offset;
		dlen = desc->len;
		dst = IOVA_TO_VVA(uint8_t *, vc_req, desc->addr,
			&dlen, VHOST_ACCESS_RW) + offset;
		if (unlikely(!dst || dlen != desc->len)) {
			VC_LOG_ERR("Failed to map descriptor");
			goto error_exit;
		}

		wb_data->dst = dst;
		wb_data->len = desc->len - offset;
		write_back_len -= wb_data->len;
		src += offset + wb_data->len;
		offset = 0;

		if (unlikely(write_back_len)) {
			ret = rte_mempool_get(vc_req->wb_pool,
					(void **)&(wb_data->next));
			if (unlikely(ret < 0)) {
				VC_LOG_ERR("no memory");
				goto error_exit;
			}

			wb_data = wb_data->next;
		} else
			wb_data->next = NULL;
	} else
		offset -= desc->len;

	while (write_back_len) {
		if (unlikely(*nb_descs == 0 || desc->next >= vq_size)) {
			VC_LOG_ERR("Invalid descriptors");
			goto error_exit;
		}
		(*nb_descs)--;

		desc = &vc_req->head[desc->next];
		if (unlikely(!(desc->flags & VRING_DESC_F_WRITE))) {
			VC_LOG_ERR("incorrect descriptor");
			goto error_exit;
		}

		if (desc->len <= offset) {
			offset -= desc->len;
			continue;
		}

		dlen = desc->len;
		dst = IOVA_TO_VVA(uint8_t *, vc_req, desc->addr, &dlen,
				VHOST_ACCESS_RW) + offset;
		if (unlikely(dst == NULL || dlen != desc->len)) {
			VC_LOG_ERR("Failed to map descriptor");
			goto error_exit;
		}

		wb_data->src = src;
		wb_data->dst = dst;
		wb_data->len = RTE_MIN(desc->len - offset, write_back_len);
		write_back_len -= wb_data->len;
		src += wb_data->len;
		offset = 0;

		if (write_back_len) {
			ret = rte_mempool_get(vc_req->wb_pool,
					(void **)&(wb_data->next));
			if (unlikely(ret < 0)) {
				VC_LOG_ERR("no memory");
				goto error_exit;
			}

			wb_data = wb_data->next;
		} else
			wb_data->next = NULL;
	}

	if (unlikely(*nb_descs == 0))
		*cur_desc = NULL;
	else {
		if (unlikely(desc->next >= vq_size))
			goto error_exit;
		*cur_desc = &vc_req->head[desc->next];
	}

	*end_wb_data = wb_data;

	return head;

error_exit:
	if (head)
		free_wb_data(head, vc_req->wb_pool);

	return NULL;
}
static uint8_t
prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
		struct vhost_crypto_data_req *vc_req,
		struct virtio_crypto_cipher_data_req *cipher,
		struct vring_desc *cur_desc,
		uint32_t *nb_descs, uint32_t vq_size)
{
	struct vring_desc *desc = cur_desc;
	struct vhost_crypto_writeback_data *ewb = NULL;
	struct rte_mbuf *m_src = op->sym->m_src, *m_dst = op->sym->m_dst;
	uint8_t *iv_data = rte_crypto_op_ctod_offset(op, uint8_t *, IV_OFFSET);
	uint8_t ret = 0;

	/* prepare */
	/* iv */
	if (unlikely(copy_data(iv_data, vc_req, &desc, cipher->para.iv_len,
			nb_descs, vq_size) < 0)) {
		ret = VIRTIO_CRYPTO_BADMSG;
		goto error_exit;
	}

	m_src->data_len = cipher->para.src_data_len;

	switch (vcrypto->option) {
	case RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE:
		m_src->buf_iova = gpa_to_hpa(vcrypto->dev, desc->addr,
				cipher->para.src_data_len);
		m_src->buf_addr = get_data_ptr(vc_req, desc, VHOST_ACCESS_RO);
		if (unlikely(m_src->buf_iova == 0 ||
				m_src->buf_addr == NULL)) {
			VC_LOG_ERR("zero_copy may fail due to cross page data");
			ret = VIRTIO_CRYPTO_ERR;
			goto error_exit;
		}

		if (unlikely(move_desc(vc_req->head, &desc,
				cipher->para.src_data_len, nb_descs,
				vq_size) < 0)) {
			VC_LOG_ERR("Incorrect descriptor");
			ret = VIRTIO_CRYPTO_ERR;
			goto error_exit;
		}

		break;
	case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE:
		vc_req->wb_pool = vcrypto->wb_pool;

		if (unlikely(cipher->para.src_data_len >
				RTE_MBUF_DEFAULT_BUF_SIZE)) {
			VC_LOG_ERR("Not enough space to do data copy");
			ret = VIRTIO_CRYPTO_ERR;
			goto error_exit;
		}
		if (unlikely(copy_data(rte_pktmbuf_mtod(m_src, uint8_t *),
				vc_req, &desc, cipher->para.src_data_len,
				nb_descs, vq_size) < 0)) {
			ret = VIRTIO_CRYPTO_BADMSG;
			goto error_exit;
		}
		break;
	default:
		ret = VIRTIO_CRYPTO_BADMSG;
		goto error_exit;
	}

	/* dst */
	desc = find_write_desc(vc_req->head, desc, nb_descs, vq_size);
	if (unlikely(!desc)) {
		VC_LOG_ERR("Cannot find write location");
		ret = VIRTIO_CRYPTO_BADMSG;
		goto error_exit;
	}

	switch (vcrypto->option) {
	case RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE:
		m_dst->buf_iova = gpa_to_hpa(vcrypto->dev,
				desc->addr, cipher->para.dst_data_len);
		m_dst->buf_addr = get_data_ptr(vc_req, desc, VHOST_ACCESS_RW);
		if (unlikely(m_dst->buf_iova == 0 || m_dst->buf_addr == NULL)) {
			VC_LOG_ERR("zero_copy may fail due to cross page data");
			ret = VIRTIO_CRYPTO_ERR;
			goto error_exit;
		}

		if (unlikely(move_desc(vc_req->head, &desc,
				cipher->para.dst_data_len,
				nb_descs, vq_size) < 0)) {
			VC_LOG_ERR("Incorrect descriptor");
			ret = VIRTIO_CRYPTO_ERR;
			goto error_exit;
		}

		m_dst->data_len = cipher->para.dst_data_len;
		break;
	case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE:
		vc_req->wb = prepare_write_back_data(vc_req, &desc, &ewb,
				rte_pktmbuf_mtod(m_src, uint8_t *), 0,
				cipher->para.dst_data_len, nb_descs, vq_size);
		if (unlikely(vc_req->wb == NULL)) {
			ret = VIRTIO_CRYPTO_ERR;
			goto error_exit;
		}

		break;
	default:
		ret = VIRTIO_CRYPTO_BADMSG;
		goto error_exit;
	}

	op->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
	op->sess_type = RTE_CRYPTO_OP_WITH_SESSION;

	op->sym->cipher.data.offset = 0;
	op->sym->cipher.data.length = cipher->para.src_data_len;

	vc_req->inhdr = get_data_ptr(vc_req, desc, VHOST_ACCESS_WO);
	if (unlikely(vc_req->inhdr == NULL)) {
		ret = VIRTIO_CRYPTO_BADMSG;
		goto error_exit;
	}

	vc_req->inhdr->status = VIRTIO_CRYPTO_OK;
	vc_req->len = cipher->para.dst_data_len + INHDR_LEN;

	return 0;

error_exit:
	if (vc_req->wb)
		free_wb_data(vc_req->wb, vc_req->wb_pool);

	vc_req->len = INHDR_LEN;

	return ret;
}
static uint8_t
prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
		struct vhost_crypto_data_req *vc_req,
		struct virtio_crypto_alg_chain_data_req *chain,
		struct vring_desc *cur_desc,
		uint32_t *nb_descs, uint32_t vq_size)
{
	struct vring_desc *desc = cur_desc, *digest_desc;
	struct vhost_crypto_writeback_data *ewb = NULL, *ewb2 = NULL;
	struct rte_mbuf *m_src = op->sym->m_src, *m_dst = op->sym->m_dst;
	uint8_t *iv_data = rte_crypto_op_ctod_offset(op, uint8_t *, IV_OFFSET);
	uint32_t digest_offset;
	void *digest_addr;
	uint8_t ret = 0;

	/* prepare */
	/* iv */
	if (unlikely(copy_data(iv_data, vc_req, &desc,
			chain->para.iv_len, nb_descs, vq_size) < 0)) {
		ret = VIRTIO_CRYPTO_BADMSG;
		goto error_exit;
	}

	m_src->data_len = chain->para.src_data_len;

	switch (vcrypto->option) {
	case RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE:
		m_dst->data_len = chain->para.dst_data_len;

		m_src->buf_iova = gpa_to_hpa(vcrypto->dev, desc->addr,
				chain->para.src_data_len);
		m_src->buf_addr = get_data_ptr(vc_req, desc, VHOST_ACCESS_RO);
		if (unlikely(m_src->buf_iova == 0 || m_src->buf_addr == NULL)) {
			VC_LOG_ERR("zero_copy may fail due to cross page data");
			ret = VIRTIO_CRYPTO_ERR;
			goto error_exit;
		}

		if (unlikely(move_desc(vc_req->head, &desc,
				chain->para.src_data_len,
				nb_descs, vq_size) < 0)) {
			VC_LOG_ERR("Incorrect descriptor");
			ret = VIRTIO_CRYPTO_ERR;
			goto error_exit;
		}
		break;
	case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE:
		vc_req->wb_pool = vcrypto->wb_pool;

		if (unlikely(chain->para.src_data_len >
				RTE_MBUF_DEFAULT_BUF_SIZE)) {
			VC_LOG_ERR("Not enough space to do data copy");
			ret = VIRTIO_CRYPTO_ERR;
			goto error_exit;
		}
		if (unlikely(copy_data(rte_pktmbuf_mtod(m_src, uint8_t *),
				vc_req, &desc, chain->para.src_data_len,
				nb_descs, vq_size) < 0)) {
			ret = VIRTIO_CRYPTO_BADMSG;
			goto error_exit;
		}

		break;
	default:
		ret = VIRTIO_CRYPTO_BADMSG;
		goto error_exit;
	}

	/* dst */
	desc = find_write_desc(vc_req->head, desc, nb_descs, vq_size);
	if (unlikely(!desc)) {
		VC_LOG_ERR("Cannot find write location");
		ret = VIRTIO_CRYPTO_BADMSG;
		goto error_exit;
	}

	switch (vcrypto->option) {
	case RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE:
		m_dst->buf_iova = gpa_to_hpa(vcrypto->dev,
				desc->addr, chain->para.dst_data_len);
		m_dst->buf_addr = get_data_ptr(vc_req, desc, VHOST_ACCESS_RW);
		if (unlikely(m_dst->buf_iova == 0 || m_dst->buf_addr == NULL)) {
			VC_LOG_ERR("zero_copy may fail due to cross page data");
			ret = VIRTIO_CRYPTO_ERR;
			goto error_exit;
		}

		if (unlikely(move_desc(vc_req->head, &desc,
				chain->para.dst_data_len,
				nb_descs, vq_size) < 0)) {
			VC_LOG_ERR("Incorrect descriptor");
			ret = VIRTIO_CRYPTO_ERR;
			goto error_exit;
		}

		/* digest */
		op->sym->auth.digest.phys_addr = gpa_to_hpa(vcrypto->dev,
				desc->addr, chain->para.hash_result_len);
		op->sym->auth.digest.data = get_data_ptr(vc_req, desc,
				VHOST_ACCESS_RW);
		if (unlikely(op->sym->auth.digest.phys_addr == 0)) {
			VC_LOG_ERR("zero_copy may fail due to cross page data");
			ret = VIRTIO_CRYPTO_ERR;
			goto error_exit;
		}

		if (unlikely(move_desc(vc_req->head, &desc,
				chain->para.hash_result_len,
				nb_descs, vq_size) < 0)) {
			VC_LOG_ERR("Incorrect descriptor");
			ret = VIRTIO_CRYPTO_ERR;
			goto error_exit;
		}

		break;
	case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE:
		vc_req->wb = prepare_write_back_data(vc_req, &desc, &ewb,
				rte_pktmbuf_mtod(m_src, uint8_t *),
				chain->para.cipher_start_src_offset,
				chain->para.dst_data_len -
				chain->para.cipher_start_src_offset,
				nb_descs, vq_size);
		if (unlikely(vc_req->wb == NULL)) {
			ret = VIRTIO_CRYPTO_ERR;
			goto error_exit;
		}

		digest_offset = m_src->data_len;
		digest_addr = rte_pktmbuf_mtod_offset(m_src, void *,
				digest_offset);
		digest_desc = desc;

		/** create a wb_data for digest */
		ewb->next = prepare_write_back_data(vc_req, &desc, &ewb2,
				digest_addr, 0, chain->para.hash_result_len,
				nb_descs, vq_size);
		if (unlikely(ewb->next == NULL)) {
			ret = VIRTIO_CRYPTO_ERR;
			goto error_exit;
		}

		if (unlikely(copy_data(digest_addr, vc_req, &digest_desc,
				chain->para.hash_result_len,
				nb_descs, vq_size) < 0)) {
			ret = VIRTIO_CRYPTO_BADMSG;
			goto error_exit;
		}

		op->sym->auth.digest.data = digest_addr;
		op->sym->auth.digest.phys_addr = rte_pktmbuf_iova_offset(m_src,
				digest_offset);
		break;
	default:
		ret = VIRTIO_CRYPTO_BADMSG;
		goto error_exit;
	}

	/* record inhdr */
	vc_req->inhdr = get_data_ptr(vc_req, desc, VHOST_ACCESS_WO);
	if (unlikely(vc_req->inhdr == NULL)) {
		ret = VIRTIO_CRYPTO_BADMSG;
		goto error_exit;
	}

	vc_req->inhdr->status = VIRTIO_CRYPTO_OK;

	op->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
	op->sess_type = RTE_CRYPTO_OP_WITH_SESSION;

	op->sym->cipher.data.offset = chain->para.cipher_start_src_offset;
	op->sym->cipher.data.length = chain->para.src_data_len -
			chain->para.cipher_start_src_offset;

	op->sym->auth.data.offset = chain->para.hash_start_src_offset;
	op->sym->auth.data.length = chain->para.len_to_hash;

	vc_req->len = chain->para.dst_data_len + chain->para.hash_result_len +
			INHDR_LEN;
	return 0;

error_exit:
	if (vc_req->wb)
		free_wb_data(vc_req->wb, vc_req->wb_pool);
	vc_req->len = INHDR_LEN;
	return ret;
}
/**
 * Process one crypto request from a descriptor chain.
 */
static __rte_always_inline int
vhost_crypto_process_one_req(struct vhost_crypto *vcrypto,
		struct vhost_virtqueue *vq, struct rte_crypto_op *op,
		struct vring_desc *head, uint16_t desc_idx)
{
	struct vhost_crypto_data_req *vc_req = rte_mbuf_to_priv(op->sym->m_src);
	struct rte_cryptodev_sym_session *session;
	struct virtio_crypto_op_data_req *req, tmp_req;
	struct virtio_crypto_inhdr *inhdr;
	struct vring_desc *desc = NULL;
	uint64_t session_id;
	uint64_t dlen;
	uint32_t nb_descs = vq->size;
	int err = 0;

	vc_req->desc_idx = desc_idx;
	vc_req->dev = vcrypto->dev;
	vc_req->vq = vq;

	if (likely(head->flags & VRING_DESC_F_INDIRECT)) {
		dlen = head->len;
		nb_descs = dlen / sizeof(struct vring_desc);
		/* drop invalid descriptors */
		if (unlikely(nb_descs > vq->size))
			return -1;
		desc = IOVA_TO_VVA(struct vring_desc *, vc_req, head->addr,
				&dlen, VHOST_ACCESS_RO);
		if (unlikely(!desc || dlen != head->len))
			return -1;

		head = desc;
	} else {
		desc = head;
	}

	vc_req->head = head;
	vc_req->zero_copy = vcrypto->option;

	req = get_data_ptr(vc_req, desc, VHOST_ACCESS_RO);
	if (unlikely(req == NULL)) {
		switch (vcrypto->option) {
		case RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE:
			err = VIRTIO_CRYPTO_BADMSG;
			VC_LOG_ERR("Invalid descriptor");
			goto error_exit;
		case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE:
			req = &tmp_req;
			if (unlikely(copy_data(req, vc_req, &desc, sizeof(*req),
					&nb_descs, vq->size) < 0)) {
				err = VIRTIO_CRYPTO_BADMSG;
				VC_LOG_ERR("Invalid descriptor");
				goto error_exit;
			}
			break;
		default:
			err = VIRTIO_CRYPTO_ERR;
			VC_LOG_ERR("Invalid option");
			goto error_exit;
		}
	} else {
		if (unlikely(move_desc(vc_req->head, &desc,
				sizeof(*req), &nb_descs, vq->size) < 0)) {
			VC_LOG_ERR("Incorrect descriptor");
			goto error_exit;
		}
	}

	switch (req->header.opcode) {
	case VIRTIO_CRYPTO_CIPHER_ENCRYPT:
	case VIRTIO_CRYPTO_CIPHER_DECRYPT:
		session_id = req->header.session_id;

		/* one branch to avoid unnecessary table lookup */
		if (vcrypto->cache_session_id != session_id) {
			err = rte_hash_lookup_data(vcrypto->session_map,
					&session_id, (void **)&session);
			if (unlikely(err < 0)) {
				err = VIRTIO_CRYPTO_ERR;
				VC_LOG_ERR("Failed to find session %"PRIu64,
						session_id);
				goto error_exit;
			}

			vcrypto->cache_session = session;
			vcrypto->cache_session_id = session_id;
		}

		session = vcrypto->cache_session;

		err = rte_crypto_op_attach_sym_session(op, session);
		if (unlikely(err < 0)) {
			err = VIRTIO_CRYPTO_ERR;
			VC_LOG_ERR("Failed to attach session to op");
			goto error_exit;
		}

		switch (req->u.sym_req.op_type) {
		case VIRTIO_CRYPTO_SYM_OP_NONE:
			err = VIRTIO_CRYPTO_NOTSUPP;
			break;
		case VIRTIO_CRYPTO_SYM_OP_CIPHER:
			err = prepare_sym_cipher_op(vcrypto, op, vc_req,
					&req->u.sym_req.u.cipher, desc,
					&nb_descs, vq->size);
			break;
		case VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING:
			err = prepare_sym_chain_op(vcrypto, op, vc_req,
					&req->u.sym_req.u.chain, desc,
					&nb_descs, vq->size);
			break;
		}
		if (unlikely(err != 0)) {
			VC_LOG_ERR("Failed to process sym request");
			goto error_exit;
		}
		break;
	default:
		VC_LOG_ERR("Unsupported symmetric crypto request type %u",
				req->header.opcode);
		goto error_exit;
	}

	return 0;

error_exit:

	inhdr = reach_inhdr(vc_req, desc, &nb_descs, vq->size);
	if (likely(inhdr != NULL))
		inhdr->status = (uint8_t)err;

	return -1;
}
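
/**
 * Complete a single crypto op: write data back to the guest when zero
 * copy is off, fill the used ring entry and recycle the mbufs. Returns
 * the virtqueue the op belongs to, or NULL on error.
 */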
static __rte_always_inline struct vhost_virtqueue *
vhost_crypto_finalize_one_request(struct rte_crypto_op *op,
		struct vhost_virtqueue *old_vq)
{
	struct rte_mbuf *m_src = op->sym->m_src;
	struct rte_mbuf *m_dst = op->sym->m_dst;
	struct vhost_crypto_data_req *vc_req = rte_mbuf_to_priv(m_src);
	uint16_t desc_idx;

	if (unlikely(!vc_req)) {
		VC_LOG_ERR("Failed to retrieve vc_req");
		return NULL;
	}

	if (old_vq && (vc_req->vq != old_vq))
		return vc_req->vq;

	desc_idx = vc_req->desc_idx;

	if (unlikely(op->status != RTE_CRYPTO_OP_STATUS_SUCCESS))
		vc_req->inhdr->status = VIRTIO_CRYPTO_ERR;

	if (vc_req->zero_copy == 0)
		write_back_data(vc_req);

	vc_req->vq->used->ring[desc_idx].id = desc_idx;
	vc_req->vq->used->ring[desc_idx].len = vc_req->len;

	rte_mempool_put(m_src->pool, (void *)m_src);

	if (m_dst)
		rte_mempool_put(m_dst->pool, (void *)m_dst);

	return vc_req->vq;
}
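
/**
 * Finalize a run of ops that belong to the same virtqueue, stopping at
 * the first op from a different queue, and expose the completions by
 * bumping the used index.
 */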
static __rte_always_inline uint16_t
vhost_crypto_complete_one_vm_requests(struct rte_crypto_op **ops,
		uint16_t nb_ops, int *callfd)
{
	uint16_t processed = 1;
	struct vhost_virtqueue *vq, *tmp_vq;

	if (unlikely(nb_ops == 0))
		return 0;

	vq = vhost_crypto_finalize_one_request(ops[0], NULL);
	if (unlikely(vq == NULL))
		return 0;

	while ((processed < nb_ops)) {
		tmp_vq = vhost_crypto_finalize_one_request(ops[processed],
				vq);

		if (unlikely(vq != tmp_vq))
			break;

		processed++;
	}

	*callfd = vq->callfd;

	*(volatile uint16_t *)&vq->used->idx += processed;

	return processed;
}
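
/**
 * Create the per-device vhost crypto context: session map, mbuf and
 * write-back pools, and the vhost-user message handler hook.
 */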
int __rte_experimental
rte_vhost_crypto_create(int vid, uint8_t cryptodev_id,
		struct rte_mempool *sess_pool,
		struct rte_mempool *sess_priv_pool,
		int socket_id)
{
	struct virtio_net *dev = get_device(vid);
	struct rte_hash_parameters params = {0};
	struct vhost_crypto *vcrypto;
	char name[128];
	int ret;

	if (!dev) {
		VC_LOG_ERR("Invalid vid %i", vid);
		return -EINVAL;
	}

	ret = rte_vhost_driver_set_features(dev->ifname,
			VIRTIO_CRYPTO_FEATURES);
	if (ret < 0) {
		VC_LOG_ERR("Error setting features");
		return -1;
	}

	vcrypto = rte_zmalloc_socket(NULL, sizeof(*vcrypto),
			RTE_CACHE_LINE_SIZE, socket_id);
	if (!vcrypto) {
		VC_LOG_ERR("Insufficient memory");
		return -ENOMEM;
	}

	vcrypto->sess_pool = sess_pool;
	vcrypto->sess_priv_pool = sess_priv_pool;
	vcrypto->cid = cryptodev_id;
	vcrypto->cache_session_id = UINT64_MAX;
	vcrypto->last_session_id = 1;
	vcrypto->dev = dev;
	vcrypto->option = RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE;

	snprintf(name, 127, "HASH_VHOST_CRYPT_%u", (uint32_t)vid);
	params.name = name;
	params.entries = VHOST_CRYPTO_SESSION_MAP_ENTRIES;
	params.hash_func = rte_jhash;
	params.key_len = sizeof(uint64_t);
	params.socket_id = socket_id;
	vcrypto->session_map = rte_hash_create(&params);
	if (!vcrypto->session_map) {
		VC_LOG_ERR("Failed to create session map");
		ret = -ENOMEM;
		goto error_exit;
	}

	snprintf(name, 127, "MBUF_POOL_VM_%u", (uint32_t)vid);
	vcrypto->mbuf_pool = rte_pktmbuf_pool_create(name,
			VHOST_CRYPTO_MBUF_POOL_SIZE, 512,
			sizeof(struct vhost_crypto_data_req),
			RTE_MBUF_DEFAULT_DATAROOM * 2 + RTE_PKTMBUF_HEADROOM,
			rte_socket_id());
	if (!vcrypto->mbuf_pool) {
		VC_LOG_ERR("Failed to create mbuf pool");
		ret = -ENOMEM;
		goto error_exit;
	}

	snprintf(name, 127, "WB_POOL_VM_%u", (uint32_t)vid);
	vcrypto->wb_pool = rte_mempool_create(name,
			VHOST_CRYPTO_MBUF_POOL_SIZE,
			sizeof(struct vhost_crypto_writeback_data),
			128, 0, NULL, NULL, NULL, NULL,
			rte_socket_id(), 0);
	if (!vcrypto->wb_pool) {
		VC_LOG_ERR("Failed to create mempool");
		ret = -ENOMEM;
		goto error_exit;
	}

	dev->extern_data = vcrypto;
	dev->extern_ops.pre_msg_handle = NULL;
	dev->extern_ops.post_msg_handle = vhost_crypto_msg_post_handler;

	return 0;

error_exit:
	if (vcrypto->session_map)
		rte_hash_free(vcrypto->session_map);
	if (vcrypto->mbuf_pool)
		rte_mempool_free(vcrypto->mbuf_pool);

	rte_free(vcrypto);

	return ret;
}
int __rte_experimental
rte_vhost_crypto_free(int vid)
{
	struct virtio_net *dev = get_device(vid);
	struct vhost_crypto *vcrypto;

	if (unlikely(dev == NULL)) {
		VC_LOG_ERR("Invalid vid %i", vid);
		return -EINVAL;
	}

	vcrypto = dev->extern_data;
	if (unlikely(vcrypto == NULL)) {
		VC_LOG_ERR("Cannot find required data, is it initialized?");
		return -ENOENT;
	}

	rte_hash_free(vcrypto->session_map);
	rte_mempool_free(vcrypto->mbuf_pool);
	rte_mempool_free(vcrypto->wb_pool);
	rte_free(vcrypto);

	dev->extern_data = NULL;
	dev->extern_ops.pre_msg_handle = NULL;
	dev->extern_ops.post_msg_handle = NULL;

	return 0;
}
int __rte_experimental
rte_vhost_crypto_set_zero_copy(int vid, enum rte_vhost_crypto_zero_copy option)
{
	struct virtio_net *dev = get_device(vid);
	struct vhost_crypto *vcrypto;

	if (unlikely(dev == NULL)) {
		VC_LOG_ERR("Invalid vid %i", vid);
		return -EINVAL;
	}

	if (unlikely((uint32_t)option >=
			RTE_VHOST_CRYPTO_MAX_ZERO_COPY_OPTIONS)) {
		VC_LOG_ERR("Invalid option %i", option);
		return -EINVAL;
	}

	vcrypto = (struct vhost_crypto *)dev->extern_data;
	if (unlikely(vcrypto == NULL)) {
		VC_LOG_ERR("Cannot find required data, is it initialized?");
		return -ENOENT;
	}

	if (vcrypto->option == (uint8_t)option)
		return 0;

	if (!(rte_mempool_full(vcrypto->mbuf_pool)) ||
			!(rte_mempool_full(vcrypto->wb_pool))) {
		VC_LOG_ERR("Cannot update zero copy as mempool is not full");
		return -EINVAL;
	}

	if (option == RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE) {
		char name[128];

		snprintf(name, 127, "WB_POOL_VM_%u", (uint32_t)vid);
		vcrypto->wb_pool = rte_mempool_create(name,
				VHOST_CRYPTO_MBUF_POOL_SIZE,
				sizeof(struct vhost_crypto_writeback_data),
				128, 0, NULL, NULL, NULL, NULL,
				rte_socket_id(), 0);
		if (!vcrypto->wb_pool) {
			VC_LOG_ERR("Failed to create mempool");
			return -ENOMEM;
		}
	} else {
		rte_mempool_free(vcrypto->wb_pool);
		vcrypto->wb_pool = NULL;
	}

	vcrypto->option = (uint8_t)option;

	return 0;
}
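
/*
 * A minimal data-path sketch for the fetch/finalize API below
 * (illustrative only; it assumes vid, qid and a configured cryptodev
 * "cid" with queue pair 0 exist elsewhere, and that ops[] was first
 * allocated from an rte_crypto_op pool):
 *
 *	struct rte_crypto_op *ops[VHOST_CRYPTO_MAX_BURST_SIZE];
 *	int callfds[VIRTIO_CRYPTO_MAX_NUM_BURST_VQS];
 *	uint16_t n, nb_callfds;
 *
 *	n = rte_vhost_crypto_fetch_requests(vid, qid, ops, RTE_DIM(ops));
 *	n = rte_cryptodev_enqueue_burst(cid, 0, ops, n);
 *	n = rte_cryptodev_dequeue_burst(cid, 0, ops, RTE_DIM(ops));
 *	n = rte_vhost_crypto_finalize_requests(ops, n, callfds, &nb_callfds);
 */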
uint16_t __rte_experimental
rte_vhost_crypto_fetch_requests(int vid, uint32_t qid,
		struct rte_crypto_op **ops, uint16_t nb_ops)
{
	struct rte_mbuf *mbufs[VHOST_CRYPTO_MAX_BURST_SIZE * 2];
	struct virtio_net *dev = get_device(vid);
	struct vhost_crypto *vcrypto;
	struct vhost_virtqueue *vq;
	uint16_t avail_idx;
	uint16_t start_idx;
	uint16_t count;
	uint16_t i = 0;

	if (unlikely(dev == NULL)) {
		VC_LOG_ERR("Invalid vid %i", vid);
		return 0;
	}

	if (unlikely(qid >= VHOST_MAX_QUEUE_PAIRS)) {
		VC_LOG_ERR("Invalid qid %u", qid);
		return 0;
	}

	vcrypto = (struct vhost_crypto *)dev->extern_data;
	if (unlikely(vcrypto == NULL)) {
		VC_LOG_ERR("Cannot find required data, is it initialized?");
		return 0;
	}

	vq = dev->virtqueue[qid];

	avail_idx = *((volatile uint16_t *)&vq->avail->idx);
	start_idx = vq->last_used_idx;
	count = avail_idx - start_idx;
	count = RTE_MIN(count, VHOST_CRYPTO_MAX_BURST_SIZE);
	count = RTE_MIN(count, nb_ops);

	if (unlikely(count == 0))
		return 0;

	/* for zero copy, we need 2 empty mbufs for src and dst, otherwise
	 * we need only 1 mbuf as src and dst
	 */
	switch (vcrypto->option) {
	case RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE:
		if (unlikely(rte_mempool_get_bulk(vcrypto->mbuf_pool,
				(void **)mbufs, count * 2) < 0)) {
			VC_LOG_ERR("Insufficient memory");
			return 0;
		}

		for (i = 0; i < count; i++) {
			uint16_t used_idx = (start_idx + i) & (vq->size - 1);
			uint16_t desc_idx = vq->avail->ring[used_idx];
			struct vring_desc *head = &vq->desc[desc_idx];
			struct rte_crypto_op *op = ops[i];

			op->sym->m_src = mbufs[i * 2];
			op->sym->m_dst = mbufs[i * 2 + 1];
			op->sym->m_src->data_off = 0;
			op->sym->m_dst->data_off = 0;

			if (unlikely(vhost_crypto_process_one_req(vcrypto, vq,
					op, head, desc_idx) < 0))
				break;
		}

		if (unlikely(i < count))
			rte_mempool_put_bulk(vcrypto->mbuf_pool,
					(void **)&mbufs[i * 2],
					(count - i) * 2);

		break;

	case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE:
		if (unlikely(rte_mempool_get_bulk(vcrypto->mbuf_pool,
				(void **)mbufs, count) < 0)) {
			VC_LOG_ERR("Insufficient memory");
			return 0;
		}

		for (i = 0; i < count; i++) {
			uint16_t used_idx = (start_idx + i) & (vq->size - 1);
			uint16_t desc_idx = vq->avail->ring[used_idx];
			struct vring_desc *head = &vq->desc[desc_idx];
			struct rte_crypto_op *op = ops[i];

			op->sym->m_src = mbufs[i];
			op->sym->m_dst = NULL;
			op->sym->m_src->data_off = 0;

			if (unlikely(vhost_crypto_process_one_req(vcrypto, vq,
					op, head, desc_idx) < 0))
				break;
		}

		if (unlikely(i < count))
			rte_mempool_put_bulk(vcrypto->mbuf_pool,
					(void **)&mbufs[i],
					count - i);

		break;

	}

	vq->last_used_idx += i;

	return i;
}
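
/**
 * Finalize a burst of dequeued ops, grouping completions per virtqueue
 * and collecting one callfd per queue for the caller to signal.
 */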
uint16_t __rte_experimental
rte_vhost_crypto_finalize_requests(struct rte_crypto_op **ops,
		uint16_t nb_ops, int *callfds, uint16_t *nb_callfds)
{
	struct rte_crypto_op **tmp_ops = ops;
	uint16_t count = 0, left = nb_ops;
	int callfd;
	uint16_t idx = 0;

	while (left) {
		count = vhost_crypto_complete_one_vm_requests(tmp_ops, left,
				&callfd);
		if (unlikely(count == 0))
			break;

		left -= count;
		tmp_ops = &tmp_ops[count];

		callfds[idx++] = callfd;

		if (unlikely(idx >= VIRTIO_CRYPTO_MAX_NUM_BURST_VQS)) {
			VC_LOG_ERR("Too many vqs");
			break;
		}
	}

	*nb_callfds = idx;

	return nb_ops - left;
}