/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017-2018 Intel Corporation
 */
#include <rte_malloc.h>
#include <rte_hash.h>
#include <rte_jhash.h>
#include <rte_mbuf.h>
#include <rte_cryptodev.h>

#include "rte_vhost_crypto.h"
#include "vhost.h"
#include "vhost_user.h"
#include "virtio_crypto.h"
#define INHDR_LEN (sizeof(struct virtio_crypto_inhdr))
#define IV_OFFSET (sizeof(struct rte_crypto_op) + \
		sizeof(struct rte_crypto_sym_op))
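/*
 * Layout note: each crypto op is followed in the same mempool object by
 * its symmetric op and then the per-op IV, i.e.
 * | rte_crypto_op | rte_crypto_sym_op | IV bytes ... |,
 * which is why IV_OFFSET is simply the sum of the two structure sizes.
 */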
#ifdef RTE_LIBRTE_VHOST_DEBUG
#define VC_LOG_ERR(fmt, args...) \
	RTE_LOG(ERR, USER1, "[%s] %s() line %u: " fmt "\n", \
		"Vhost-Crypto", __func__, __LINE__, ## args)
#define VC_LOG_INFO(fmt, args...) \
	RTE_LOG(INFO, USER1, "[%s] %s() line %u: " fmt "\n", \
		"Vhost-Crypto", __func__, __LINE__, ## args)

#define VC_LOG_DBG(fmt, args...) \
	RTE_LOG(DEBUG, USER1, "[%s] %s() line %u: " fmt "\n", \
		"Vhost-Crypto", __func__, __LINE__, ## args)
#else
#define VC_LOG_ERR(fmt, args...) \
	RTE_LOG(ERR, USER1, "[VHOST-Crypto]: " fmt "\n", ## args)
#define VC_LOG_INFO(fmt, args...) \
	RTE_LOG(INFO, USER1, "[VHOST-Crypto]: " fmt "\n", ## args)
#define VC_LOG_DBG(fmt, args...)
#endif
#define VIRTIO_CRYPTO_FEATURES ((1 << VIRTIO_F_NOTIFY_ON_EMPTY) | \
		(1 << VIRTIO_RING_F_INDIRECT_DESC) | \
		(1 << VIRTIO_RING_F_EVENT_IDX) | \
		(1 << VIRTIO_CRYPTO_SERVICE_CIPHER) | \
		(1 << VIRTIO_CRYPTO_SERVICE_MAC) | \
		(1 << VIRTIO_NET_F_CTRL_VQ))
#define IOVA_TO_VVA(t, r, a, l, p) \
	((t)(uintptr_t)vhost_iova_to_vva(r->dev, r->vq, a, l, p))
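/*
 * vhost_iova_to_vva() translates a guest IOVA into a host virtual address
 * and may shrink *l when the region is not host-contiguous; callers below
 * therefore re-check the mapped length against the descriptor length after
 * every translation.
 */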
static int
cipher_algo_transform(uint32_t virtio_cipher_algo,
		enum rte_crypto_cipher_algorithm *algo)
{
	switch (virtio_cipher_algo) {
	case VIRTIO_CRYPTO_CIPHER_AES_CBC:
		*algo = RTE_CRYPTO_CIPHER_AES_CBC;
		break;
	case VIRTIO_CRYPTO_CIPHER_AES_CTR:
		*algo = RTE_CRYPTO_CIPHER_AES_CTR;
		break;
	case VIRTIO_CRYPTO_CIPHER_DES_ECB:
		/* DES-ECB has no cryptodev counterpart */
		return -VIRTIO_CRYPTO_NOTSUPP;
	case VIRTIO_CRYPTO_CIPHER_DES_CBC:
		*algo = RTE_CRYPTO_CIPHER_DES_CBC;
		break;
	case VIRTIO_CRYPTO_CIPHER_3DES_ECB:
		*algo = RTE_CRYPTO_CIPHER_3DES_ECB;
		break;
	case VIRTIO_CRYPTO_CIPHER_3DES_CBC:
		*algo = RTE_CRYPTO_CIPHER_3DES_CBC;
		break;
	case VIRTIO_CRYPTO_CIPHER_3DES_CTR:
		*algo = RTE_CRYPTO_CIPHER_3DES_CTR;
		break;
	case VIRTIO_CRYPTO_CIPHER_KASUMI_F8:
		*algo = RTE_CRYPTO_CIPHER_KASUMI_F8;
		break;
	case VIRTIO_CRYPTO_CIPHER_SNOW3G_UEA2:
		*algo = RTE_CRYPTO_CIPHER_SNOW3G_UEA2;
		break;
	case VIRTIO_CRYPTO_CIPHER_AES_F8:
		*algo = RTE_CRYPTO_CIPHER_AES_F8;
		break;
	case VIRTIO_CRYPTO_CIPHER_AES_XTS:
		*algo = RTE_CRYPTO_CIPHER_AES_XTS;
		break;
	case VIRTIO_CRYPTO_CIPHER_ZUC_EEA3:
		*algo = RTE_CRYPTO_CIPHER_ZUC_EEA3;
		break;
	default:
		return -VIRTIO_CRYPTO_BADMSG;
	}

	return 0;
}
static int
auth_algo_transform(uint32_t virtio_auth_algo,
		enum rte_crypto_auth_algorithm *algo)
{
	switch (virtio_auth_algo) {
	case VIRTIO_CRYPTO_NO_MAC:
		*algo = RTE_CRYPTO_AUTH_NULL;
		break;
	case VIRTIO_CRYPTO_MAC_HMAC_MD5:
		*algo = RTE_CRYPTO_AUTH_MD5_HMAC;
		break;
	case VIRTIO_CRYPTO_MAC_HMAC_SHA1:
		*algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
		break;
	case VIRTIO_CRYPTO_MAC_HMAC_SHA_224:
		*algo = RTE_CRYPTO_AUTH_SHA224_HMAC;
		break;
	case VIRTIO_CRYPTO_MAC_HMAC_SHA_256:
		*algo = RTE_CRYPTO_AUTH_SHA256_HMAC;
		break;
	case VIRTIO_CRYPTO_MAC_HMAC_SHA_384:
		*algo = RTE_CRYPTO_AUTH_SHA384_HMAC;
		break;
	case VIRTIO_CRYPTO_MAC_HMAC_SHA_512:
		*algo = RTE_CRYPTO_AUTH_SHA512_HMAC;
		break;
	case VIRTIO_CRYPTO_MAC_CMAC_AES:
		*algo = RTE_CRYPTO_AUTH_AES_CMAC;
		break;
	case VIRTIO_CRYPTO_MAC_KASUMI_F9:
		*algo = RTE_CRYPTO_AUTH_KASUMI_F9;
		break;
	case VIRTIO_CRYPTO_MAC_SNOW3G_UIA2:
		*algo = RTE_CRYPTO_AUTH_SNOW3G_UIA2;
		break;
	case VIRTIO_CRYPTO_MAC_GMAC_AES:
		*algo = RTE_CRYPTO_AUTH_AES_GMAC;
		break;
	case VIRTIO_CRYPTO_MAC_CBCMAC_AES:
		*algo = RTE_CRYPTO_AUTH_AES_CBC_MAC;
		break;
	case VIRTIO_CRYPTO_MAC_XCBC_AES:
		*algo = RTE_CRYPTO_AUTH_AES_XCBC_MAC;
		break;
	case VIRTIO_CRYPTO_MAC_CMAC_3DES:
	case VIRTIO_CRYPTO_MAC_GMAC_TWOFISH:
	case VIRTIO_CRYPTO_MAC_CBCMAC_KASUMI_F9:
		return -VIRTIO_CRYPTO_NOTSUPP;
	default:
		return -VIRTIO_CRYPTO_BADMSG;
	}

	return 0;
}
static int get_iv_len(enum rte_crypto_cipher_algorithm algo)
{
	int len;

	switch (algo) {
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		len = 8;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CTR:
		len = 8;
		break;
	case RTE_CRYPTO_CIPHER_3DES_ECB:
		len = 8;
		break;
	case RTE_CRYPTO_CIPHER_AES_CBC:
		len = 16;
		break;

	/* TODO: add common algos */

	default:
		len = -1;
		break;
	}

	return len;
}
/**
 * vhost_crypto struct maintains a number of virtio_crypto devices that are
 * all served by one DPDK crypto device handling every crypto workload. It
 * is private to vhost_crypto.c.
 */
struct vhost_crypto {
	/** Used to look up the DPDK Cryptodev Session based on the VIRTIO
	 *  crypto session ID.
	 */
	struct rte_hash *session_map;
	struct rte_mempool *mbuf_pool;
	struct rte_mempool *sess_pool;
	struct rte_mempool *sess_priv_pool;
	struct rte_mempool *wb_pool;

	/** DPDK cryptodev ID */
	uint8_t cid;

	uint64_t last_session_id;

	uint64_t cache_session_id;
	struct rte_cryptodev_sym_session *cache_session;
	/** socket id for the device */
	int socket_id;

	struct virtio_net *dev;

	uint8_t option;
} __rte_cache_aligned;
struct vhost_crypto_writeback_data {
	uint8_t *src;
	uint8_t *dst;
	uint64_t len;
	struct vhost_crypto_writeback_data *next;
};
struct vhost_crypto_data_req {
	struct vring_desc *head;
	struct virtio_net *dev;
	struct virtio_crypto_inhdr *inhdr;
	struct vhost_virtqueue *vq;
	struct vhost_crypto_writeback_data *wb;
	struct rte_mempool *wb_pool;
	uint16_t desc_idx;
	uint16_t len;
	uint16_t zero_copy;
};
static int
transform_cipher_param(struct rte_crypto_sym_xform *xform,
		VhostUserCryptoSessionParam *param)
{
	int ret;

	ret = cipher_algo_transform(param->cipher_algo, &xform->cipher.algo);
	if (unlikely(ret < 0))
		return ret;

	xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
	xform->cipher.key.length = param->cipher_key_len;
	if (xform->cipher.key.length > 0)
		xform->cipher.key.data = param->cipher_key_buf;

	if (param->dir == VIRTIO_CRYPTO_OP_ENCRYPT)
		xform->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
	else if (param->dir == VIRTIO_CRYPTO_OP_DECRYPT)
		xform->cipher.op = RTE_CRYPTO_CIPHER_OP_DECRYPT;
	else {
		VC_LOG_DBG("Bad operation type");
		return -VIRTIO_CRYPTO_BADMSG;
	}

	ret = get_iv_len(xform->cipher.algo);
	if (unlikely(ret < 0))
		return ret;
	xform->cipher.iv.length = (uint16_t)ret;
	xform->cipher.iv.offset = IV_OFFSET;

	return 0;
}
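/*
 * For reference, an AES-128-CBC encrypt session request would leave the
 * xform filled roughly as below (illustrative values only):
 *
 *	xform->type              = RTE_CRYPTO_SYM_XFORM_CIPHER;
 *	xform->cipher.algo       = RTE_CRYPTO_CIPHER_AES_CBC;
 *	xform->cipher.op         = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
 *	xform->cipher.key.length = 16;
 *	xform->cipher.iv.length  = 16;
 *	xform->cipher.iv.offset  = IV_OFFSET;
 */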
static int
transform_chain_param(struct rte_crypto_sym_xform *xforms,
		VhostUserCryptoSessionParam *param)
{
	struct rte_crypto_sym_xform *xform_cipher, *xform_auth;
	int ret;

	switch (param->chaining_dir) {
	case VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_HASH_THEN_CIPHER:
		xform_auth = xforms;
		xform_cipher = xforms->next;
		xform_cipher->cipher.op = RTE_CRYPTO_CIPHER_OP_DECRYPT;
		xform_auth->auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
		break;
	case VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_CIPHER_THEN_HASH:
		xform_cipher = xforms;
		xform_auth = xforms->next;
		xform_cipher->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
		xform_auth->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
		break;
	default:
		return -VIRTIO_CRYPTO_BADMSG;
	}

	/* cipher */
	ret = cipher_algo_transform(param->cipher_algo,
			&xform_cipher->cipher.algo);
	if (unlikely(ret < 0))
		return ret;
	xform_cipher->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
	xform_cipher->cipher.key.length = param->cipher_key_len;
	xform_cipher->cipher.key.data = param->cipher_key_buf;
	ret = get_iv_len(xform_cipher->cipher.algo);
	if (unlikely(ret < 0))
		return ret;
	xform_cipher->cipher.iv.length = (uint16_t)ret;
	xform_cipher->cipher.iv.offset = IV_OFFSET;

	/* auth */
	xform_auth->type = RTE_CRYPTO_SYM_XFORM_AUTH;
	ret = auth_algo_transform(param->hash_algo, &xform_auth->auth.algo);
	if (unlikely(ret < 0))
		return ret;
	xform_auth->auth.digest_length = param->digest_len;
	xform_auth->auth.key.length = param->auth_key_len;
	xform_auth->auth.key.data = param->auth_key_buf;

	return 0;
}
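/*
 * Informational: the two supported chain orders map directly onto the
 * cryptodev op pair -- HASH_THEN_CIPHER yields VERIFY + DECRYPT, while
 * CIPHER_THEN_HASH yields ENCRYPT + GENERATE, matching the decrypt and
 * encrypt directions of an authenticate-and-encrypt flow.
 */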
static void
vhost_crypto_create_sess(struct vhost_crypto *vcrypto,
		VhostUserCryptoSessionParam *sess_param)
{
	struct rte_crypto_sym_xform xform1 = {0}, xform2 = {0};
	struct rte_cryptodev_sym_session *session;
	int ret;

	switch (sess_param->op_type) {
	case VIRTIO_CRYPTO_SYM_OP_NONE:
	case VIRTIO_CRYPTO_SYM_OP_CIPHER:
		ret = transform_cipher_param(&xform1, sess_param);
		if (unlikely(ret)) {
			VC_LOG_ERR("Error transform session msg (%i)", ret);
			sess_param->session_id = ret;
			return;
		}
		break;
	case VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING:
		if (unlikely(sess_param->hash_mode !=
				VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH)) {
			sess_param->session_id = -VIRTIO_CRYPTO_NOTSUPP;
			VC_LOG_ERR("Error transform session message (%i)",
					-VIRTIO_CRYPTO_NOTSUPP);
			return;
		}

		xform1.next = &xform2;

		ret = transform_chain_param(&xform1, sess_param);
		if (unlikely(ret)) {
			VC_LOG_ERR("Error transform session message (%i)",
					ret);
			sess_param->session_id = ret;
			return;
		}
		break;
	default:
		VC_LOG_ERR("Algorithm not yet supported");
		sess_param->session_id = -VIRTIO_CRYPTO_NOTSUPP;
		return;
	}

	session = rte_cryptodev_sym_session_create(vcrypto->sess_pool);
	if (!session) {
		VC_LOG_ERR("Failed to create session");
		sess_param->session_id = -VIRTIO_CRYPTO_ERR;
		return;
	}

	if (rte_cryptodev_sym_session_init(vcrypto->cid, session, &xform1,
			vcrypto->sess_priv_pool) < 0) {
		VC_LOG_ERR("Failed to initialize session");
		sess_param->session_id = -VIRTIO_CRYPTO_ERR;
		return;
	}

	/* insert the session into the map */
	if (rte_hash_add_key_data(vcrypto->session_map,
			&vcrypto->last_session_id, session) < 0) {
		VC_LOG_ERR("Failed to insert session to hash table");

		if (rte_cryptodev_sym_session_clear(vcrypto->cid, session) < 0)
			VC_LOG_ERR("Failed to clear session");
		else {
			if (rte_cryptodev_sym_session_free(session) < 0)
				VC_LOG_ERR("Failed to free session");
		}
		sess_param->session_id = -VIRTIO_CRYPTO_ERR;
		return;
	}

	VC_LOG_INFO("Session %"PRIu64" created for vdev %i.",
			vcrypto->last_session_id, vcrypto->dev->vid);

	sess_param->session_id = vcrypto->last_session_id;
	vcrypto->last_session_id++;
}
static int
vhost_crypto_close_sess(struct vhost_crypto *vcrypto, uint64_t session_id)
{
	struct rte_cryptodev_sym_session *session;
	uint64_t sess_id = session_id;
	int ret;

	ret = rte_hash_lookup_data(vcrypto->session_map, &sess_id,
			(void **)&session);

	if (unlikely(ret < 0)) {
		VC_LOG_ERR("Failed to delete session %"PRIu64".", session_id);
		return -VIRTIO_CRYPTO_INVSESS;
	}

	if (rte_cryptodev_sym_session_clear(vcrypto->cid, session) < 0) {
		VC_LOG_DBG("Failed to clear session");
		return -VIRTIO_CRYPTO_ERR;
	}

	if (rte_cryptodev_sym_session_free(session) < 0) {
		VC_LOG_DBG("Failed to free session");
		return -VIRTIO_CRYPTO_ERR;
	}

	if (rte_hash_del_key(vcrypto->session_map, &sess_id) < 0) {
		VC_LOG_DBG("Failed to delete session from hash table.");
		return -VIRTIO_CRYPTO_ERR;
	}

	VC_LOG_INFO("Session %"PRIu64" deleted for vdev %i.", sess_id,
			vcrypto->dev->vid);

	return 0;
}
static enum rte_vhost_msg_result
vhost_crypto_msg_post_handler(int vid, void *msg)
{
	struct virtio_net *dev = get_device(vid);
	struct vhost_crypto *vcrypto;
	VhostUserMsg *vmsg = msg;
	enum rte_vhost_msg_result ret = RTE_VHOST_MSG_RESULT_OK;

	if (dev == NULL) {
		VC_LOG_ERR("Invalid vid %i", vid);
		return RTE_VHOST_MSG_RESULT_ERR;
	}

	vcrypto = dev->extern_data;
	if (vcrypto == NULL) {
		VC_LOG_ERR("Cannot find required data, is it initialized?");
		return RTE_VHOST_MSG_RESULT_ERR;
	}

	switch (vmsg->request.master) {
	case VHOST_USER_CRYPTO_CREATE_SESS:
		vhost_crypto_create_sess(vcrypto,
				&vmsg->payload.crypto_session);
		ret = RTE_VHOST_MSG_RESULT_REPLY;
		break;
	case VHOST_USER_CRYPTO_CLOSE_SESS:
		if (vhost_crypto_close_sess(vcrypto, vmsg->payload.u64))
			ret = RTE_VHOST_MSG_RESULT_ERR;
		break;
	default:
		ret = RTE_VHOST_MSG_RESULT_NOT_HANDLED;
		break;
	}

	return ret;
}
static __rte_always_inline struct vring_desc *
find_write_desc(struct vring_desc *head, struct vring_desc *desc,
		uint32_t *nb_descs, uint32_t vq_size)
{
	if (desc->flags & VRING_DESC_F_WRITE)
		return desc;

	while (desc->flags & VRING_DESC_F_NEXT) {
		if (unlikely(*nb_descs == 0 || desc->next >= vq_size))
			return NULL;
		(*nb_descs)--;

		desc = &head[desc->next];
		if (desc->flags & VRING_DESC_F_WRITE)
			return desc;
	}

	return NULL;
}
static struct virtio_crypto_inhdr *
reach_inhdr(struct vhost_crypto_data_req *vc_req, struct vring_desc *desc,
		uint32_t *nb_descs, uint32_t vq_size)
{
	uint64_t dlen;
	struct virtio_crypto_inhdr *inhdr;

	while (desc->flags & VRING_DESC_F_NEXT) {
		if (unlikely(*nb_descs == 0 || desc->next >= vq_size))
			return NULL;
		(*nb_descs)--;
		desc = &vc_req->head[desc->next];
	}

	dlen = desc->len;
	inhdr = IOVA_TO_VVA(struct virtio_crypto_inhdr *, vc_req, desc->addr,
			&dlen, VHOST_ACCESS_WO);
	if (unlikely(!inhdr || dlen != desc->len))
		return NULL;

	return inhdr;
}
static __rte_always_inline int
move_desc(struct vring_desc *head, struct vring_desc **cur_desc,
		uint32_t size, uint32_t *nb_descs, uint32_t vq_size)
{
	struct vring_desc *desc = *cur_desc;
	int left = size - desc->len;

	while ((desc->flags & VRING_DESC_F_NEXT) && left > 0) {
		(*nb_descs)--;
		if (unlikely(*nb_descs == 0 || desc->next >= vq_size))
			return -1;

		desc = &head[desc->next];
		rte_prefetch0(&head[desc->next]);
		left -= desc->len;
	}

	if (unlikely(left > 0))
		return -1;

	if (unlikely(*nb_descs == 0))
		*cur_desc = NULL;
	else {
		if (unlikely(desc->next >= vq_size))
			return -1;
		*cur_desc = &head[desc->next];
	}

	return 0;
}
static __rte_always_inline void *
get_data_ptr(struct vhost_crypto_data_req *vc_req, struct vring_desc *cur_desc,
		uint8_t perm)
{
	void *data;
	uint64_t dlen = cur_desc->len;

	data = IOVA_TO_VVA(void *, vc_req, cur_desc->addr, &dlen, perm);
	if (unlikely(!data || dlen != cur_desc->len)) {
		VC_LOG_ERR("Failed to map object");
		return NULL;
	}

	return data;
}
static int
copy_data(void *dst_data, struct vhost_crypto_data_req *vc_req,
		struct vring_desc **cur_desc, uint32_t size,
		uint32_t *nb_descs, uint32_t vq_size)
{
	struct vring_desc *desc = *cur_desc;
	uint64_t remain, addr, dlen, len;
	uint32_t to_copy;
	uint8_t *data = dst_data;
	uint8_t *src;
	int left = size;

	to_copy = RTE_MIN(desc->len, (uint32_t)left);
	dlen = to_copy;
	src = IOVA_TO_VVA(uint8_t *, vc_req, desc->addr, &dlen,
			VHOST_ACCESS_RO);
	if (unlikely(!src || !dlen))
		return -1;

	rte_memcpy((uint8_t *)data, src, dlen);
	data += dlen;

	if (unlikely(dlen < to_copy)) {
		remain = to_copy - dlen;
		addr = desc->addr + dlen;

		while (remain) {
			len = remain;
			src = IOVA_TO_VVA(uint8_t *, vc_req, addr, &len,
					VHOST_ACCESS_RO);
			if (unlikely(!src || !len)) {
				VC_LOG_ERR("Failed to map descriptor");
				return -1;
			}

			rte_memcpy(data, src, len);
			addr += len;
			remain -= len;
			data += len;
		}
	}

	left -= to_copy;

	while ((desc->flags & VRING_DESC_F_NEXT) && left > 0) {
		if (unlikely(*nb_descs == 0 || desc->next >= vq_size)) {
			VC_LOG_ERR("Invalid descriptors");
			return -1;
		}
		(*nb_descs)--;

		desc = &vc_req->head[desc->next];
		rte_prefetch0(&vc_req->head[desc->next]);
		to_copy = RTE_MIN(desc->len, (uint32_t)left);
		dlen = to_copy;
		src = IOVA_TO_VVA(uint8_t *, vc_req, desc->addr, &dlen,
				VHOST_ACCESS_RO);
		if (unlikely(!src || !dlen)) {
			VC_LOG_ERR("Failed to map descriptor");
			return -1;
		}

		rte_memcpy(data, src, dlen);
		data += dlen;

		if (unlikely(dlen < to_copy)) {
			remain = to_copy - dlen;
			addr = desc->addr + dlen;

			while (remain) {
				len = remain;
				src = IOVA_TO_VVA(uint8_t *, vc_req, addr,
						&len, VHOST_ACCESS_RO);
				if (unlikely(!src || !len)) {
					VC_LOG_ERR("Failed to map descriptor");
					return -1;
				}

				rte_memcpy(data, src, len);
				addr += len;
				remain -= len;
				data += len;
			}
		}

		left -= to_copy;
	}

	if (unlikely(left > 0)) {
		VC_LOG_ERR("Incorrect virtio descriptor");
		return -1;
	}

	if (unlikely(*nb_descs == 0))
		*cur_desc = NULL;
	else {
		if (unlikely(desc->next >= vq_size))
			return -1;
		*cur_desc = &vc_req->head[desc->next];
	}

	return 0;
}
static void
write_back_data(struct vhost_crypto_data_req *vc_req)
{
	struct vhost_crypto_writeback_data *wb_data = vc_req->wb, *wb_last;

	while (wb_data) {
		rte_memcpy(wb_data->dst, wb_data->src, wb_data->len);
		wb_last = wb_data;
		wb_data = wb_data->next;
		rte_mempool_put(vc_req->wb_pool, wb_last);
	}
}
static void
free_wb_data(struct vhost_crypto_writeback_data *wb_data,
		struct rte_mempool *mp)
{
	struct vhost_crypto_writeback_data *next;

	/* put every element of the chain back into its mempool */
	while (wb_data != NULL) {
		next = wb_data->next;
		rte_mempool_put(mp, wb_data);
		wb_data = next;
	}
}
/**
 * The function allocates a vhost_crypto_writeback_data linked list
 * containing the source and destination data pointers for the write back
 * operation performed after the ops are dequeued from Cryptodev PMD queues.
 *
 * @param vc_req
 *   The vhost crypto data request pointer.
 * @param cur_desc
 *   The pointer to the current in-use descriptor pointer. The content of
 *   cur_desc is expected to be updated after the function execution.
 * @param end_wb_data
 *   The last write back data element to be returned. It is used only in
 *   cipher and hash chain operations.
 * @param src
 *   The source data pointer.
 * @param offset
 *   The offset into both source and destination data. For source data the
 *   offset is the number of bytes between src and the start point of the
 *   cipher operation. For destination data the offset is the number of bytes
 *   from *cur_desc->addr to the point where the src will be written to.
 * @param write_back_len
 *   The length of the write back data.
 * @return
 *   The pointer to the start of the write back data linked list.
 */
static struct vhost_crypto_writeback_data *
prepare_write_back_data(struct vhost_crypto_data_req *vc_req,
		struct vring_desc **cur_desc,
		struct vhost_crypto_writeback_data **end_wb_data,
		uint8_t *src,
		uint32_t offset,
		uint64_t write_back_len,
		uint32_t *nb_descs, uint32_t vq_size)
{
	struct vhost_crypto_writeback_data *wb_data, *head = NULL;
	struct vring_desc *desc = *cur_desc;
	uint64_t dlen;
	uint8_t *dst;
	int ret;

	ret = rte_mempool_get(vc_req->wb_pool, (void **)&head);
	if (unlikely(ret < 0)) {
		VC_LOG_ERR("no memory");
		goto error_exit;
	}

	wb_data = head;

	if (likely(desc->len > offset)) {
		wb_data->src = src + offset;
		dlen = desc->len;
		dst = IOVA_TO_VVA(uint8_t *, vc_req, desc->addr,
			&dlen, VHOST_ACCESS_RW) + offset;
		if (unlikely(!dst || dlen != desc->len)) {
			VC_LOG_ERR("Failed to map descriptor");
			goto error_exit;
		}

		wb_data->dst = dst;
		wb_data->len = desc->len - offset;
		write_back_len -= wb_data->len;
		src += offset + wb_data->len;
		offset = 0;

		if (unlikely(write_back_len)) {
			ret = rte_mempool_get(vc_req->wb_pool,
					(void **)&(wb_data->next));
			if (unlikely(ret < 0)) {
				VC_LOG_ERR("no memory");
				goto error_exit;
			}
			wb_data = wb_data->next;
		} else
			wb_data->next = NULL;
	} else
		offset -= desc->len;

	while (write_back_len) {
		if (unlikely(*nb_descs == 0 || desc->next >= vq_size)) {
			VC_LOG_ERR("Invalid descriptors");
			goto error_exit;
		}
		(*nb_descs)--;

		desc = &vc_req->head[desc->next];
		if (unlikely(!(desc->flags & VRING_DESC_F_WRITE))) {
			VC_LOG_ERR("incorrect descriptor");
			goto error_exit;
		}

		if (desc->len <= offset) {
			offset -= desc->len;
			continue;
		}

		dlen = desc->len;
		dst = IOVA_TO_VVA(uint8_t *, vc_req, desc->addr, &dlen,
				VHOST_ACCESS_RW) + offset;
		if (unlikely(dst == NULL || dlen != desc->len)) {
			VC_LOG_ERR("Failed to map descriptor");
			goto error_exit;
		}

		wb_data->src = src;
		wb_data->dst = dst;
		wb_data->len = RTE_MIN(desc->len - offset, write_back_len);
		write_back_len -= wb_data->len;
		src += wb_data->len;
		offset = 0;

		if (write_back_len) {
			ret = rte_mempool_get(vc_req->wb_pool,
					(void **)&(wb_data->next));
			if (unlikely(ret < 0)) {
				VC_LOG_ERR("no memory");
				goto error_exit;
			}
			wb_data = wb_data->next;
		} else
			wb_data->next = NULL;
	}

	if (unlikely(*nb_descs == 0))
		*cur_desc = NULL;
	else {
		if (unlikely(desc->next >= vq_size))
			goto error_exit;
		*cur_desc = &vc_req->head[desc->next];
	}

	*end_wb_data = wb_data;

	return head;

error_exit:
	if (head)
		free_wb_data(head, vc_req->wb_pool);

	return NULL;
}
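/*
 * Worked example of the offset semantics above (illustrative numbers):
 * for a chain request whose cipher output starts 16 bytes into the source
 * buffer, the caller passes offset = 16, so the first wb element reads
 * from src + 16 and writes 16 bytes past the start of the first writable
 * descriptor's mapping.
 */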
static uint8_t
prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
		struct vhost_crypto_data_req *vc_req,
		struct virtio_crypto_cipher_data_req *cipher,
		struct vring_desc *cur_desc,
		uint32_t *nb_descs, uint32_t vq_size)
{
	struct vring_desc *desc = cur_desc;
	struct vhost_crypto_writeback_data *ewb = NULL;
	struct rte_mbuf *m_src = op->sym->m_src, *m_dst = op->sym->m_dst;
	uint8_t *iv_data = rte_crypto_op_ctod_offset(op, uint8_t *, IV_OFFSET);
	uint8_t ret = 0;

	/* iv */
	if (unlikely(copy_data(iv_data, vc_req, &desc, cipher->para.iv_len,
			nb_descs, vq_size) < 0)) {
		ret = VIRTIO_CRYPTO_BADMSG;
		goto error_exit;
	}

	m_src->data_len = cipher->para.src_data_len;

	switch (vcrypto->option) {
	case RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE:
		m_src->buf_iova = gpa_to_hpa(vcrypto->dev, desc->addr,
				cipher->para.src_data_len);
		m_src->buf_addr = get_data_ptr(vc_req, desc, VHOST_ACCESS_RO);
		if (unlikely(m_src->buf_iova == 0 ||
				m_src->buf_addr == NULL)) {
			VC_LOG_ERR("zero_copy may fail due to cross page data");
			ret = VIRTIO_CRYPTO_ERR;
			goto error_exit;
		}

		if (unlikely(move_desc(vc_req->head, &desc,
				cipher->para.src_data_len, nb_descs,
				vq_size) < 0)) {
			VC_LOG_ERR("Incorrect descriptor");
			ret = VIRTIO_CRYPTO_ERR;
			goto error_exit;
		}

		break;
	case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE:
		vc_req->wb_pool = vcrypto->wb_pool;

		if (unlikely(cipher->para.src_data_len >
				RTE_MBUF_DEFAULT_BUF_SIZE)) {
			VC_LOG_ERR("Not enough space to do data copy");
			ret = VIRTIO_CRYPTO_ERR;
			goto error_exit;
		}
		if (unlikely(copy_data(rte_pktmbuf_mtod(m_src, uint8_t *),
				vc_req, &desc, cipher->para.src_data_len,
				nb_descs, vq_size) < 0)) {
			ret = VIRTIO_CRYPTO_BADMSG;
			goto error_exit;
		}
		break;
	default:
		ret = VIRTIO_CRYPTO_BADMSG;
		goto error_exit;
	}

	/* dst */
	desc = find_write_desc(vc_req->head, desc, nb_descs, vq_size);
	if (unlikely(!desc)) {
		VC_LOG_ERR("Cannot find write location");
		ret = VIRTIO_CRYPTO_BADMSG;
		goto error_exit;
	}

	switch (vcrypto->option) {
	case RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE:
		m_dst->buf_iova = gpa_to_hpa(vcrypto->dev,
				desc->addr, cipher->para.dst_data_len);
		m_dst->buf_addr = get_data_ptr(vc_req, desc, VHOST_ACCESS_RW);
		if (unlikely(m_dst->buf_iova == 0 || m_dst->buf_addr == NULL)) {
			VC_LOG_ERR("zero_copy may fail due to cross page data");
			ret = VIRTIO_CRYPTO_ERR;
			goto error_exit;
		}

		if (unlikely(move_desc(vc_req->head, &desc,
				cipher->para.dst_data_len,
				nb_descs, vq_size) < 0)) {
			VC_LOG_ERR("Incorrect descriptor");
			ret = VIRTIO_CRYPTO_ERR;
			goto error_exit;
		}

		m_dst->data_len = cipher->para.dst_data_len;
		break;
	case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE:
		vc_req->wb = prepare_write_back_data(vc_req, &desc, &ewb,
				rte_pktmbuf_mtod(m_src, uint8_t *), 0,
				cipher->para.dst_data_len, nb_descs, vq_size);
		if (unlikely(vc_req->wb == NULL)) {
			ret = VIRTIO_CRYPTO_ERR;
			goto error_exit;
		}

		break;
	default:
		ret = VIRTIO_CRYPTO_BADMSG;
		goto error_exit;
	}

	op->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
	op->sess_type = RTE_CRYPTO_OP_WITH_SESSION;

	op->sym->cipher.data.offset = 0;
	op->sym->cipher.data.length = cipher->para.src_data_len;

	vc_req->inhdr = get_data_ptr(vc_req, desc, VHOST_ACCESS_WO);
	if (unlikely(vc_req->inhdr == NULL)) {
		ret = VIRTIO_CRYPTO_BADMSG;
		goto error_exit;
	}

	vc_req->inhdr->status = VIRTIO_CRYPTO_OK;
	vc_req->len = cipher->para.dst_data_len + INHDR_LEN;

	return 0;

error_exit:
	if (vc_req->wb)
		free_wb_data(vc_req->wb, vc_req->wb_pool);

	vc_req->len = INHDR_LEN;
	return ret;
}
static uint8_t
prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
		struct vhost_crypto_data_req *vc_req,
		struct virtio_crypto_alg_chain_data_req *chain,
		struct vring_desc *cur_desc,
		uint32_t *nb_descs, uint32_t vq_size)
{
	struct vring_desc *desc = cur_desc, *digest_desc;
	struct vhost_crypto_writeback_data *ewb = NULL, *ewb2 = NULL;
	struct rte_mbuf *m_src = op->sym->m_src, *m_dst = op->sym->m_dst;
	uint8_t *iv_data = rte_crypto_op_ctod_offset(op, uint8_t *, IV_OFFSET);
	uint32_t digest_offset;
	void *digest_addr;
	uint8_t ret = 0;

	/* iv */
	if (unlikely(copy_data(iv_data, vc_req, &desc,
			chain->para.iv_len, nb_descs, vq_size) < 0)) {
		ret = VIRTIO_CRYPTO_BADMSG;
		goto error_exit;
	}

	m_src->data_len = chain->para.src_data_len;

	switch (vcrypto->option) {
	case RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE:
		m_dst->data_len = chain->para.dst_data_len;

		m_src->buf_iova = gpa_to_hpa(vcrypto->dev, desc->addr,
				chain->para.src_data_len);
		m_src->buf_addr = get_data_ptr(vc_req, desc, VHOST_ACCESS_RO);
		if (unlikely(m_src->buf_iova == 0 || m_src->buf_addr == NULL)) {
			VC_LOG_ERR("zero_copy may fail due to cross page data");
			ret = VIRTIO_CRYPTO_ERR;
			goto error_exit;
		}

		if (unlikely(move_desc(vc_req->head, &desc,
				chain->para.src_data_len,
				nb_descs, vq_size) < 0)) {
			VC_LOG_ERR("Incorrect descriptor");
			ret = VIRTIO_CRYPTO_ERR;
			goto error_exit;
		}
		break;
	case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE:
		vc_req->wb_pool = vcrypto->wb_pool;

		if (unlikely(chain->para.src_data_len >
				RTE_MBUF_DEFAULT_BUF_SIZE)) {
			VC_LOG_ERR("Not enough space to do data copy");
			ret = VIRTIO_CRYPTO_ERR;
			goto error_exit;
		}
		if (unlikely(copy_data(rte_pktmbuf_mtod(m_src, uint8_t *),
				vc_req, &desc, chain->para.src_data_len,
				nb_descs, vq_size) < 0)) {
			ret = VIRTIO_CRYPTO_BADMSG;
			goto error_exit;
		}

		break;
	default:
		ret = VIRTIO_CRYPTO_BADMSG;
		goto error_exit;
	}

	/* dst */
	desc = find_write_desc(vc_req->head, desc, nb_descs, vq_size);
	if (unlikely(!desc)) {
		VC_LOG_ERR("Cannot find write location");
		ret = VIRTIO_CRYPTO_BADMSG;
		goto error_exit;
	}

	switch (vcrypto->option) {
	case RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE:
		m_dst->buf_iova = gpa_to_hpa(vcrypto->dev,
				desc->addr, chain->para.dst_data_len);
		m_dst->buf_addr = get_data_ptr(vc_req, desc, VHOST_ACCESS_RW);
		if (unlikely(m_dst->buf_iova == 0 || m_dst->buf_addr == NULL)) {
			VC_LOG_ERR("zero_copy may fail due to cross page data");
			ret = VIRTIO_CRYPTO_ERR;
			goto error_exit;
		}

		if (unlikely(move_desc(vc_req->head, &desc,
				chain->para.dst_data_len,
				nb_descs, vq_size) < 0)) {
			VC_LOG_ERR("Incorrect descriptor");
			ret = VIRTIO_CRYPTO_ERR;
			goto error_exit;
		}

		/* digest */
		op->sym->auth.digest.phys_addr = gpa_to_hpa(vcrypto->dev,
				desc->addr, chain->para.hash_result_len);
		op->sym->auth.digest.data = get_data_ptr(vc_req, desc,
				VHOST_ACCESS_RW);
		if (unlikely(op->sym->auth.digest.phys_addr == 0)) {
			VC_LOG_ERR("zero_copy may fail due to cross page data");
			ret = VIRTIO_CRYPTO_ERR;
			goto error_exit;
		}

		if (unlikely(move_desc(vc_req->head, &desc,
				chain->para.hash_result_len,
				nb_descs, vq_size) < 0)) {
			VC_LOG_ERR("Incorrect descriptor");
			ret = VIRTIO_CRYPTO_ERR;
			goto error_exit;
		}

		break;
	case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE:
		vc_req->wb = prepare_write_back_data(vc_req, &desc, &ewb,
				rte_pktmbuf_mtod(m_src, uint8_t *),
				chain->para.cipher_start_src_offset,
				chain->para.dst_data_len -
				chain->para.cipher_start_src_offset,
				nb_descs, vq_size);
		if (unlikely(vc_req->wb == NULL)) {
			ret = VIRTIO_CRYPTO_ERR;
			goto error_exit;
		}

		digest_offset = m_src->data_len;
		digest_addr = rte_pktmbuf_mtod_offset(m_src, void *,
				digest_offset);
		digest_desc = desc;

		/** create a wb_data for digest */
		ewb->next = prepare_write_back_data(vc_req, &desc, &ewb2,
				digest_addr, 0, chain->para.hash_result_len,
				nb_descs, vq_size);
		if (unlikely(ewb->next == NULL)) {
			ret = VIRTIO_CRYPTO_ERR;
			goto error_exit;
		}

		if (unlikely(copy_data(digest_addr, vc_req, &digest_desc,
				chain->para.hash_result_len,
				nb_descs, vq_size) < 0)) {
			ret = VIRTIO_CRYPTO_BADMSG;
			goto error_exit;
		}

		op->sym->auth.digest.data = digest_addr;
		op->sym->auth.digest.phys_addr = rte_pktmbuf_iova_offset(m_src,
				digest_offset);
		break;
	default:
		ret = VIRTIO_CRYPTO_BADMSG;
		goto error_exit;
	}

	/* record inhdr */
	vc_req->inhdr = get_data_ptr(vc_req, desc, VHOST_ACCESS_WO);
	if (unlikely(vc_req->inhdr == NULL)) {
		ret = VIRTIO_CRYPTO_BADMSG;
		goto error_exit;
	}

	vc_req->inhdr->status = VIRTIO_CRYPTO_OK;

	op->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
	op->sess_type = RTE_CRYPTO_OP_WITH_SESSION;

	op->sym->cipher.data.offset = chain->para.cipher_start_src_offset;
	op->sym->cipher.data.length = chain->para.src_data_len -
			chain->para.cipher_start_src_offset;

	op->sym->auth.data.offset = chain->para.hash_start_src_offset;
	op->sym->auth.data.length = chain->para.len_to_hash;

	vc_req->len = chain->para.dst_data_len + chain->para.hash_result_len +
			INHDR_LEN;
	return 0;

error_exit:
	if (vc_req->wb)
		free_wb_data(vc_req->wb, vc_req->wb_pool);
	vc_req->len = INHDR_LEN;
	return ret;
}
/**
 * Process one virtio-crypto request from a descriptor chain.
 */
static __rte_always_inline int
vhost_crypto_process_one_req(struct vhost_crypto *vcrypto,
		struct vhost_virtqueue *vq, struct rte_crypto_op *op,
		struct vring_desc *head, uint16_t desc_idx)
{
	struct vhost_crypto_data_req *vc_req = rte_mbuf_to_priv(op->sym->m_src);
	struct rte_cryptodev_sym_session *session;
	struct virtio_crypto_op_data_req *req, tmp_req;
	struct virtio_crypto_inhdr *inhdr;
	struct vring_desc *desc = NULL;
	uint64_t session_id;
	uint64_t dlen;
	uint32_t nb_descs = vq->size;
	int err = 0;

	vc_req->desc_idx = desc_idx;
	vc_req->dev = vcrypto->dev;
	vc_req->vq = vq;

	if (likely(head->flags & VRING_DESC_F_INDIRECT)) {
		dlen = head->len;
		nb_descs = dlen / sizeof(struct vring_desc);
		/* drop invalid descriptors */
		if (unlikely(nb_descs > vq->size))
			return -1;
		desc = IOVA_TO_VVA(struct vring_desc *, vc_req, head->addr,
				&dlen, VHOST_ACCESS_RO);
		if (unlikely(!desc || dlen != head->len))
			return -1;

		head = desc;
	} else {
		desc = head;
	}

	vc_req->head = head;
	vc_req->zero_copy = vcrypto->option;

	req = get_data_ptr(vc_req, desc, VHOST_ACCESS_RO);
	if (unlikely(req == NULL)) {
		switch (vcrypto->option) {
		case RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE:
			err = VIRTIO_CRYPTO_BADMSG;
			VC_LOG_ERR("Invalid descriptor");
			goto error_exit;
		case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE:
			req = &tmp_req;
			if (unlikely(copy_data(req, vc_req, &desc, sizeof(*req),
					&nb_descs, vq->size) < 0)) {
				err = VIRTIO_CRYPTO_BADMSG;
				VC_LOG_ERR("Invalid descriptor");
				goto error_exit;
			}
			break;
		default:
			err = VIRTIO_CRYPTO_ERR;
			VC_LOG_ERR("Invalid option");
			goto error_exit;
		}
	} else {
		if (unlikely(move_desc(vc_req->head, &desc,
				sizeof(*req), &nb_descs, vq->size) < 0)) {
			VC_LOG_ERR("Incorrect descriptor");
			goto error_exit;
		}
	}

	switch (req->header.opcode) {
	case VIRTIO_CRYPTO_CIPHER_ENCRYPT:
	case VIRTIO_CRYPTO_CIPHER_DECRYPT:
		session_id = req->header.session_id;

		/* one branch to avoid unnecessary table lookup */
		if (vcrypto->cache_session_id != session_id) {
			err = rte_hash_lookup_data(vcrypto->session_map,
					&session_id, (void **)&session);
			if (unlikely(err < 0)) {
				err = VIRTIO_CRYPTO_ERR;
				VC_LOG_ERR("Failed to find session %"PRIu64,
						session_id);
				goto error_exit;
			}

			vcrypto->cache_session = session;
			vcrypto->cache_session_id = session_id;
		}

		session = vcrypto->cache_session;

		err = rte_crypto_op_attach_sym_session(op, session);
		if (unlikely(err < 0)) {
			err = VIRTIO_CRYPTO_ERR;
			VC_LOG_ERR("Failed to attach session to op");
			goto error_exit;
		}

		switch (req->u.sym_req.op_type) {
		case VIRTIO_CRYPTO_SYM_OP_NONE:
			err = VIRTIO_CRYPTO_NOTSUPP;
			break;
		case VIRTIO_CRYPTO_SYM_OP_CIPHER:
			err = prepare_sym_cipher_op(vcrypto, op, vc_req,
					&req->u.sym_req.u.cipher, desc,
					&nb_descs, vq->size);
			break;
		case VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING:
			err = prepare_sym_chain_op(vcrypto, op, vc_req,
					&req->u.sym_req.u.chain, desc,
					&nb_descs, vq->size);
			break;
		}
		if (unlikely(err != 0)) {
			VC_LOG_ERR("Failed to process sym request");
			goto error_exit;
		}
		break;
	default:
		VC_LOG_ERR("Unsupported symmetric crypto request type %u",
				req->header.opcode);
		goto error_exit;
	}

	return 0;

error_exit:

	inhdr = reach_inhdr(vc_req, desc, &nb_descs, vq->size);
	if (likely(inhdr != NULL))
		inhdr->status = (uint8_t)err;

	return -1;
}
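/*
 * For orientation, a typical virtio-crypto request chain processed above
 * is laid out as follows (the exact split varies per request):
 *
 *	desc[0]      virtio_crypto_op_data_req  (device-readable)
 *	desc[1..n]   IV and source data         (device-readable)
 *	desc[n+1..]  destination data, digest   (device-writable)
 *	desc[last]   virtio_crypto_inhdr        (device-writable status byte)
 *
 * find_write_desc() locates the first writable descriptor and
 * reach_inhdr() walks to the last one, which carries the status.
 */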
static __rte_always_inline struct vhost_virtqueue *
vhost_crypto_finalize_one_request(struct rte_crypto_op *op,
		struct vhost_virtqueue *old_vq)
{
	struct rte_mbuf *m_src = op->sym->m_src;
	struct rte_mbuf *m_dst = op->sym->m_dst;
	struct vhost_crypto_data_req *vc_req = rte_mbuf_to_priv(m_src);
	uint16_t desc_idx;

	if (unlikely(!vc_req)) {
		VC_LOG_ERR("Failed to retrieve vc_req");
		return NULL;
	}

	if (old_vq && (vc_req->vq != old_vq))
		return vc_req->vq;

	desc_idx = vc_req->desc_idx;

	if (unlikely(op->status != RTE_CRYPTO_OP_STATUS_SUCCESS))
		vc_req->inhdr->status = VIRTIO_CRYPTO_ERR;
	else {
		if (vc_req->zero_copy == 0)
			write_back_data(vc_req);
	}

	vc_req->vq->used->ring[desc_idx].id = desc_idx;
	vc_req->vq->used->ring[desc_idx].len = vc_req->len;

	rte_mempool_put(m_src->pool, (void *)m_src);

	if (m_dst)
		rte_mempool_put(m_dst->pool, (void *)m_dst);

	return vc_req->vq;
}
static __rte_always_inline uint16_t
vhost_crypto_complete_one_vm_requests(struct rte_crypto_op **ops,
		uint16_t nb_ops, int *callfd)
{
	uint16_t processed = 1;
	struct vhost_virtqueue *vq, *tmp_vq;

	if (unlikely(nb_ops == 0))
		return 0;

	vq = vhost_crypto_finalize_one_request(ops[0], NULL);
	if (unlikely(vq == NULL))
		return 0;

	while ((processed < nb_ops)) {
		tmp_vq = vhost_crypto_finalize_one_request(ops[processed],
				vq);

		if (unlikely(vq != tmp_vq))
			break;

		processed++;
	}

	*callfd = vq->callfd;

	*(volatile uint16_t *)&vq->used->idx += processed;

	return processed;
}
int
rte_vhost_crypto_create(int vid, uint8_t cryptodev_id,
		struct rte_mempool *sess_pool,
		struct rte_mempool *sess_priv_pool,
		int socket_id)
{
	struct virtio_net *dev = get_device(vid);
	struct rte_hash_parameters params = {0};
	struct vhost_crypto *vcrypto;
	char name[128];
	int ret;

	if (!dev) {
		VC_LOG_ERR("Invalid vid %i", vid);
		return -EINVAL;
	}

	ret = rte_vhost_driver_set_features(dev->ifname,
			VIRTIO_CRYPTO_FEATURES);
	if (ret < 0) {
		VC_LOG_ERR("Error setting features");
		return -1;
	}

	vcrypto = rte_zmalloc_socket(NULL, sizeof(*vcrypto),
			RTE_CACHE_LINE_SIZE, socket_id);
	if (!vcrypto) {
		VC_LOG_ERR("Insufficient memory");
		return -ENOMEM;
	}

	vcrypto->sess_pool = sess_pool;
	vcrypto->sess_priv_pool = sess_priv_pool;
	vcrypto->cid = cryptodev_id;
	vcrypto->cache_session_id = UINT64_MAX;
	vcrypto->last_session_id = 1;
	vcrypto->dev = dev;
	vcrypto->option = RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE;

	snprintf(name, 127, "HASH_VHOST_CRYPT_%u", (uint32_t)vid);
	params.name = name;
	params.entries = VHOST_CRYPTO_SESSION_MAP_ENTRIES;
	params.hash_func = rte_jhash;
	params.key_len = sizeof(uint64_t);
	params.socket_id = socket_id;
	vcrypto->session_map = rte_hash_create(&params);
	if (!vcrypto->session_map) {
		VC_LOG_ERR("Failed to create session map");
		ret = -ENOMEM;
		goto error_exit;
	}

	snprintf(name, 127, "MBUF_POOL_VM_%u", (uint32_t)vid);
	vcrypto->mbuf_pool = rte_pktmbuf_pool_create(name,
			VHOST_CRYPTO_MBUF_POOL_SIZE, 512,
			sizeof(struct vhost_crypto_data_req),
			RTE_MBUF_DEFAULT_DATAROOM * 2 + RTE_PKTMBUF_HEADROOM,
			rte_socket_id());
	if (!vcrypto->mbuf_pool) {
		VC_LOG_ERR("Failed to create mbuf pool");
		ret = -ENOMEM;
		goto error_exit;
	}

	snprintf(name, 127, "WB_POOL_VM_%u", (uint32_t)vid);
	vcrypto->wb_pool = rte_mempool_create(name,
			VHOST_CRYPTO_MBUF_POOL_SIZE,
			sizeof(struct vhost_crypto_writeback_data),
			128, 0, NULL, NULL, NULL, NULL,
			rte_socket_id(), 0);
	if (!vcrypto->wb_pool) {
		VC_LOG_ERR("Failed to create mempool");
		ret = -ENOMEM;
		goto error_exit;
	}

	dev->extern_data = vcrypto;
	dev->extern_ops.pre_msg_handle = NULL;
	dev->extern_ops.post_msg_handle = vhost_crypto_msg_post_handler;

	return 0;

error_exit:
	if (vcrypto->session_map)
		rte_hash_free(vcrypto->session_map);
	if (vcrypto->mbuf_pool)
		rte_mempool_free(vcrypto->mbuf_pool);

	rte_free(vcrypto);

	return ret;
}
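/*
 * Typical setup sketch (illustrative only; error handling omitted and the
 * pool sizes are the caller's choice, not mandated by this API):
 *
 *	struct rte_mempool *sess_pool = rte_cryptodev_sym_session_pool_create(
 *			"sess_pool", 128, 0, 0, 0, rte_socket_id());
 *	struct rte_mempool *sess_priv_pool = rte_mempool_create(
 *			"sess_priv_pool", 128,
 *			rte_cryptodev_sym_get_private_session_size(cid),
 *			0, 0, NULL, NULL, NULL, NULL, rte_socket_id(), 0);
 *
 *	rte_vhost_crypto_create(vid, cid, sess_pool, sess_priv_pool,
 *			rte_socket_id());
 *	rte_vhost_crypto_set_zero_copy(vid,
 *			RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE);
 */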
int
rte_vhost_crypto_free(int vid)
{
	struct virtio_net *dev = get_device(vid);
	struct vhost_crypto *vcrypto;

	if (unlikely(dev == NULL)) {
		VC_LOG_ERR("Invalid vid %i", vid);
		return -EINVAL;
	}

	vcrypto = dev->extern_data;
	if (unlikely(vcrypto == NULL)) {
		VC_LOG_ERR("Cannot find required data, is it initialized?");
		return -ENOENT;
	}

	rte_hash_free(vcrypto->session_map);
	rte_mempool_free(vcrypto->mbuf_pool);
	rte_mempool_free(vcrypto->wb_pool);
	rte_free(vcrypto);

	dev->extern_data = NULL;
	dev->extern_ops.pre_msg_handle = NULL;
	dev->extern_ops.post_msg_handle = NULL;

	return 0;
}
int
rte_vhost_crypto_set_zero_copy(int vid, enum rte_vhost_crypto_zero_copy option)
{
	struct virtio_net *dev = get_device(vid);
	struct vhost_crypto *vcrypto;

	if (unlikely(dev == NULL)) {
		VC_LOG_ERR("Invalid vid %i", vid);
		return -EINVAL;
	}

	if (unlikely((uint32_t)option >=
				RTE_VHOST_CRYPTO_MAX_ZERO_COPY_OPTIONS)) {
		VC_LOG_ERR("Invalid option %i", option);
		return -EINVAL;
	}

	vcrypto = (struct vhost_crypto *)dev->extern_data;
	if (unlikely(vcrypto == NULL)) {
		VC_LOG_ERR("Cannot find required data, is it initialized?");
		return -ENOENT;
	}

	if (vcrypto->option == (uint8_t)option)
		return 0;

	if (!(rte_mempool_full(vcrypto->mbuf_pool)) ||
			!(rte_mempool_full(vcrypto->wb_pool))) {
		VC_LOG_ERR("Cannot update zero copy as mempool is not full");
		return -EINVAL;
	}

	if (option == RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE) {
		char name[128];

		snprintf(name, 127, "WB_POOL_VM_%u", (uint32_t)vid);
		vcrypto->wb_pool = rte_mempool_create(name,
				VHOST_CRYPTO_MBUF_POOL_SIZE,
				sizeof(struct vhost_crypto_writeback_data),
				128, 0, NULL, NULL, NULL, NULL,
				rte_socket_id(), 0);
		if (!vcrypto->wb_pool) {
			VC_LOG_ERR("Failed to create mempool");
			return -ENOMEM;
		}
	} else {
		rte_mempool_free(vcrypto->wb_pool);
		vcrypto->wb_pool = NULL;
	}

	vcrypto->option = (uint8_t)option;

	return 0;
}
uint16_t
rte_vhost_crypto_fetch_requests(int vid, uint32_t qid,
		struct rte_crypto_op **ops, uint16_t nb_ops)
{
	struct rte_mbuf *mbufs[VHOST_CRYPTO_MAX_BURST_SIZE * 2];
	struct virtio_net *dev = get_device(vid);
	struct vhost_crypto *vcrypto;
	struct vhost_virtqueue *vq;
	uint16_t avail_idx;
	uint16_t start_idx;
	uint16_t count;
	uint16_t i = 0;

	if (unlikely(dev == NULL)) {
		VC_LOG_ERR("Invalid vid %i", vid);
		return 0;
	}

	if (unlikely(qid >= VHOST_MAX_QUEUE_PAIRS)) {
		VC_LOG_ERR("Invalid qid %u", qid);
		return 0;
	}

	vcrypto = (struct vhost_crypto *)dev->extern_data;
	if (unlikely(vcrypto == NULL)) {
		VC_LOG_ERR("Cannot find required data, is it initialized?");
		return 0;
	}

	vq = dev->virtqueue[qid];

	avail_idx = *((volatile uint16_t *)&vq->avail->idx);
	start_idx = vq->last_used_idx;
	count = avail_idx - start_idx;
	count = RTE_MIN(count, VHOST_CRYPTO_MAX_BURST_SIZE);
	count = RTE_MIN(count, nb_ops);

	if (unlikely(count == 0))
		return 0;

	/* for zero copy, we need 2 empty mbufs for src and dst, otherwise
	 * we need only 1 mbuf as src and dst
	 */
	switch (vcrypto->option) {
	case RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE:
		if (unlikely(rte_mempool_get_bulk(vcrypto->mbuf_pool,
				(void **)mbufs, count * 2) < 0)) {
			VC_LOG_ERR("Insufficient memory");
			return 0;
		}

		for (i = 0; i < count; i++) {
			uint16_t used_idx = (start_idx + i) & (vq->size - 1);
			uint16_t desc_idx = vq->avail->ring[used_idx];
			struct vring_desc *head = &vq->desc[desc_idx];
			struct rte_crypto_op *op = ops[i];

			op->sym->m_src = mbufs[i * 2];
			op->sym->m_dst = mbufs[i * 2 + 1];
			op->sym->m_src->data_off = 0;
			op->sym->m_dst->data_off = 0;

			if (unlikely(vhost_crypto_process_one_req(vcrypto, vq,
					op, head, desc_idx) < 0))
				break;
		}

		if (unlikely(i < count))
			rte_mempool_put_bulk(vcrypto->mbuf_pool,
					(void **)&mbufs[i * 2],
					(count - i) * 2);

		break;

	case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE:
		if (unlikely(rte_mempool_get_bulk(vcrypto->mbuf_pool,
				(void **)mbufs, count) < 0)) {
			VC_LOG_ERR("Insufficient memory");
			return 0;
		}

		for (i = 0; i < count; i++) {
			uint16_t used_idx = (start_idx + i) & (vq->size - 1);
			uint16_t desc_idx = vq->avail->ring[used_idx];
			struct vring_desc *head = &vq->desc[desc_idx];
			struct rte_crypto_op *op = ops[i];

			op->sym->m_src = mbufs[i];
			op->sym->m_dst = NULL;
			op->sym->m_src->data_off = 0;

			if (unlikely(vhost_crypto_process_one_req(vcrypto, vq,
					op, head, desc_idx) < 0))
				break;
		}

		if (unlikely(i < count))
			rte_mempool_put_bulk(vcrypto->mbuf_pool,
					(void **)&mbufs[i],
					count - i);

		break;
	}

	vq->last_used_idx += i;

	return i;
}
uint16_t
rte_vhost_crypto_finalize_requests(struct rte_crypto_op **ops,
		uint16_t nb_ops, int *callfds, uint16_t *nb_callfds)
{
	struct rte_crypto_op **tmp_ops = ops;
	uint16_t count = 0, left = nb_ops;
	int callfd;
	uint16_t idx = 0;

	while (left) {
		count = vhost_crypto_complete_one_vm_requests(tmp_ops, left,
				&callfd);
		if (unlikely(count == 0))
			break;

		left -= count;
		tmp_ops = &tmp_ops[count];

		callfds[idx++] = callfd;

		if (unlikely(idx >= VIRTIO_CRYPTO_MAX_NUM_BURST_VQS)) {
			VC_LOG_ERR("Too many vqs");
			break;
		}
	}

	*nb_callfds = idx;

	return nb_ops - left;
}
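/*
 * End-to-end datapath sketch (illustrative only; assumes the application
 * configured cryptodev "cid" with queue pair 0 and created an op mempool
 * "cop_pool"):
 *
 *	struct rte_crypto_op *ops[VHOST_CRYPTO_MAX_BURST_SIZE];
 *	int callfds[VIRTIO_CRYPTO_MAX_NUM_BURST_VQS];
 *	uint16_t nb, done, n_fds, j;
 *
 *	rte_crypto_op_bulk_alloc(cop_pool, RTE_CRYPTO_OP_TYPE_SYMMETRIC,
 *			ops, VHOST_CRYPTO_MAX_BURST_SIZE);
 *	nb = rte_vhost_crypto_fetch_requests(vid, qid, ops,
 *			VHOST_CRYPTO_MAX_BURST_SIZE);
 *	nb = rte_cryptodev_enqueue_burst(cid, 0, ops, nb);
 *	done = rte_cryptodev_dequeue_burst(cid, 0, ops, nb);
 *	done = rte_vhost_crypto_finalize_requests(ops, done, callfds, &n_fds);
 *	for (j = 0; j < n_fds; j++)
 *		eventfd_write(callfds[j], (eventfd_t)1);
 */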