/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017-2018 Intel Corporation
 */
#include <rte_malloc.h>
#include <rte_hash.h>
#include <rte_jhash.h>
#include <rte_mbuf.h>
#include <rte_cryptodev.h>

#include "rte_vhost_crypto.h"
#include "vhost.h"
#include "vhost_user.h"
#include "virtio_crypto.h"
#define INHDR_LEN (sizeof(struct virtio_crypto_inhdr))
#define IV_OFFSET (sizeof(struct rte_crypto_op) + \
		sizeof(struct rte_crypto_sym_op))
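
/*
 * Each rte_crypto_op is allocated with private room after the fixed
 * rte_crypto_op + rte_crypto_sym_op pair, and the cipher IV lives there at
 * IV_OFFSET. A minimal sketch of how that area is reached (same pattern as
 * the prepare_sym_*_op() helpers below; "iv_len" is an illustrative
 * placeholder, not a field defined here):
 *
 *	uint8_t *iv = rte_crypto_op_ctod_offset(op, uint8_t *, IV_OFFSET);
 *	// copy iv_len bytes of the guest-supplied IV into iv[]
 */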
#ifdef RTE_LIBRTE_VHOST_DEBUG
#define VC_LOG_ERR(fmt, args...) \
	RTE_LOG(ERR, USER1, "[%s] %s() line %u: " fmt "\n", \
		"Vhost-Crypto", __func__, __LINE__, ## args)
#define VC_LOG_INFO(fmt, args...) \
	RTE_LOG(INFO, USER1, "[%s] %s() line %u: " fmt "\n", \
		"Vhost-Crypto", __func__, __LINE__, ## args)
#define VC_LOG_DBG(fmt, args...) \
	RTE_LOG(DEBUG, USER1, "[%s] %s() line %u: " fmt "\n", \
		"Vhost-Crypto", __func__, __LINE__, ## args)
#else
#define VC_LOG_ERR(fmt, args...) \
	RTE_LOG(ERR, USER1, "[VHOST-Crypto]: " fmt "\n", ## args)
#define VC_LOG_INFO(fmt, args...) \
	RTE_LOG(INFO, USER1, "[VHOST-Crypto]: " fmt "\n", ## args)
#define VC_LOG_DBG(fmt, args...)
#endif
#define VIRTIO_CRYPTO_FEATURES ((1 << VIRTIO_F_NOTIFY_ON_EMPTY) | \
		(1 << VIRTIO_RING_F_INDIRECT_DESC) | \
		(1 << VIRTIO_RING_F_EVENT_IDX) | \
		(1 << VIRTIO_CRYPTO_SERVICE_CIPHER) | \
		(1 << VIRTIO_CRYPTO_SERVICE_MAC) | \
		(1 << VIRTIO_NET_F_CTRL_VQ) | \
		(1 << VHOST_USER_PROTOCOL_F_CONFIG))
#define IOVA_TO_VVA(t, r, a, l, p) \
	((t)(uintptr_t)vhost_iova_to_vva(r->dev, r->vq, a, l, p))
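
/*
 * IOVA_TO_VVA wraps vhost_iova_to_vva(): it translates the guest IO virtual
 * address 'a' into a host virtual address with permission 'p', casting the
 * result to type 't'. On return '*l' holds the length that is contiguously
 * mapped, which may be smaller than requested, so callers must re-check it.
 * A minimal calling sketch (the length re-check is mandatory, as done by
 * get_data_ptr() below):
 *
 *	uint64_t dlen = desc->len;
 *	void *va = IOVA_TO_VVA(void *, vc_req, desc->addr, &dlen,
 *			VHOST_ACCESS_RO);
 *	if (va == NULL || dlen != desc->len)
 *		return NULL; // unmapped, or split across host pages
 */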
static int
cipher_algo_transform(uint32_t virtio_cipher_algo,
		enum rte_crypto_cipher_algorithm *algo)
{
	switch (virtio_cipher_algo) {
	case VIRTIO_CRYPTO_CIPHER_AES_CBC:
		*algo = RTE_CRYPTO_CIPHER_AES_CBC;
		break;
	case VIRTIO_CRYPTO_CIPHER_AES_CTR:
		*algo = RTE_CRYPTO_CIPHER_AES_CTR;
		break;
	case VIRTIO_CRYPTO_CIPHER_DES_ECB:
		return -VIRTIO_CRYPTO_NOTSUPP;
	case VIRTIO_CRYPTO_CIPHER_DES_CBC:
		*algo = RTE_CRYPTO_CIPHER_DES_CBC;
		break;
	case VIRTIO_CRYPTO_CIPHER_3DES_ECB:
		*algo = RTE_CRYPTO_CIPHER_3DES_ECB;
		break;
	case VIRTIO_CRYPTO_CIPHER_3DES_CBC:
		*algo = RTE_CRYPTO_CIPHER_3DES_CBC;
		break;
	case VIRTIO_CRYPTO_CIPHER_3DES_CTR:
		*algo = RTE_CRYPTO_CIPHER_3DES_CTR;
		break;
	case VIRTIO_CRYPTO_CIPHER_KASUMI_F8:
		*algo = RTE_CRYPTO_CIPHER_KASUMI_F8;
		break;
	case VIRTIO_CRYPTO_CIPHER_SNOW3G_UEA2:
		*algo = RTE_CRYPTO_CIPHER_SNOW3G_UEA2;
		break;
	case VIRTIO_CRYPTO_CIPHER_AES_F8:
		*algo = RTE_CRYPTO_CIPHER_AES_F8;
		break;
	case VIRTIO_CRYPTO_CIPHER_AES_XTS:
		*algo = RTE_CRYPTO_CIPHER_AES_XTS;
		break;
	case VIRTIO_CRYPTO_CIPHER_ZUC_EEA3:
		*algo = RTE_CRYPTO_CIPHER_ZUC_EEA3;
		break;
	default:
		return -VIRTIO_CRYPTO_BADMSG;
	}

	return 0;
}
static int
auth_algo_transform(uint32_t virtio_auth_algo,
		enum rte_crypto_auth_algorithm *algo)
{
	switch (virtio_auth_algo) {
	case VIRTIO_CRYPTO_NO_MAC:
		*algo = RTE_CRYPTO_AUTH_NULL;
		break;
	case VIRTIO_CRYPTO_MAC_HMAC_MD5:
		*algo = RTE_CRYPTO_AUTH_MD5_HMAC;
		break;
	case VIRTIO_CRYPTO_MAC_HMAC_SHA1:
		*algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
		break;
	case VIRTIO_CRYPTO_MAC_HMAC_SHA_224:
		*algo = RTE_CRYPTO_AUTH_SHA224_HMAC;
		break;
	case VIRTIO_CRYPTO_MAC_HMAC_SHA_256:
		*algo = RTE_CRYPTO_AUTH_SHA256_HMAC;
		break;
	case VIRTIO_CRYPTO_MAC_HMAC_SHA_384:
		*algo = RTE_CRYPTO_AUTH_SHA384_HMAC;
		break;
	case VIRTIO_CRYPTO_MAC_HMAC_SHA_512:
		*algo = RTE_CRYPTO_AUTH_SHA512_HMAC;
		break;
	case VIRTIO_CRYPTO_MAC_CMAC_AES:
		*algo = RTE_CRYPTO_AUTH_AES_CMAC;
		break;
	case VIRTIO_CRYPTO_MAC_KASUMI_F9:
		*algo = RTE_CRYPTO_AUTH_KASUMI_F9;
		break;
	case VIRTIO_CRYPTO_MAC_SNOW3G_UIA2:
		*algo = RTE_CRYPTO_AUTH_SNOW3G_UIA2;
		break;
	case VIRTIO_CRYPTO_MAC_GMAC_AES:
		*algo = RTE_CRYPTO_AUTH_AES_GMAC;
		break;
	case VIRTIO_CRYPTO_MAC_CBCMAC_AES:
		*algo = RTE_CRYPTO_AUTH_AES_CBC_MAC;
		break;
	case VIRTIO_CRYPTO_MAC_XCBC_AES:
		*algo = RTE_CRYPTO_AUTH_AES_XCBC_MAC;
		break;
	case VIRTIO_CRYPTO_MAC_CMAC_3DES:
	case VIRTIO_CRYPTO_MAC_GMAC_TWOFISH:
	case VIRTIO_CRYPTO_MAC_CBCMAC_KASUMI_F9:
		return -VIRTIO_CRYPTO_NOTSUPP;
	default:
		return -VIRTIO_CRYPTO_BADMSG;
	}

	return 0;
}
static int get_iv_len(enum rte_crypto_cipher_algorithm algo)
{
	switch (algo) {
	case RTE_CRYPTO_CIPHER_3DES_CBC:
	case RTE_CRYPTO_CIPHER_3DES_CTR:
	case RTE_CRYPTO_CIPHER_3DES_ECB:
		return 8;
	case RTE_CRYPTO_CIPHER_AES_CBC:
		return 16;

	/* TODO: add common algos */

	default:
		return -1;
	}
}
/**
 * vhost_crypto struct is used to maintain a number of virtio_cryptos and
 * one DPDK crypto device that deals with all crypto workloads. It is declared
 * here and defined in vhost_crypto.c
 */
struct vhost_crypto {
	/** Used to lookup DPDK Cryptodev Session based on VIRTIO crypto
	 *  session ID.
	 */
	struct rte_hash *session_map;
	struct rte_mempool *mbuf_pool;
	struct rte_mempool *sess_pool;
	struct rte_mempool *sess_priv_pool;
	struct rte_mempool *wb_pool;

	/** DPDK cryptodev ID */
	uint8_t cid;
	uint16_t nb_qps;

	uint64_t last_session_id;

	uint64_t cache_session_id;
	struct rte_cryptodev_sym_session *cache_session;
	/** socket id for the device */
	int socket_id;

	struct virtio_net *dev;

	uint8_t option;
} __rte_cache_aligned;
struct vhost_crypto_writeback_data {
	uint8_t *src;
	uint8_t *dst;
	uint64_t len;
	struct vhost_crypto_writeback_data *next;
};
struct vhost_crypto_data_req {
	struct vring_desc *head;
	struct virtio_net *dev;
	struct virtio_crypto_inhdr *inhdr;
	struct vhost_virtqueue *vq;
	struct vhost_crypto_writeback_data *wb;
	struct rte_mempool *wb_pool;
	uint16_t desc_idx;
	uint16_t len;
	uint16_t zero_copy;
};
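
/*
 * A vhost_crypto_data_req is carried in the private area of each source
 * mbuf (the mbuf pool is created with priv_size ==
 * sizeof(struct vhost_crypto_data_req) in rte_vhost_crypto_create() below).
 * A minimal sketch of recovering it from a completed crypto op, as the
 * finalize path later in this file does:
 *
 *	struct vhost_crypto_data_req *vc_req =
 *			rte_mbuf_to_priv(op->sym->m_src);
 *	// vc_req->desc_idx and vc_req->len feed the used-ring update
 */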
static int
transform_cipher_param(struct rte_crypto_sym_xform *xform,
		VhostUserCryptoSessionParam *param)
{
	int ret;

	ret = cipher_algo_transform(param->cipher_algo, &xform->cipher.algo);
	if (unlikely(ret < 0))
		return ret;

	if (param->cipher_key_len > VHOST_USER_CRYPTO_MAX_CIPHER_KEY_LENGTH) {
		VC_LOG_DBG("Invalid cipher key length");
		return -VIRTIO_CRYPTO_BADMSG;
	}

	xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
	xform->cipher.key.length = param->cipher_key_len;
	if (xform->cipher.key.length > 0)
		xform->cipher.key.data = param->cipher_key_buf;
	if (param->dir == VIRTIO_CRYPTO_OP_ENCRYPT)
		xform->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
	else if (param->dir == VIRTIO_CRYPTO_OP_DECRYPT)
		xform->cipher.op = RTE_CRYPTO_CIPHER_OP_DECRYPT;
	else {
		VC_LOG_DBG("Bad operation type");
		return -VIRTIO_CRYPTO_BADMSG;
	}

	ret = get_iv_len(xform->cipher.algo);
	if (unlikely(ret < 0))
		return ret;
	xform->cipher.iv.length = (uint16_t)ret;
	xform->cipher.iv.offset = IV_OFFSET;

	return 0;
}
static int
transform_chain_param(struct rte_crypto_sym_xform *xforms,
		VhostUserCryptoSessionParam *param)
{
	struct rte_crypto_sym_xform *xform_cipher, *xform_auth;
	int ret;

	switch (param->chaining_dir) {
	case VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_HASH_THEN_CIPHER:
		xform_auth = xforms;
		xform_cipher = xforms->next;
		xform_cipher->cipher.op = RTE_CRYPTO_CIPHER_OP_DECRYPT;
		xform_auth->auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
		break;
	case VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_CIPHER_THEN_HASH:
		xform_cipher = xforms;
		xform_auth = xforms->next;
		xform_cipher->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
		xform_auth->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
		break;
	default:
		return -VIRTIO_CRYPTO_BADMSG;
	}

	/* cipher */
	ret = cipher_algo_transform(param->cipher_algo,
			&xform_cipher->cipher.algo);
	if (unlikely(ret < 0))
		return ret;

	if (param->cipher_key_len > VHOST_USER_CRYPTO_MAX_CIPHER_KEY_LENGTH) {
		VC_LOG_DBG("Invalid cipher key length");
		return -VIRTIO_CRYPTO_BADMSG;
	}

	xform_cipher->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
	xform_cipher->cipher.key.length = param->cipher_key_len;
	xform_cipher->cipher.key.data = param->cipher_key_buf;
	ret = get_iv_len(xform_cipher->cipher.algo);
	if (unlikely(ret < 0))
		return ret;
	xform_cipher->cipher.iv.length = (uint16_t)ret;
	xform_cipher->cipher.iv.offset = IV_OFFSET;

	/* auth */
	xform_auth->type = RTE_CRYPTO_SYM_XFORM_AUTH;
	ret = auth_algo_transform(param->hash_algo, &xform_auth->auth.algo);
	if (unlikely(ret < 0))
		return ret;

	if (param->auth_key_len > VHOST_USER_CRYPTO_MAX_HMAC_KEY_LENGTH) {
		VC_LOG_DBG("Invalid auth key length");
		return -VIRTIO_CRYPTO_BADMSG;
	}

	xform_auth->auth.digest_length = param->digest_len;
	xform_auth->auth.key.length = param->auth_key_len;
	xform_auth->auth.key.data = param->auth_key_buf;

	return 0;
}
static void
vhost_crypto_create_sess(struct vhost_crypto *vcrypto,
		VhostUserCryptoSessionParam *sess_param)
{
	struct rte_crypto_sym_xform xform1 = {0}, xform2 = {0};
	struct rte_cryptodev_sym_session *session;
	int ret;

	switch (sess_param->op_type) {
	case VIRTIO_CRYPTO_SYM_OP_NONE:
	case VIRTIO_CRYPTO_SYM_OP_CIPHER:
		ret = transform_cipher_param(&xform1, sess_param);
		if (unlikely(ret)) {
			VC_LOG_ERR("Error transform session msg (%i)", ret);
			sess_param->session_id = ret;
			return;
		}
		break;
	case VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING:
		if (unlikely(sess_param->hash_mode !=
				VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH)) {
			sess_param->session_id = -VIRTIO_CRYPTO_NOTSUPP;
			VC_LOG_ERR("Error transform session message (%i)",
					-VIRTIO_CRYPTO_NOTSUPP);
			return;
		}

		xform1.next = &xform2;

		ret = transform_chain_param(&xform1, sess_param);
		if (unlikely(ret)) {
			VC_LOG_ERR("Error transform session message (%i)",
					ret);
			sess_param->session_id = ret;
			return;
		}
		break;
	default:
		VC_LOG_ERR("Algorithm not yet supported");
		sess_param->session_id = -VIRTIO_CRYPTO_NOTSUPP;
		return;
	}

	session = rte_cryptodev_sym_session_create(vcrypto->sess_pool);
	if (!session) {
		VC_LOG_ERR("Failed to create session");
		sess_param->session_id = -VIRTIO_CRYPTO_ERR;
		return;
	}

	if (rte_cryptodev_sym_session_init(vcrypto->cid, session, &xform1,
			vcrypto->sess_priv_pool) < 0) {
		VC_LOG_ERR("Failed to initialize session");
		sess_param->session_id = -VIRTIO_CRYPTO_ERR;
		return;
	}

	/* insert hash to map */
	if (rte_hash_add_key_data(vcrypto->session_map,
			&vcrypto->last_session_id, session) < 0) {
		VC_LOG_ERR("Failed to insert session to hash table");

		if (rte_cryptodev_sym_session_clear(vcrypto->cid, session) < 0)
			VC_LOG_ERR("Failed to clear session");
		if (rte_cryptodev_sym_session_free(session) < 0)
			VC_LOG_ERR("Failed to free session");
		sess_param->session_id = -VIRTIO_CRYPTO_ERR;
		return;
	}

	VC_LOG_INFO("Session %"PRIu64" created for vdev %i.",
			vcrypto->last_session_id, vcrypto->dev->vid);

	sess_param->session_id = vcrypto->last_session_id;
	vcrypto->last_session_id++;
}
static int
vhost_crypto_close_sess(struct vhost_crypto *vcrypto, uint64_t session_id)
{
	struct rte_cryptodev_sym_session *session;
	uint64_t sess_id = session_id;
	int ret;

	ret = rte_hash_lookup_data(vcrypto->session_map, &sess_id,
			(void **)&session);
	if (unlikely(ret < 0)) {
		VC_LOG_ERR("Failed to delete session %"PRIu64".", session_id);
		return -VIRTIO_CRYPTO_INVSESS;
	}

	if (rte_cryptodev_sym_session_clear(vcrypto->cid, session) < 0) {
		VC_LOG_DBG("Failed to clear session");
		return -VIRTIO_CRYPTO_ERR;
	}

	if (rte_cryptodev_sym_session_free(session) < 0) {
		VC_LOG_DBG("Failed to free session");
		return -VIRTIO_CRYPTO_ERR;
	}

	if (rte_hash_del_key(vcrypto->session_map, &sess_id) < 0) {
		VC_LOG_DBG("Failed to delete session from hash table.");
		return -VIRTIO_CRYPTO_ERR;
	}

	VC_LOG_INFO("Session %"PRIu64" deleted for vdev %i.", sess_id,
			vcrypto->dev->vid);

	return 0;
}
static enum rte_vhost_msg_result
vhost_crypto_msg_post_handler(int vid, void *msg)
{
	struct virtio_net *dev = get_device(vid);
	struct vhost_crypto *vcrypto;
	VhostUserMsg *vmsg = msg;
	enum rte_vhost_msg_result ret = RTE_VHOST_MSG_RESULT_OK;

	if (dev == NULL) {
		VC_LOG_ERR("Invalid vid %i", vid);
		return RTE_VHOST_MSG_RESULT_ERR;
	}

	vcrypto = dev->extern_data;
	if (vcrypto == NULL) {
		VC_LOG_ERR("Cannot find required data, is it initialized?");
		return RTE_VHOST_MSG_RESULT_ERR;
	}

	switch (vmsg->request.master) {
	case VHOST_USER_CRYPTO_CREATE_SESS:
		vhost_crypto_create_sess(vcrypto,
				&vmsg->payload.crypto_session);
		vmsg->fd_num = 0;
		ret = RTE_VHOST_MSG_RESULT_REPLY;
		break;
	case VHOST_USER_CRYPTO_CLOSE_SESS:
		if (vhost_crypto_close_sess(vcrypto, vmsg->payload.u64))
			ret = RTE_VHOST_MSG_RESULT_ERR;
		break;
	default:
		ret = RTE_VHOST_MSG_RESULT_NOT_HANDLED;
		break;
	}

	return ret;
}
static __rte_always_inline struct vring_desc *
find_write_desc(struct vring_desc *head, struct vring_desc *desc,
		uint32_t *nb_descs, uint32_t vq_size)
{
	if (desc->flags & VRING_DESC_F_WRITE)
		return desc;

	while (desc->flags & VRING_DESC_F_NEXT) {
		if (unlikely(*nb_descs == 0 || desc->next >= vq_size))
			return NULL;
		(*nb_descs)--;
		desc = &head[desc->next];
		if (desc->flags & VRING_DESC_F_WRITE)
			return desc;
	}

	return NULL;
}
static struct virtio_crypto_inhdr *
reach_inhdr(struct vhost_crypto_data_req *vc_req, struct vring_desc *desc,
		uint32_t *nb_descs, uint32_t vq_size)
{
	uint64_t dlen;
	struct virtio_crypto_inhdr *inhdr;

	while (desc->flags & VRING_DESC_F_NEXT) {
		if (unlikely(*nb_descs == 0 || desc->next >= vq_size))
			return NULL;
		(*nb_descs)--;
		desc = &vc_req->head[desc->next];
	}

	dlen = desc->len;
	inhdr = IOVA_TO_VVA(struct virtio_crypto_inhdr *, vc_req, desc->addr,
			&dlen, VHOST_ACCESS_WO);
	if (unlikely(!inhdr || dlen != desc->len))
		return NULL;

	return inhdr;
}
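
/*
 * Layout of a virtio-crypto data request on the descriptor chain, as walked
 * by the helpers above (per the virtio-crypto spec; illustrative):
 *
 *	[0]      virtio_crypto_op_data_req header   (device-readable)
 *	[1..n]   IV and source data descriptors     (device-readable)
 *	[n+1..m] destination/digest descriptors     (device-writable)
 *	[last]   virtio_crypto_inhdr status byte    (device-writable)
 *
 * reach_inhdr() therefore simply follows VRING_DESC_F_NEXT to the tail of
 * the chain, and find_write_desc() locates the first descriptor carrying
 * VRING_DESC_F_WRITE.
 */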
static __rte_always_inline int
move_desc(struct vring_desc *head, struct vring_desc **cur_desc,
		uint32_t size, uint32_t *nb_descs, uint32_t vq_size)
{
	struct vring_desc *desc = *cur_desc;
	int left = size - desc->len;

	while ((desc->flags & VRING_DESC_F_NEXT) && left > 0) {
		(*nb_descs)--;
		if (unlikely(*nb_descs == 0 || desc->next >= vq_size))
			return -1;
		desc = &head[desc->next];
		rte_prefetch0(&head[desc->next]);
		left -= desc->len;
	}

	if (unlikely(left > 0))
		return -1;

	if (unlikely(*nb_descs == 0))
		*cur_desc = NULL;
	else {
		if (unlikely(desc->next >= vq_size))
			return -1;
		*cur_desc = &head[desc->next];
	}

	return 0;
}
static __rte_always_inline void *
get_data_ptr(struct vhost_crypto_data_req *vc_req, struct vring_desc *cur_desc,
		uint8_t perm)
{
	void *data;
	uint64_t dlen = cur_desc->len;

	data = IOVA_TO_VVA(void *, vc_req, cur_desc->addr, &dlen, perm);
	if (unlikely(!data || dlen != cur_desc->len)) {
		VC_LOG_ERR("Failed to map object");
		return NULL;
	}

	return data;
}
static int
copy_data(void *dst_data, struct vhost_crypto_data_req *vc_req,
		struct vring_desc **cur_desc, uint32_t size,
		uint32_t *nb_descs, uint32_t vq_size)
{
	struct vring_desc *desc = *cur_desc;
	uint64_t remain, addr, dlen, len;
	uint32_t to_copy;
	uint8_t *data = dst_data;
	uint8_t *src;
	int left = size;

	for (;;) {
		to_copy = RTE_MIN(desc->len, (uint32_t)left);
		dlen = to_copy;
		src = IOVA_TO_VVA(uint8_t *, vc_req, desc->addr, &dlen,
				VHOST_ACCESS_RO);
		if (unlikely(!src || !dlen)) {
			VC_LOG_ERR("Failed to map descriptor");
			return -1;
		}

		rte_memcpy(data, src, dlen);
		data += dlen;

		/* the guest buffer may span several host pages */
		if (unlikely(dlen < to_copy)) {
			remain = to_copy - dlen;
			addr = desc->addr + dlen;
			while (remain) {
				len = remain;
				src = IOVA_TO_VVA(uint8_t *, vc_req, addr,
						&len, VHOST_ACCESS_RO);
				if (unlikely(!src || !len)) {
					VC_LOG_ERR("Failed to map descriptor");
					return -1;
				}

				rte_memcpy(data, src, len);
				addr += len;
				remain -= len;
				data += len;
			}
		}

		left -= to_copy;
		if (left == 0 || !(desc->flags & VRING_DESC_F_NEXT))
			break;

		if (unlikely(*nb_descs == 0 || desc->next >= vq_size)) {
			VC_LOG_ERR("Invalid descriptors");
			return -1;
		}
		(*nb_descs)--;
		desc = &vc_req->head[desc->next];
		rte_prefetch0(&vc_req->head[desc->next]);
	}

	if (unlikely(left > 0)) {
		VC_LOG_ERR("Incorrect virtio descriptor");
		return -1;
	}

	if (unlikely(*nb_descs == 0))
		*cur_desc = NULL;
	else {
		if (unlikely(desc->next >= vq_size))
			return -1;
		*cur_desc = &vc_req->head[desc->next];
	}

	return 0;
}
static void
write_back_data(struct vhost_crypto_data_req *vc_req)
{
	struct vhost_crypto_writeback_data *wb_data = vc_req->wb, *wb_last;

	while (wb_data) {
		rte_memcpy(wb_data->dst, wb_data->src, wb_data->len);
		wb_last = wb_data;
		wb_data = wb_data->next;
		rte_mempool_put(vc_req->wb_pool, wb_last);
	}
}
static void
free_wb_data(struct vhost_crypto_writeback_data *wb_data,
		struct rte_mempool *mp)
{
	struct vhost_crypto_writeback_data *next;

	while (wb_data != NULL) {
		next = wb_data->next;
		rte_mempool_put(mp, wb_data);
		wb_data = next;
	}
}
/**
 * The function will allocate a vhost_crypto_writeback_data linked list
 * containing the source and destination data pointers for the write back
 * operation after dequeued from Cryptodev PMD queues.
 *
 * @param vc_req
 *   The vhost crypto data request pointer
 * @param cur_desc
 *   The pointer of the current in use descriptor pointer. The content of
 *   cur_desc is expected to be updated after the function execution.
 * @param end_wb_data
 *   The last write back data element to be returned. It is used only in
 *   cipher and hash chain operations.
 * @param src
 *   The source data pointer
 * @param offset
 *   The offset to both source and destination data. For source data the
 *   offset is the number of bytes between src and start point of cipher
 *   operation. For destination data the offset is the number of bytes from
 *   *cur_desc->addr to the point where the src will be written to.
 * @param write_back_len
 *   The size of the write back length.
 * @return
 *   The pointer to the start of the write back data linked list.
 */
static struct vhost_crypto_writeback_data *
prepare_write_back_data(struct vhost_crypto_data_req *vc_req,
		struct vring_desc **cur_desc,
		struct vhost_crypto_writeback_data **end_wb_data,
		uint8_t *src,
		uint32_t offset,
		uint64_t write_back_len,
		uint32_t *nb_descs, uint32_t vq_size)
{
	struct vhost_crypto_writeback_data *wb_data, *head = NULL;
	struct vring_desc *desc = *cur_desc;
	uint64_t dlen;
	uint8_t *dst;
	int ret;

	ret = rte_mempool_get(vc_req->wb_pool, (void **)&head);
	if (unlikely(ret < 0)) {
		VC_LOG_ERR("no memory");
		goto error_exit;
	}

	wb_data = head;

	if (likely(desc->len > offset)) {
		wb_data->src = src + offset;
		dlen = desc->len;
		dst = IOVA_TO_VVA(uint8_t *, vc_req, desc->addr,
				&dlen, VHOST_ACCESS_RW) + offset;
		if (unlikely(!dst || dlen != desc->len)) {
			VC_LOG_ERR("Failed to map descriptor");
			goto error_exit;
		}

		wb_data->dst = dst;
		wb_data->len = desc->len - offset;
		write_back_len -= wb_data->len;
		src += offset + wb_data->len;
		offset = 0;

		if (unlikely(write_back_len)) {
			ret = rte_mempool_get(vc_req->wb_pool,
					(void **)&(wb_data->next));
			if (unlikely(ret < 0)) {
				VC_LOG_ERR("no memory");
				goto error_exit;
			}
			wb_data = wb_data->next;
		} else
			wb_data->next = NULL;
	} else
		offset -= desc->len;

	while (write_back_len) {
		if (unlikely(*nb_descs == 0 || desc->next >= vq_size)) {
			VC_LOG_ERR("Invalid descriptors");
			goto error_exit;
		}
		(*nb_descs)--;

		desc = &vc_req->head[desc->next];
		if (unlikely(!(desc->flags & VRING_DESC_F_WRITE))) {
			VC_LOG_ERR("incorrect descriptor");
			goto error_exit;
		}

		if (desc->len <= offset) {
			offset -= desc->len;
			continue;
		}

		dlen = desc->len;
		dst = IOVA_TO_VVA(uint8_t *, vc_req, desc->addr, &dlen,
				VHOST_ACCESS_RW) + offset;
		if (unlikely(dst == NULL || dlen != desc->len)) {
			VC_LOG_ERR("Failed to map descriptor");
			goto error_exit;
		}

		wb_data->src = src;
		wb_data->dst = dst;
		wb_data->len = RTE_MIN(desc->len - offset, write_back_len);
		write_back_len -= wb_data->len;
		src += wb_data->len;
		offset = 0;

		if (write_back_len) {
			ret = rte_mempool_get(vc_req->wb_pool,
					(void **)&(wb_data->next));
			if (unlikely(ret < 0)) {
				VC_LOG_ERR("no memory");
				goto error_exit;
			}
			wb_data = wb_data->next;
		} else
			wb_data->next = NULL;
	}

	if (unlikely(*nb_descs == 0))
		*cur_desc = NULL;
	else {
		if (unlikely(desc->next >= vq_size))
			goto error_exit;
		*cur_desc = &vc_req->head[desc->next];
	}

	*end_wb_data = wb_data;

	return head;

error_exit:
	if (head)
		free_wb_data(head, vc_req->wb_pool);

	return NULL;
}
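
/*
 * Example of the resulting list for a destination split across two guest
 * descriptors (illustrative; the exact segmentation depends on the guest
 * buffer layout):
 *
 *	head       -> { src = mbuf_data,     dst = desc0_va, len = a }
 *	head->next -> { src = mbuf_data + a, dst = desc1_va, len = b }
 *
 * write_back_data() later copies each segment into the guest and returns
 * the nodes to vc_req->wb_pool.
 */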
static uint8_t
prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
		struct vhost_crypto_data_req *vc_req,
		struct virtio_crypto_cipher_data_req *cipher,
		struct vring_desc *cur_desc,
		uint32_t *nb_descs, uint32_t vq_size)
{
	struct vring_desc *desc = cur_desc;
	struct vhost_crypto_writeback_data *ewb = NULL;
	struct rte_mbuf *m_src = op->sym->m_src, *m_dst = op->sym->m_dst;
	uint8_t *iv_data = rte_crypto_op_ctod_offset(op, uint8_t *, IV_OFFSET);
	uint8_t ret = 0;

	/* prepare */
	/* iv */
	if (unlikely(copy_data(iv_data, vc_req, &desc, cipher->para.iv_len,
			nb_descs, vq_size) < 0)) {
		ret = VIRTIO_CRYPTO_BADMSG;
		goto error_exit;
	}

	m_src->data_len = cipher->para.src_data_len;

	switch (vcrypto->option) {
	case RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE:
		m_src->buf_iova = gpa_to_hpa(vcrypto->dev, desc->addr,
				cipher->para.src_data_len);
		m_src->buf_addr = get_data_ptr(vc_req, desc, VHOST_ACCESS_RO);
		if (unlikely(m_src->buf_iova == 0 ||
				m_src->buf_addr == NULL)) {
			VC_LOG_ERR("zero_copy may fail due to cross page data");
			ret = VIRTIO_CRYPTO_ERR;
			goto error_exit;
		}

		if (unlikely(move_desc(vc_req->head, &desc,
				cipher->para.src_data_len, nb_descs,
				vq_size) < 0)) {
			VC_LOG_ERR("Incorrect descriptor");
			ret = VIRTIO_CRYPTO_ERR;
			goto error_exit;
		}

		break;
	case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE:
		vc_req->wb_pool = vcrypto->wb_pool;

		if (unlikely(cipher->para.src_data_len >
				RTE_MBUF_DEFAULT_BUF_SIZE)) {
			VC_LOG_ERR("Not enough space to do data copy");
			ret = VIRTIO_CRYPTO_ERR;
			goto error_exit;
		}
		if (unlikely(copy_data(rte_pktmbuf_mtod(m_src, uint8_t *),
				vc_req, &desc, cipher->para.src_data_len,
				nb_descs, vq_size) < 0)) {
			ret = VIRTIO_CRYPTO_BADMSG;
			goto error_exit;
		}
		break;
	default:
		ret = VIRTIO_CRYPTO_BADMSG;
		goto error_exit;
	}

	/* dst */
	desc = find_write_desc(vc_req->head, desc, nb_descs, vq_size);
	if (unlikely(!desc)) {
		VC_LOG_ERR("Cannot find write location");
		ret = VIRTIO_CRYPTO_BADMSG;
		goto error_exit;
	}

	switch (vcrypto->option) {
	case RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE:
		m_dst->buf_iova = gpa_to_hpa(vcrypto->dev,
				desc->addr, cipher->para.dst_data_len);
		m_dst->buf_addr = get_data_ptr(vc_req, desc, VHOST_ACCESS_RW);
		if (unlikely(m_dst->buf_iova == 0 || m_dst->buf_addr == NULL)) {
			VC_LOG_ERR("zero_copy may fail due to cross page data");
			ret = VIRTIO_CRYPTO_ERR;
			goto error_exit;
		}

		if (unlikely(move_desc(vc_req->head, &desc,
				cipher->para.dst_data_len,
				nb_descs, vq_size) < 0)) {
			VC_LOG_ERR("Incorrect descriptor");
			ret = VIRTIO_CRYPTO_ERR;
			goto error_exit;
		}

		m_dst->data_len = cipher->para.dst_data_len;
		break;
	case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE:
		vc_req->wb = prepare_write_back_data(vc_req, &desc, &ewb,
				rte_pktmbuf_mtod(m_src, uint8_t *), 0,
				cipher->para.dst_data_len, nb_descs, vq_size);
		if (unlikely(vc_req->wb == NULL)) {
			ret = VIRTIO_CRYPTO_ERR;
			goto error_exit;
		}

		break;
	default:
		ret = VIRTIO_CRYPTO_BADMSG;
		goto error_exit;
	}

	op->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
	op->sess_type = RTE_CRYPTO_OP_WITH_SESSION;

	op->sym->cipher.data.offset = 0;
	op->sym->cipher.data.length = cipher->para.src_data_len;

	vc_req->inhdr = get_data_ptr(vc_req, desc, VHOST_ACCESS_WO);
	if (unlikely(vc_req->inhdr == NULL)) {
		ret = VIRTIO_CRYPTO_BADMSG;
		goto error_exit;
	}

	vc_req->inhdr->status = VIRTIO_CRYPTO_OK;
	vc_req->len = cipher->para.dst_data_len + INHDR_LEN;

	return 0;

error_exit:
	if (vc_req->wb)
		free_wb_data(vc_req->wb, vc_req->wb_pool);

	vc_req->len = INHDR_LEN;

	return ret;
}
static uint8_t
prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
		struct vhost_crypto_data_req *vc_req,
		struct virtio_crypto_alg_chain_data_req *chain,
		struct vring_desc *cur_desc,
		uint32_t *nb_descs, uint32_t vq_size)
{
	struct vring_desc *desc = cur_desc, *digest_desc;
	struct vhost_crypto_writeback_data *ewb = NULL, *ewb2 = NULL;
	struct rte_mbuf *m_src = op->sym->m_src, *m_dst = op->sym->m_dst;
	uint8_t *iv_data = rte_crypto_op_ctod_offset(op, uint8_t *, IV_OFFSET);
	uint32_t digest_offset;
	void *digest_addr;
	uint8_t ret = 0;

	/* prepare */
	/* iv */
	if (unlikely(copy_data(iv_data, vc_req, &desc,
			chain->para.iv_len, nb_descs, vq_size) < 0)) {
		ret = VIRTIO_CRYPTO_BADMSG;
		goto error_exit;
	}

	m_src->data_len = chain->para.src_data_len;

	switch (vcrypto->option) {
	case RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE:
		m_dst->data_len = chain->para.dst_data_len;

		m_src->buf_iova = gpa_to_hpa(vcrypto->dev, desc->addr,
				chain->para.src_data_len);
		m_src->buf_addr = get_data_ptr(vc_req, desc, VHOST_ACCESS_RO);
		if (unlikely(m_src->buf_iova == 0 || m_src->buf_addr == NULL)) {
			VC_LOG_ERR("zero_copy may fail due to cross page data");
			ret = VIRTIO_CRYPTO_ERR;
			goto error_exit;
		}

		if (unlikely(move_desc(vc_req->head, &desc,
				chain->para.src_data_len,
				nb_descs, vq_size) < 0)) {
			VC_LOG_ERR("Incorrect descriptor");
			ret = VIRTIO_CRYPTO_ERR;
			goto error_exit;
		}
		break;
	case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE:
		vc_req->wb_pool = vcrypto->wb_pool;

		if (unlikely(chain->para.src_data_len >
				RTE_MBUF_DEFAULT_BUF_SIZE)) {
			VC_LOG_ERR("Not enough space to do data copy");
			ret = VIRTIO_CRYPTO_ERR;
			goto error_exit;
		}
		if (unlikely(copy_data(rte_pktmbuf_mtod(m_src, uint8_t *),
				vc_req, &desc, chain->para.src_data_len,
				nb_descs, vq_size) < 0)) {
			ret = VIRTIO_CRYPTO_BADMSG;
			goto error_exit;
		}

		break;
	default:
		ret = VIRTIO_CRYPTO_BADMSG;
		goto error_exit;
	}

	/* dst */
	desc = find_write_desc(vc_req->head, desc, nb_descs, vq_size);
	if (unlikely(!desc)) {
		VC_LOG_ERR("Cannot find write location");
		ret = VIRTIO_CRYPTO_BADMSG;
		goto error_exit;
	}

	switch (vcrypto->option) {
	case RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE:
		m_dst->buf_iova = gpa_to_hpa(vcrypto->dev,
				desc->addr, chain->para.dst_data_len);
		m_dst->buf_addr = get_data_ptr(vc_req, desc, VHOST_ACCESS_RW);
		if (unlikely(m_dst->buf_iova == 0 || m_dst->buf_addr == NULL)) {
			VC_LOG_ERR("zero_copy may fail due to cross page data");
			ret = VIRTIO_CRYPTO_ERR;
			goto error_exit;
		}

		if (unlikely(move_desc(vc_req->head, &desc,
				chain->para.dst_data_len,
				nb_descs, vq_size) < 0)) {
			VC_LOG_ERR("Incorrect descriptor");
			ret = VIRTIO_CRYPTO_ERR;
			goto error_exit;
		}

		/* digest */
		op->sym->auth.digest.phys_addr = gpa_to_hpa(vcrypto->dev,
				desc->addr, chain->para.hash_result_len);
		op->sym->auth.digest.data = get_data_ptr(vc_req, desc,
				VHOST_ACCESS_RW);
		if (unlikely(op->sym->auth.digest.phys_addr == 0)) {
			VC_LOG_ERR("zero_copy may fail due to cross page data");
			ret = VIRTIO_CRYPTO_ERR;
			goto error_exit;
		}

		if (unlikely(move_desc(vc_req->head, &desc,
				chain->para.hash_result_len,
				nb_descs, vq_size) < 0)) {
			VC_LOG_ERR("Incorrect descriptor");
			ret = VIRTIO_CRYPTO_ERR;
			goto error_exit;
		}

		break;
	case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE:
		vc_req->wb = prepare_write_back_data(vc_req, &desc, &ewb,
				rte_pktmbuf_mtod(m_src, uint8_t *),
				chain->para.cipher_start_src_offset,
				chain->para.dst_data_len -
				chain->para.cipher_start_src_offset,
				nb_descs, vq_size);
		if (unlikely(vc_req->wb == NULL)) {
			ret = VIRTIO_CRYPTO_ERR;
			goto error_exit;
		}

		digest_offset = m_src->data_len;
		digest_addr = rte_pktmbuf_mtod_offset(m_src, void *,
				digest_offset);
		digest_desc = desc;

		/** create a wb_data for digest */
		ewb->next = prepare_write_back_data(vc_req, &desc, &ewb2,
				digest_addr, 0, chain->para.hash_result_len,
				nb_descs, vq_size);
		if (unlikely(ewb->next == NULL)) {
			ret = VIRTIO_CRYPTO_ERR;
			goto error_exit;
		}

		if (unlikely(copy_data(digest_addr, vc_req, &digest_desc,
				chain->para.hash_result_len,
				nb_descs, vq_size) < 0)) {
			ret = VIRTIO_CRYPTO_BADMSG;
			goto error_exit;
		}

		op->sym->auth.digest.data = digest_addr;
		op->sym->auth.digest.phys_addr = rte_pktmbuf_iova_offset(m_src,
				digest_offset);
		break;
	default:
		ret = VIRTIO_CRYPTO_BADMSG;
		goto error_exit;
	}

	/* record inhdr */
	vc_req->inhdr = get_data_ptr(vc_req, desc, VHOST_ACCESS_WO);
	if (unlikely(vc_req->inhdr == NULL)) {
		ret = VIRTIO_CRYPTO_BADMSG;
		goto error_exit;
	}

	vc_req->inhdr->status = VIRTIO_CRYPTO_OK;

	op->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
	op->sess_type = RTE_CRYPTO_OP_WITH_SESSION;

	op->sym->cipher.data.offset = chain->para.cipher_start_src_offset;
	op->sym->cipher.data.length = chain->para.src_data_len -
			chain->para.cipher_start_src_offset;

	op->sym->auth.data.offset = chain->para.hash_start_src_offset;
	op->sym->auth.data.length = chain->para.len_to_hash;

	vc_req->len = chain->para.dst_data_len + chain->para.hash_result_len +
			INHDR_LEN;
	return 0;

error_exit:
	if (vc_req->wb)
		free_wb_data(vc_req->wb, vc_req->wb_pool);
	vc_req->len = INHDR_LEN;
	return ret;
}
/**
 * Process one request on a descriptor chain.
 */
static __rte_always_inline int
vhost_crypto_process_one_req(struct vhost_crypto *vcrypto,
		struct vhost_virtqueue *vq, struct rte_crypto_op *op,
		struct vring_desc *head, uint16_t desc_idx)
{
	struct vhost_crypto_data_req *vc_req = rte_mbuf_to_priv(op->sym->m_src);
	struct rte_cryptodev_sym_session *session;
	struct virtio_crypto_op_data_req *req, tmp_req;
	struct virtio_crypto_inhdr *inhdr;
	struct vring_desc *desc = NULL;
	uint64_t session_id;
	uint64_t dlen;
	uint32_t nb_descs = vq->size;
	int err = 0;

	vc_req->desc_idx = desc_idx;
	vc_req->dev = vcrypto->dev;
	vc_req->vq = vq;

	if (likely(head->flags & VRING_DESC_F_INDIRECT)) {
		dlen = head->len;
		nb_descs = dlen / sizeof(struct vring_desc);
		/* drop invalid descriptors */
		if (unlikely(nb_descs > vq->size))
			return -1;
		desc = IOVA_TO_VVA(struct vring_desc *, vc_req, head->addr,
				&dlen, VHOST_ACCESS_RO);
		if (unlikely(!desc || dlen != head->len))
			return -1;
		head = desc;
	} else {
		desc = head;
	}

	vc_req->head = head;
	vc_req->zero_copy = vcrypto->option;

	req = get_data_ptr(vc_req, desc, VHOST_ACCESS_RO);
	if (unlikely(req == NULL)) {
		switch (vcrypto->option) {
		case RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE:
			err = VIRTIO_CRYPTO_BADMSG;
			VC_LOG_ERR("Invalid descriptor");
			goto error_exit;
		case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE:
			req = &tmp_req;
			if (unlikely(copy_data(req, vc_req, &desc, sizeof(*req),
					&nb_descs, vq->size) < 0)) {
				err = VIRTIO_CRYPTO_BADMSG;
				VC_LOG_ERR("Invalid descriptor");
				goto error_exit;
			}
			break;
		default:
			err = VIRTIO_CRYPTO_ERR;
			VC_LOG_ERR("Invalid option");
			goto error_exit;
		}
	} else {
		if (unlikely(move_desc(vc_req->head, &desc,
				sizeof(*req), &nb_descs, vq->size) < 0)) {
			VC_LOG_ERR("Incorrect descriptor");
			goto error_exit;
		}
	}

	switch (req->header.opcode) {
	case VIRTIO_CRYPTO_CIPHER_ENCRYPT:
	case VIRTIO_CRYPTO_CIPHER_DECRYPT:
		session_id = req->header.session_id;

		/* one branch to avoid unnecessary table lookup */
		if (vcrypto->cache_session_id != session_id) {
			err = rte_hash_lookup_data(vcrypto->session_map,
					&session_id, (void **)&session);
			if (unlikely(err < 0)) {
				err = VIRTIO_CRYPTO_ERR;
				VC_LOG_ERR("Failed to find session %"PRIu64,
						session_id);
				goto error_exit;
			}

			vcrypto->cache_session = session;
			vcrypto->cache_session_id = session_id;
		}

		session = vcrypto->cache_session;

		err = rte_crypto_op_attach_sym_session(op, session);
		if (unlikely(err < 0)) {
			err = VIRTIO_CRYPTO_ERR;
			VC_LOG_ERR("Failed to attach session to op");
			goto error_exit;
		}

		switch (req->u.sym_req.op_type) {
		case VIRTIO_CRYPTO_SYM_OP_NONE:
			err = VIRTIO_CRYPTO_NOTSUPP;
			break;
		case VIRTIO_CRYPTO_SYM_OP_CIPHER:
			err = prepare_sym_cipher_op(vcrypto, op, vc_req,
					&req->u.sym_req.u.cipher, desc,
					&nb_descs, vq->size);
			break;
		case VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING:
			err = prepare_sym_chain_op(vcrypto, op, vc_req,
					&req->u.sym_req.u.chain, desc,
					&nb_descs, vq->size);
			break;
		}
		if (unlikely(err != 0)) {
			VC_LOG_ERR("Failed to process sym request");
			goto error_exit;
		}
		break;
	default:
		err = VIRTIO_CRYPTO_ERR;
		VC_LOG_ERR("Unsupported symmetric crypto request type %u",
				req->header.opcode);
		goto error_exit;
	}

	return 0;

error_exit:

	inhdr = reach_inhdr(vc_req, desc, &nb_descs, vq->size);
	if (likely(inhdr != NULL))
		inhdr->status = (uint8_t)err;

	return -1;
}
static __rte_always_inline struct vhost_virtqueue *
vhost_crypto_finalize_one_request(struct rte_crypto_op *op,
		struct vhost_virtqueue *old_vq)
{
	struct rte_mbuf *m_src = op->sym->m_src;
	struct rte_mbuf *m_dst = op->sym->m_dst;
	struct vhost_crypto_data_req *vc_req = rte_mbuf_to_priv(m_src);
	uint16_t desc_idx;

	if (unlikely(!vc_req)) {
		VC_LOG_ERR("Failed to retrieve vc_req");
		return NULL;
	}

	if (old_vq && (vc_req->vq != old_vq))
		return vc_req->vq;

	desc_idx = vc_req->desc_idx;

	if (unlikely(op->status != RTE_CRYPTO_OP_STATUS_SUCCESS))
		vc_req->inhdr->status = VIRTIO_CRYPTO_ERR;

	if (vc_req->zero_copy == 0)
		write_back_data(vc_req);

	vc_req->vq->used->ring[desc_idx].id = desc_idx;
	vc_req->vq->used->ring[desc_idx].len = vc_req->len;

	rte_mempool_put(m_src->pool, (void *)m_src);

	if (m_dst)
		rte_mempool_put(m_dst->pool, (void *)m_dst);

	return vc_req->vq;
}
static __rte_always_inline uint16_t
vhost_crypto_complete_one_vm_requests(struct rte_crypto_op **ops,
		uint16_t nb_ops, int *callfd)
{
	uint16_t processed = 1;
	struct vhost_virtqueue *vq, *tmp_vq;

	if (unlikely(nb_ops == 0))
		return 0;

	vq = vhost_crypto_finalize_one_request(ops[0], NULL);
	if (unlikely(vq == NULL))
		return 0;
	tmp_vq = vq;

	while ((processed < nb_ops)) {
		tmp_vq = vhost_crypto_finalize_one_request(ops[processed],
				tmp_vq);
		if (unlikely(vq != tmp_vq))
			break;
		processed++;
	}

	*callfd = vq->callfd;

	*(volatile uint16_t *)&vq->used->idx += processed;

	return processed;
}
int
rte_vhost_crypto_create(int vid, uint8_t cryptodev_id,
		struct rte_mempool *sess_pool,
		struct rte_mempool *sess_priv_pool,
		int socket_id)
{
	struct virtio_net *dev = get_device(vid);
	struct rte_hash_parameters params = {0};
	struct vhost_crypto *vcrypto;
	char name[128];
	int ret;

	if (!dev) {
		VC_LOG_ERR("Invalid vid %i", vid);
		return -EINVAL;
	}

	ret = rte_vhost_driver_set_features(dev->ifname,
			VIRTIO_CRYPTO_FEATURES);
	if (ret < 0) {
		VC_LOG_ERR("Error setting features");
		return -1;
	}

	vcrypto = rte_zmalloc_socket(NULL, sizeof(*vcrypto),
			RTE_CACHE_LINE_SIZE, socket_id);
	if (!vcrypto) {
		VC_LOG_ERR("Insufficient memory");
		return -ENOMEM;
	}

	vcrypto->sess_pool = sess_pool;
	vcrypto->sess_priv_pool = sess_priv_pool;
	vcrypto->cid = cryptodev_id;
	vcrypto->cache_session_id = UINT64_MAX;
	vcrypto->last_session_id = 1;
	vcrypto->dev = dev;
	vcrypto->option = RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE;

	snprintf(name, 127, "HASH_VHOST_CRYPT_%u", (uint32_t)vid);
	params.name = name;
	params.entries = VHOST_CRYPTO_SESSION_MAP_ENTRIES;
	params.hash_func = rte_jhash;
	params.key_len = sizeof(uint64_t);
	params.socket_id = socket_id;
	vcrypto->session_map = rte_hash_create(&params);
	if (!vcrypto->session_map) {
		VC_LOG_ERR("Failed to create session map");
		ret = -ENOMEM;
		goto error_exit;
	}

	snprintf(name, 127, "MBUF_POOL_VM_%u", (uint32_t)vid);
	vcrypto->mbuf_pool = rte_pktmbuf_pool_create(name,
			VHOST_CRYPTO_MBUF_POOL_SIZE, 512,
			sizeof(struct vhost_crypto_data_req),
			RTE_MBUF_DEFAULT_DATAROOM * 2 + RTE_PKTMBUF_HEADROOM,
			rte_socket_id());
	if (!vcrypto->mbuf_pool) {
		VC_LOG_ERR("Failed to create mbuf pool");
		ret = -ENOMEM;
		goto error_exit;
	}

	snprintf(name, 127, "WB_POOL_VM_%u", (uint32_t)vid);
	vcrypto->wb_pool = rte_mempool_create(name,
			VHOST_CRYPTO_MBUF_POOL_SIZE,
			sizeof(struct vhost_crypto_writeback_data),
			128, 0, NULL, NULL, NULL, NULL,
			rte_socket_id(), 0);
	if (!vcrypto->wb_pool) {
		VC_LOG_ERR("Failed to create mempool");
		ret = -ENOMEM;
		goto error_exit;
	}

	dev->extern_data = vcrypto;
	dev->extern_ops.pre_msg_handle = NULL;
	dev->extern_ops.post_msg_handle = vhost_crypto_msg_post_handler;

	return 0;

error_exit:
	if (vcrypto->session_map)
		rte_hash_free(vcrypto->session_map);
	if (vcrypto->mbuf_pool)
		rte_mempool_free(vcrypto->mbuf_pool);

	rte_free(vcrypto);

	return ret;
}
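
/*
 * A minimal polling-loop sketch of how an application is expected to drive
 * this API (mirrors examples/vhost_crypto; "cid", "qid", "BURST" and the op
 * pool are illustrative assumptions, not values mandated by this library):
 *
 *	struct rte_crypto_op *ops[BURST];
 *	int callfds[VIRTIO_CRYPTO_MAX_NUM_BURST_VQS];
 *	uint16_t nb_callfds, i, n;
 *
 *	// ops[] previously allocated with rte_crypto_op_bulk_alloc()
 *	n = rte_vhost_crypto_fetch_requests(vid, qid, ops, BURST);
 *	n = rte_cryptodev_enqueue_burst(cid, 0, ops, n);
 *	n = rte_cryptodev_dequeue_burst(cid, 0, ops, n);
 *	n = rte_vhost_crypto_finalize_requests(ops, n, callfds, &nb_callfds);
 *	for (i = 0; i < nb_callfds; i++)
 *		eventfd_write(callfds[i], (eventfd_t)1); // kick the guests
 */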
int
rte_vhost_crypto_free(int vid)
{
	struct virtio_net *dev = get_device(vid);
	struct vhost_crypto *vcrypto;

	if (unlikely(dev == NULL)) {
		VC_LOG_ERR("Invalid vid %i", vid);
		return -EINVAL;
	}

	vcrypto = dev->extern_data;
	if (unlikely(vcrypto == NULL)) {
		VC_LOG_ERR("Cannot find required data, is it initialized?");
		return -ENOENT;
	}

	rte_hash_free(vcrypto->session_map);
	rte_mempool_free(vcrypto->mbuf_pool);
	rte_mempool_free(vcrypto->wb_pool);
	rte_free(vcrypto);

	dev->extern_data = NULL;
	dev->extern_ops.pre_msg_handle = NULL;
	dev->extern_ops.post_msg_handle = NULL;

	return 0;
}
int
rte_vhost_crypto_set_zero_copy(int vid, enum rte_vhost_crypto_zero_copy option)
{
	struct virtio_net *dev = get_device(vid);
	struct vhost_crypto *vcrypto;

	if (unlikely(dev == NULL)) {
		VC_LOG_ERR("Invalid vid %i", vid);
		return -EINVAL;
	}

	if (unlikely((uint32_t)option >=
			RTE_VHOST_CRYPTO_MAX_ZERO_COPY_OPTIONS)) {
		VC_LOG_ERR("Invalid option %i", option);
		return -EINVAL;
	}

	vcrypto = (struct vhost_crypto *)dev->extern_data;
	if (unlikely(vcrypto == NULL)) {
		VC_LOG_ERR("Cannot find required data, is it initialized?");
		return -ENOENT;
	}

	if (vcrypto->option == (uint8_t)option)
		return 0;

	if (!(rte_mempool_full(vcrypto->mbuf_pool)) ||
			!(rte_mempool_full(vcrypto->wb_pool))) {
		VC_LOG_ERR("Cannot update zero copy as mempool is not full");
		return -EINVAL;
	}

	if (option == RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE) {
		char name[128];

		snprintf(name, 127, "WB_POOL_VM_%u", (uint32_t)vid);
		vcrypto->wb_pool = rte_mempool_create(name,
				VHOST_CRYPTO_MBUF_POOL_SIZE,
				sizeof(struct vhost_crypto_writeback_data),
				128, 0, NULL, NULL, NULL, NULL,
				rte_socket_id(), 0);
		if (!vcrypto->wb_pool) {
			VC_LOG_ERR("Failed to create mbuf pool");
			return -ENOMEM;
		}
	} else {
		rte_mempool_free(vcrypto->wb_pool);
		vcrypto->wb_pool = NULL;
	}

	vcrypto->option = (uint8_t)option;

	return 0;
}
uint16_t
rte_vhost_crypto_fetch_requests(int vid, uint32_t qid,
		struct rte_crypto_op **ops, uint16_t nb_ops)
{
	struct rte_mbuf *mbufs[VHOST_CRYPTO_MAX_BURST_SIZE * 2];
	struct virtio_net *dev = get_device(vid);
	struct vhost_crypto *vcrypto;
	struct vhost_virtqueue *vq;
	uint16_t avail_idx;
	uint16_t start_idx;
	uint16_t count;
	uint16_t i = 0;

	if (unlikely(dev == NULL)) {
		VC_LOG_ERR("Invalid vid %i", vid);
		return 0;
	}

	if (unlikely(qid >= VHOST_MAX_QUEUE_PAIRS)) {
		VC_LOG_ERR("Invalid qid %u", qid);
		return 0;
	}

	vcrypto = (struct vhost_crypto *)dev->extern_data;
	if (unlikely(vcrypto == NULL)) {
		VC_LOG_ERR("Cannot find required data, is it initialized?");
		return 0;
	}

	vq = dev->virtqueue[qid];

	avail_idx = *((volatile uint16_t *)&vq->avail->idx);
	start_idx = vq->last_used_idx;
	count = avail_idx - start_idx;
	count = RTE_MIN(count, VHOST_CRYPTO_MAX_BURST_SIZE);
	count = RTE_MIN(count, nb_ops);

	if (unlikely(count == 0))
		return 0;

	/* for zero copy, we need 2 empty mbufs for src and dst, otherwise
	 * we need only 1 mbuf as src and dst
	 */
	switch (vcrypto->option) {
	case RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE:
		if (unlikely(rte_mempool_get_bulk(vcrypto->mbuf_pool,
				(void **)mbufs, count * 2) < 0)) {
			VC_LOG_ERR("Insufficient memory");
			return 0;
		}

		for (i = 0; i < count; i++) {
			uint16_t used_idx = (start_idx + i) & (vq->size - 1);
			uint16_t desc_idx = vq->avail->ring[used_idx];
			struct vring_desc *head = &vq->desc[desc_idx];
			struct rte_crypto_op *op = ops[i];

			op->sym->m_src = mbufs[i * 2];
			op->sym->m_dst = mbufs[i * 2 + 1];
			op->sym->m_src->data_off = 0;
			op->sym->m_dst->data_off = 0;

			if (unlikely(vhost_crypto_process_one_req(vcrypto, vq,
					op, head, desc_idx) < 0))
				break;
		}

		if (unlikely(i < count))
			rte_mempool_put_bulk(vcrypto->mbuf_pool,
					(void **)&mbufs[i * 2],
					(count - i) * 2);

		break;

	case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE:
		if (unlikely(rte_mempool_get_bulk(vcrypto->mbuf_pool,
				(void **)mbufs, count) < 0)) {
			VC_LOG_ERR("Insufficient memory");
			return 0;
		}

		for (i = 0; i < count; i++) {
			uint16_t used_idx = (start_idx + i) & (vq->size - 1);
			uint16_t desc_idx = vq->avail->ring[used_idx];
			struct vring_desc *head = &vq->desc[desc_idx];
			struct rte_crypto_op *op = ops[i];

			op->sym->m_src = mbufs[i];
			op->sym->m_dst = NULL;
			op->sym->m_src->data_off = 0;

			if (unlikely(vhost_crypto_process_one_req(vcrypto, vq,
					op, head, desc_idx) < 0))
				break;
		}

		if (unlikely(i < count))
			rte_mempool_put_bulk(vcrypto->mbuf_pool,
					(void **)&mbufs[i],
					count - i);

		break;
	}

	vq->last_used_idx += i;

	return i;
}
uint16_t
rte_vhost_crypto_finalize_requests(struct rte_crypto_op **ops,
		uint16_t nb_ops, int *callfds, uint16_t *nb_callfds)
{
	struct rte_crypto_op **tmp_ops = ops;
	uint16_t count = 0, left = nb_ops;
	int callfd;
	uint16_t idx = 0;

	while (left) {
		count = vhost_crypto_complete_one_vm_requests(tmp_ops, left,
				&callfd);
		if (unlikely(count == 0))
			break;

		left -= count;
		tmp_ops = &tmp_ops[count];

		callfds[idx++] = callfd;

		if (unlikely(idx >= VIRTIO_CRYPTO_MAX_NUM_BURST_VQS)) {
			VC_LOG_ERR("Too many vqs");
			break;
		}
	}

	*nb_callfds = idx;

	return nb_ops - left;
}