/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017-2018 Intel Corporation
 */
#include <rte_malloc.h>
#include <rte_hash.h>
#include <rte_jhash.h>
#include <rte_mbuf.h>
#include <rte_cryptodev.h>

#include "rte_vhost_crypto.h"
#include "vhost.h"
#include "vhost_user.h"
#include "virtio_crypto.h"

#define INHDR_LEN		(sizeof(struct virtio_crypto_inhdr))
#define IV_OFFSET		(sizeof(struct rte_crypto_op) + \
				sizeof(struct rte_crypto_sym_op))

#ifdef RTE_LIBRTE_VHOST_DEBUG
#define VC_LOG_ERR(fmt, args...)				\
	RTE_LOG(ERR, USER1, "[%s] %s() line %u: " fmt "\n",	\
		"Vhost-Crypto", __func__, __LINE__, ## args)
#define VC_LOG_INFO(fmt, args...)				\
	RTE_LOG(INFO, USER1, "[%s] %s() line %u: " fmt "\n",	\
		"Vhost-Crypto", __func__, __LINE__, ## args)

#define VC_LOG_DBG(fmt, args...)				\
	RTE_LOG(DEBUG, USER1, "[%s] %s() line %u: " fmt "\n",	\
		"Vhost-Crypto", __func__, __LINE__, ## args)
#else
#define VC_LOG_ERR(fmt, args...)				\
	RTE_LOG(ERR, USER1, "[VHOST-Crypto]: " fmt "\n", ## args)
#define VC_LOG_INFO(fmt, args...)				\
	RTE_LOG(INFO, USER1, "[VHOST-Crypto]: " fmt "\n", ## args)
#define VC_LOG_DBG(fmt, args...)
#endif

#define VIRTIO_CRYPTO_FEATURES ((1 << VIRTIO_F_NOTIFY_ON_EMPTY) |	\
		(1 << VIRTIO_RING_F_INDIRECT_DESC) |			\
		(1 << VIRTIO_RING_F_EVENT_IDX) |			\
		(1 << VIRTIO_CRYPTO_SERVICE_CIPHER) |			\
		(1 << VIRTIO_CRYPTO_SERVICE_MAC) |			\
		(1 << VIRTIO_NET_F_CTRL_VQ) |				\
		(1 << VHOST_USER_PROTOCOL_F_CONFIG))

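/*
 * Map a guest IOVA in the context of a data request (r) to a host virtual
 * address, cast to type t. On return, *l holds the length of the contiguous
 * region actually mapped; callers compare it against the descriptor length
 * to detect buffers that cross non-contiguous pages.
 */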
#define IOVA_TO_VVA(t, r, a, l, p)					\
	((t)(uintptr_t)vhost_iova_to_vva(r->dev, r->vq, a, l, p))

/*
 * vhost_crypto_desc is used to copy the original vring_desc into a local
 * buffer before processing (except the next index). The copied result is an
 * array of vhost_crypto_desc elements that follows the order in which the
 * original vring_desc.next chain is arranged.
 */
#define vhost_crypto_desc vring_desc

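/*
 * Translate a virtio-crypto cipher algorithm ID into the equivalent DPDK
 * rte_crypto_cipher_algorithm. Returns 0 on success, or a negative
 * virtio-crypto status code for unsupported or unknown algorithms.
 */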
static int
cipher_algo_transform(uint32_t virtio_cipher_algo,
		enum rte_crypto_cipher_algorithm *algo)
{
	switch (virtio_cipher_algo) {
	case VIRTIO_CRYPTO_CIPHER_AES_CBC:
		*algo = RTE_CRYPTO_CIPHER_AES_CBC;
		break;
	case VIRTIO_CRYPTO_CIPHER_AES_CTR:
		*algo = RTE_CRYPTO_CIPHER_AES_CTR;
		break;
	case VIRTIO_CRYPTO_CIPHER_DES_ECB:
		return -VIRTIO_CRYPTO_NOTSUPP;
	case VIRTIO_CRYPTO_CIPHER_DES_CBC:
		*algo = RTE_CRYPTO_CIPHER_DES_CBC;
		break;
	case VIRTIO_CRYPTO_CIPHER_3DES_ECB:
		*algo = RTE_CRYPTO_CIPHER_3DES_ECB;
		break;
	case VIRTIO_CRYPTO_CIPHER_3DES_CBC:
		*algo = RTE_CRYPTO_CIPHER_3DES_CBC;
		break;
	case VIRTIO_CRYPTO_CIPHER_3DES_CTR:
		*algo = RTE_CRYPTO_CIPHER_3DES_CTR;
		break;
	case VIRTIO_CRYPTO_CIPHER_KASUMI_F8:
		*algo = RTE_CRYPTO_CIPHER_KASUMI_F8;
		break;
	case VIRTIO_CRYPTO_CIPHER_SNOW3G_UEA2:
		*algo = RTE_CRYPTO_CIPHER_SNOW3G_UEA2;
		break;
	case VIRTIO_CRYPTO_CIPHER_AES_F8:
		*algo = RTE_CRYPTO_CIPHER_AES_F8;
		break;
	case VIRTIO_CRYPTO_CIPHER_AES_XTS:
		*algo = RTE_CRYPTO_CIPHER_AES_XTS;
		break;
	case VIRTIO_CRYPTO_CIPHER_ZUC_EEA3:
		*algo = RTE_CRYPTO_CIPHER_ZUC_EEA3;
		break;
	default:
		return -VIRTIO_CRYPTO_BADMSG;
	}

	return 0;
}

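/*
 * Translate a virtio-crypto MAC algorithm ID into the equivalent DPDK
 * rte_crypto_auth_algorithm. Returns 0 on success, or a negative
 * virtio-crypto status code for unsupported or unknown algorithms.
 */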
static int
auth_algo_transform(uint32_t virtio_auth_algo,
		enum rte_crypto_auth_algorithm *algo)
{
	switch (virtio_auth_algo) {
	case VIRTIO_CRYPTO_NO_MAC:
		*algo = RTE_CRYPTO_AUTH_NULL;
		break;
	case VIRTIO_CRYPTO_MAC_HMAC_MD5:
		*algo = RTE_CRYPTO_AUTH_MD5_HMAC;
		break;
	case VIRTIO_CRYPTO_MAC_HMAC_SHA1:
		*algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
		break;
	case VIRTIO_CRYPTO_MAC_HMAC_SHA_224:
		*algo = RTE_CRYPTO_AUTH_SHA224_HMAC;
		break;
	case VIRTIO_CRYPTO_MAC_HMAC_SHA_256:
		*algo = RTE_CRYPTO_AUTH_SHA256_HMAC;
		break;
	case VIRTIO_CRYPTO_MAC_HMAC_SHA_384:
		*algo = RTE_CRYPTO_AUTH_SHA384_HMAC;
		break;
	case VIRTIO_CRYPTO_MAC_HMAC_SHA_512:
		*algo = RTE_CRYPTO_AUTH_SHA512_HMAC;
		break;
	case VIRTIO_CRYPTO_MAC_CMAC_AES:
		*algo = RTE_CRYPTO_AUTH_AES_CMAC;
		break;
	case VIRTIO_CRYPTO_MAC_KASUMI_F9:
		*algo = RTE_CRYPTO_AUTH_KASUMI_F9;
		break;
	case VIRTIO_CRYPTO_MAC_SNOW3G_UIA2:
		*algo = RTE_CRYPTO_AUTH_SNOW3G_UIA2;
		break;
	case VIRTIO_CRYPTO_MAC_GMAC_AES:
		*algo = RTE_CRYPTO_AUTH_AES_GMAC;
		break;
	case VIRTIO_CRYPTO_MAC_CBCMAC_AES:
		*algo = RTE_CRYPTO_AUTH_AES_CBC_MAC;
		break;
	case VIRTIO_CRYPTO_MAC_XCBC_AES:
		*algo = RTE_CRYPTO_AUTH_AES_XCBC_MAC;
		break;
	case VIRTIO_CRYPTO_MAC_CMAC_3DES:
	case VIRTIO_CRYPTO_MAC_GMAC_TWOFISH:
	case VIRTIO_CRYPTO_MAC_CBCMAC_KASUMI_F9:
		return -VIRTIO_CRYPTO_NOTSUPP;
	default:
		return -VIRTIO_CRYPTO_BADMSG;
	}

	return 0;
}

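/*
 * Return the IV length in bytes expected for the given cipher algorithm,
 * or -1 for algorithms not handled here yet.
 */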
static int get_iv_len(enum rte_crypto_cipher_algorithm algo)
{
	int len;

	switch (algo) {
	case RTE_CRYPTO_CIPHER_3DES_CBC:
	case RTE_CRYPTO_CIPHER_3DES_CTR:
	case RTE_CRYPTO_CIPHER_3DES_ECB:
		len = 8;
		break;
	case RTE_CRYPTO_CIPHER_AES_CBC:
		len = 16;
		break;

	/* TODO: add common algos */

	default:
		len = -1;
		break;
	}

	return len;
}

/*
 * vhost_crypto struct is used to maintain a number of virtio_cryptos and
 * one DPDK crypto device that deals with all crypto workloads.
 */
struct vhost_crypto {
	/** Used to look up a DPDK Cryptodev session based on the VIRTIO
	 *  crypto session ID.
	 */
	struct rte_hash *session_map;
	struct rte_mempool *mbuf_pool;
	struct rte_mempool *sess_pool;
	struct rte_mempool *sess_priv_pool;
	struct rte_mempool *wb_pool;

	/** DPDK cryptodev ID */
	uint8_t cid;
	uint16_t nb_qps;

	uint64_t last_session_id;

	uint64_t cache_session_id;
	struct rte_cryptodev_sym_session *cache_session;
	/** socket id for the device */
	int socket_id;

	struct virtio_net *dev;

	uint8_t option;
} __rte_cache_aligned;

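/*
 * One segment of write-back work performed after an op is dequeued from the
 * cryptodev: copy len bytes from src (host-side result buffer) to dst (guest
 * descriptor memory). Segments are chained to cover scattered guest buffers.
 */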
struct vhost_crypto_writeback_data {
	uint8_t *src;
	uint8_t *dst;
	uint64_t len;
	struct vhost_crypto_writeback_data *next;
};

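/*
 * Per-request context kept in the private area of the source mbuf: the
 * descriptor chain being processed, the owning device and virtqueue, the
 * location of the status header, and the pending write-back list used in
 * non-zero-copy mode.
 */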
struct vhost_crypto_data_req {
	struct vring_desc *head;
	struct virtio_net *dev;
	struct virtio_crypto_inhdr *inhdr;
	struct vhost_virtqueue *vq;
	struct vhost_crypto_writeback_data *wb;
	struct rte_mempool *wb_pool;
	uint16_t desc_idx;
	uint16_t len;
	uint16_t zero_copy;
};

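/*
 * Fill a DPDK cipher-only transform from the session parameters carried in
 * a vhost-user message. Returns 0 on success or a negative virtio-crypto
 * status code.
 */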
static int
transform_cipher_param(struct rte_crypto_sym_xform *xform,
		VhostUserCryptoSessionParam *param)
{
	int ret;

	ret = cipher_algo_transform(param->cipher_algo, &xform->cipher.algo);
	if (unlikely(ret < 0))
		return ret;

	if (param->cipher_key_len > VHOST_USER_CRYPTO_MAX_CIPHER_KEY_LENGTH) {
		VC_LOG_DBG("Invalid cipher key length");
		return -VIRTIO_CRYPTO_BADMSG;
	}

	xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
	xform->cipher.key.length = param->cipher_key_len;
	if (xform->cipher.key.length > 0)
		xform->cipher.key.data = param->cipher_key_buf;

	if (param->dir == VIRTIO_CRYPTO_OP_ENCRYPT)
		xform->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
	else if (param->dir == VIRTIO_CRYPTO_OP_DECRYPT)
		xform->cipher.op = RTE_CRYPTO_CIPHER_OP_DECRYPT;
	else {
		VC_LOG_DBG("Bad operation type");
		return -VIRTIO_CRYPTO_BADMSG;
	}

	ret = get_iv_len(xform->cipher.algo);
	if (unlikely(ret < 0))
		return ret;
	xform->cipher.iv.length = (uint16_t)ret;
	xform->cipher.iv.offset = IV_OFFSET;

	return 0;
}

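/*
 * Fill a chained cipher + auth transform pair from the session parameters
 * carried in a vhost-user message. The chaining direction selects which
 * element of the two-entry xform chain is cipher and which is auth, along
 * with the matching encrypt/generate or decrypt/verify operation pair.
 */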
static int
transform_chain_param(struct rte_crypto_sym_xform *xforms,
		VhostUserCryptoSessionParam *param)
{
	struct rte_crypto_sym_xform *xform_cipher, *xform_auth;
	int ret;

	switch (param->chaining_dir) {
	case VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_HASH_THEN_CIPHER:
		xform_auth = xforms;
		xform_cipher = xforms->next;
		xform_cipher->cipher.op = RTE_CRYPTO_CIPHER_OP_DECRYPT;
		xform_auth->auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
		break;
	case VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_CIPHER_THEN_HASH:
		xform_cipher = xforms;
		xform_auth = xforms->next;
		xform_cipher->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
		xform_auth->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
		break;
	default:
		return -VIRTIO_CRYPTO_BADMSG;
	}

	/* cipher */
	ret = cipher_algo_transform(param->cipher_algo,
			&xform_cipher->cipher.algo);
	if (unlikely(ret < 0))
		return ret;

	if (param->cipher_key_len > VHOST_USER_CRYPTO_MAX_CIPHER_KEY_LENGTH) {
		VC_LOG_DBG("Invalid cipher key length");
		return -VIRTIO_CRYPTO_BADMSG;
	}

	xform_cipher->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
	xform_cipher->cipher.key.length = param->cipher_key_len;
	xform_cipher->cipher.key.data = param->cipher_key_buf;
	ret = get_iv_len(xform_cipher->cipher.algo);
	if (unlikely(ret < 0))
		return ret;
	xform_cipher->cipher.iv.length = (uint16_t)ret;
	xform_cipher->cipher.iv.offset = IV_OFFSET;

	/* auth */
	xform_auth->type = RTE_CRYPTO_SYM_XFORM_AUTH;
	ret = auth_algo_transform(param->hash_algo, &xform_auth->auth.algo);
	if (unlikely(ret < 0))
		return ret;

	if (param->auth_key_len > VHOST_USER_CRYPTO_MAX_HMAC_KEY_LENGTH) {
		VC_LOG_DBG("Invalid auth key length");
		return -VIRTIO_CRYPTO_BADMSG;
	}

	xform_auth->auth.digest_length = param->digest_len;
	xform_auth->auth.key.length = param->auth_key_len;
	xform_auth->auth.key.data = param->auth_key_buf;

	return 0;
}

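/*
 * Create a cryptodev session for a guest-requested virtio-crypto session.
 * On success the new session ID is reported back through
 * sess_param->session_id; on failure a negative virtio-crypto status is
 * written there instead.
 */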
static void
vhost_crypto_create_sess(struct vhost_crypto *vcrypto,
		VhostUserCryptoSessionParam *sess_param)
{
	struct rte_crypto_sym_xform xform1 = {0}, xform2 = {0};
	struct rte_cryptodev_sym_session *session;
	int ret;

	switch (sess_param->op_type) {
	case VIRTIO_CRYPTO_SYM_OP_NONE:
	case VIRTIO_CRYPTO_SYM_OP_CIPHER:
		ret = transform_cipher_param(&xform1, sess_param);
		if (unlikely(ret)) {
			VC_LOG_ERR("Error transform session msg (%i)", ret);
			sess_param->session_id = ret;
			return;
		}
		break;
	case VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING:
		if (unlikely(sess_param->hash_mode !=
				VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH)) {
			sess_param->session_id = -VIRTIO_CRYPTO_NOTSUPP;
			VC_LOG_ERR("Error transform session message (%i)",
					-VIRTIO_CRYPTO_NOTSUPP);
			return;
		}

		xform1.next = &xform2;

		ret = transform_chain_param(&xform1, sess_param);
		if (unlikely(ret)) {
			VC_LOG_ERR("Error transform session message (%i)",
					ret);
			sess_param->session_id = ret;
			return;
		}
		break;
	default:
		VC_LOG_ERR("Algorithm not yet supported");
		sess_param->session_id = -VIRTIO_CRYPTO_NOTSUPP;
		return;
	}

	session = rte_cryptodev_sym_session_create(vcrypto->sess_pool);
	if (!session) {
		VC_LOG_ERR("Failed to create session");
		sess_param->session_id = -VIRTIO_CRYPTO_ERR;
		return;
	}

	if (rte_cryptodev_sym_session_init(vcrypto->cid, session, &xform1,
			vcrypto->sess_priv_pool) < 0) {
		VC_LOG_ERR("Failed to initialize session");
		sess_param->session_id = -VIRTIO_CRYPTO_ERR;
		return;
	}

	/* insert the session into the hash map */
	if (rte_hash_add_key_data(vcrypto->session_map,
			&vcrypto->last_session_id, session) < 0) {
		VC_LOG_ERR("Failed to insert session to hash table");

		if (rte_cryptodev_sym_session_clear(vcrypto->cid, session) < 0)
			VC_LOG_ERR("Failed to clear session");

		if (rte_cryptodev_sym_session_free(session) < 0)
			VC_LOG_ERR("Failed to free session");

		sess_param->session_id = -VIRTIO_CRYPTO_ERR;
		return;
	}

	VC_LOG_INFO("Session %"PRIu64" created for vdev %i.",
			vcrypto->last_session_id, vcrypto->dev->vid);

	sess_param->session_id = vcrypto->last_session_id;
	vcrypto->last_session_id++;
}

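/*
 * Tear down the cryptodev session identified by a guest session ID and
 * remove it from the lookup table. Returns 0 on success or a negative
 * virtio-crypto status code.
 */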
static int
vhost_crypto_close_sess(struct vhost_crypto *vcrypto, uint64_t session_id)
{
	struct rte_cryptodev_sym_session *session;
	uint64_t sess_id = session_id;
	int ret;

	ret = rte_hash_lookup_data(vcrypto->session_map, &sess_id,
			(void **)&session);
	if (unlikely(ret < 0)) {
		VC_LOG_ERR("Failed to delete session %"PRIu64".", session_id);
		return -VIRTIO_CRYPTO_INVSESS;
	}

	if (rte_cryptodev_sym_session_clear(vcrypto->cid, session) < 0) {
		VC_LOG_DBG("Failed to clear session");
		return -VIRTIO_CRYPTO_ERR;
	}

	if (rte_cryptodev_sym_session_free(session) < 0) {
		VC_LOG_DBG("Failed to free session");
		return -VIRTIO_CRYPTO_ERR;
	}

	if (rte_hash_del_key(vcrypto->session_map, &sess_id) < 0) {
		VC_LOG_DBG("Failed to delete session from hash table.");
		return -VIRTIO_CRYPTO_ERR;
	}

	VC_LOG_INFO("Session %"PRIu64" deleted for vdev %i.", sess_id,
			vcrypto->dev->vid);

	return 0;
}

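/*
 * vhost-user post-message handler: intercepts the crypto session create and
 * close requests carried over the vhost-user channel and dispatches them to
 * the handlers above. Other requests are left to the generic handling.
 */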
static enum rte_vhost_msg_result
vhost_crypto_msg_post_handler(int vid, void *msg)
{
	struct virtio_net *dev = get_device(vid);
	struct vhost_crypto *vcrypto;
	VhostUserMsg *vmsg = msg;
	enum rte_vhost_msg_result ret = RTE_VHOST_MSG_RESULT_OK;

	if (dev == NULL) {
		VC_LOG_ERR("Invalid vid %i", vid);
		return RTE_VHOST_MSG_RESULT_ERR;
	}

	vcrypto = dev->extern_data;
	if (vcrypto == NULL) {
		VC_LOG_ERR("Cannot find required data, is it initialized?");
		return RTE_VHOST_MSG_RESULT_ERR;
	}

	switch (vmsg->request.master) {
	case VHOST_USER_CRYPTO_CREATE_SESS:
		vhost_crypto_create_sess(vcrypto,
				&vmsg->payload.crypto_session);
		vmsg->fd_num = 0;
		ret = RTE_VHOST_MSG_RESULT_REPLY;
		break;
	case VHOST_USER_CRYPTO_CLOSE_SESS:
		if (vhost_crypto_close_sess(vcrypto, vmsg->payload.u64))
			ret = RTE_VHOST_MSG_RESULT_ERR;
		break;
	default:
		ret = RTE_VHOST_MSG_RESULT_NOT_HANDLED;
		break;
	}

	return ret;
}

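/*
 * Starting from desc, locate the first write-able descriptor in the local
 * copy of the chain; the result and status are written there.
 */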
static __rte_always_inline struct vhost_crypto_desc *
find_write_desc(struct vhost_crypto_desc *head, struct vhost_crypto_desc *desc,
		uint32_t max_n_descs)
{
	if (desc < head)
		return NULL;

	while (desc - head < (int)max_n_descs) {
		if (desc->flags & VRING_DESC_F_WRITE)
			return desc;
		desc++;
	}

	return NULL;
}

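/*
 * Map the last descriptor of the chain, which is expected to hold the
 * virtio_crypto_inhdr where the request status is reported back to the
 * guest.
 */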
static __rte_always_inline struct virtio_crypto_inhdr *
reach_inhdr(struct vhost_crypto_data_req *vc_req,
		struct vhost_crypto_desc *head,
		uint32_t max_n_descs)
{
	struct virtio_crypto_inhdr *inhdr;
	struct vhost_crypto_desc *last = head + (max_n_descs - 1);
	uint64_t dlen = last->len;

	if (unlikely(dlen != sizeof(*inhdr)))
		return NULL;

	inhdr = IOVA_TO_VVA(struct virtio_crypto_inhdr *, vc_req, last->addr,
			&dlen, VHOST_ACCESS_WO);
	if (unlikely(!inhdr || dlen != last->len))
		return NULL;

	return inhdr;
}

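/*
 * Advance *cur_desc past `size` bytes of chained descriptors. Returns 0 on
 * success and -1 if the chain ends before `size` bytes are covered.
 */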
static __rte_always_inline int
move_desc(struct vhost_crypto_desc *head,
		struct vhost_crypto_desc **cur_desc,
		uint32_t size, uint32_t max_n_descs)
{
	struct vhost_crypto_desc *desc = *cur_desc;
	int left = size - desc->len;

	while (desc->flags & VRING_DESC_F_NEXT && left > 0 &&
			desc >= head &&
			desc - head < (int)max_n_descs) {
		desc++;
		left -= desc->len;
	}

	if (unlikely(left > 0))
		return -1;

	if (unlikely(desc - head == (int)max_n_descs))
		*cur_desc = NULL;
	else
		*cur_desc = desc + 1;

	return 0;
}

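/*
 * Map the region described by cur_desc with the requested access
 * permission, failing if the region is not contiguous in host memory.
 */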
static __rte_always_inline void *
get_data_ptr(struct vhost_crypto_data_req *vc_req,
		struct vhost_crypto_desc *cur_desc,
		uint8_t perm)
{
	void *data;
	uint64_t dlen = cur_desc->len;

	data = IOVA_TO_VVA(void *, vc_req, cur_desc->addr, &dlen, perm);
	if (unlikely(!data || dlen != cur_desc->len)) {
		VC_LOG_ERR("Failed to map object");
		return NULL;
	}

	return data;
}

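/*
 * Gather `size` bytes from the (possibly scattered, possibly cross-page)
 * descriptor chain into the flat buffer dst_data, advancing *cur_desc as
 * descriptors are consumed. Returns 0 on success, -1 on a malformed chain.
 */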
static __rte_always_inline int
copy_data(void *dst_data, struct vhost_crypto_data_req *vc_req,
		struct vhost_crypto_desc *head,
		struct vhost_crypto_desc **cur_desc,
		uint32_t size, uint32_t max_n_descs)
{
	struct vhost_crypto_desc *desc = *cur_desc;
	uint64_t remain, addr, dlen, len;
	uint32_t to_copy;
	uint8_t *data = dst_data;
	uint8_t *src;
	int left = size;

	to_copy = RTE_MIN(desc->len, (uint32_t)left);
	dlen = to_copy;
	src = IOVA_TO_VVA(uint8_t *, vc_req, desc->addr, &dlen,
			VHOST_ACCESS_RO);
	if (unlikely(!src || !dlen))
		return -1;

	rte_memcpy((uint8_t *)data, src, dlen);
	data += dlen;

	if (unlikely(dlen < to_copy)) {
		remain = to_copy - dlen;
		addr = desc->addr + dlen;

		while (remain) {
			len = remain;
			src = IOVA_TO_VVA(uint8_t *, vc_req, addr, &len,
					VHOST_ACCESS_RO);
			if (unlikely(!src || !len)) {
				VC_LOG_ERR("Failed to map descriptor");
				return -1;
			}

			rte_memcpy(data, src, len);
			data += len;
			remain -= len;
			addr += len;
		}
	}

	left -= to_copy;

	while (desc >= head && desc - head < (int)max_n_descs && left) {
		desc++;
		to_copy = RTE_MIN(desc->len, (uint32_t)left);
		dlen = to_copy;
		src = IOVA_TO_VVA(uint8_t *, vc_req, desc->addr, &dlen,
				VHOST_ACCESS_RO);
		if (unlikely(!src || !dlen)) {
			VC_LOG_ERR("Failed to map descriptor");
			return -1;
		}

		rte_memcpy(data, src, dlen);
		data += dlen;

		if (unlikely(dlen < to_copy)) {
			remain = to_copy - dlen;
			addr = desc->addr + dlen;

			while (remain) {
				len = remain;
				src = IOVA_TO_VVA(uint8_t *, vc_req, addr,
						&len, VHOST_ACCESS_RO);
				if (unlikely(!src || !len)) {
					VC_LOG_ERR("Failed to map descriptor");
					return -1;
				}

				rte_memcpy(data, src, len);
				data += len;
				remain -= len;
				addr += len;
			}
		}

		left -= to_copy;
	}

	if (unlikely(left > 0)) {
		VC_LOG_ERR("Incorrect virtio descriptor");
		return -1;
	}

	if (unlikely(desc - head == (int)max_n_descs))
		*cur_desc = NULL;
	else
		*cur_desc = desc + 1;

	return 0;
}

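/*
 * Flush the request's write-back list: copy each produced segment back into
 * guest memory, scrub the host-side copy, and return the elements to their
 * mempool.
 */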
static __rte_always_inline void
write_back_data(struct vhost_crypto_data_req *vc_req)
{
	struct vhost_crypto_writeback_data *wb_data = vc_req->wb, *wb_last;

	while (wb_data) {
		rte_memcpy(wb_data->dst, wb_data->src, wb_data->len);
		memset(wb_data->src, 0, wb_data->len);
		wb_last = wb_data;
		wb_data = wb_data->next;
		rte_mempool_put(vc_req->wb_pool, wb_last);
	}
}

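/*
 * Release a write-back list back to its mempool, freeing the tail of the
 * list before the current element.
 */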
static __rte_always_inline void
free_wb_data(struct vhost_crypto_writeback_data *wb_data,
		struct rte_mempool *mp)
{
	if (wb_data->next != NULL)
		free_wb_data(wb_data->next, mp);

	rte_mempool_put(mp, wb_data);
}

/**
 * The function will allocate a vhost_crypto_writeback_data linked list
 * containing the source and destination data pointers for the write back
 * operation after dequeued from Cryptodev PMD queues.
 *
 * @param vc_req
 *   The vhost crypto data request pointer
 * @param cur_desc
 *   The pointer of the current in use descriptor pointer. The content of
 *   cur_desc is expected to be updated after the function execution.
 * @param end_wb_data
 *   The last write back data element to be returned. It is used only in
 *   cipher and hash chain operations.
 * @param src
 *   The source data pointer
 * @param offset
 *   The offset to both source and destination data. For source data the
 *   offset is the number of bytes between src and the start point of the
 *   cipher operation. For destination data the offset is the number of bytes
 *   from *cur_desc->addr to the point where the src will be written to.
 * @param write_back_len
 *   The size of the write back length.
 * @return
 *   The pointer to the start of the write back data linked list.
 */
static __rte_always_inline struct vhost_crypto_writeback_data *
prepare_write_back_data(struct vhost_crypto_data_req *vc_req,
		struct vhost_crypto_desc *head_desc,
		struct vhost_crypto_desc **cur_desc,
		struct vhost_crypto_writeback_data **end_wb_data,
		uint8_t *src,
		uint32_t offset,
		uint64_t write_back_len,
		uint32_t max_n_descs)
{
	struct vhost_crypto_writeback_data *wb_data, *head = NULL;
	struct vhost_crypto_desc *desc = *cur_desc;
	uint64_t dlen;
	uint8_t *dst;
	int ret;

	ret = rte_mempool_get(vc_req->wb_pool, (void **)&head);
	if (unlikely(ret < 0)) {
		VC_LOG_ERR("no memory");
		goto error_exit;
	}

	wb_data = head;

	if (likely(desc->len > offset)) {
		wb_data->src = src + offset;
		dlen = desc->len;
		dst = IOVA_TO_VVA(uint8_t *, vc_req, desc->addr,
				&dlen, VHOST_ACCESS_RW);
		if (unlikely(!dst || dlen != desc->len)) {
			VC_LOG_ERR("Failed to map descriptor");
			goto error_exit;
		}

		wb_data->dst = dst + offset;
		wb_data->len = RTE_MIN(dlen - offset, write_back_len);
		write_back_len -= wb_data->len;
		src += offset + wb_data->len;
		offset = 0;

		if (unlikely(write_back_len)) {
			ret = rte_mempool_get(vc_req->wb_pool,
					(void **)&(wb_data->next));
			if (unlikely(ret < 0)) {
				VC_LOG_ERR("no memory");
				goto error_exit;
			}

			wb_data = wb_data->next;
		} else
			wb_data->next = NULL;
	} else
		offset -= desc->len;

	while (write_back_len &&
			desc >= head_desc &&
			desc - head_desc < (int)max_n_descs) {
		desc++;
		if (unlikely(!(desc->flags & VRING_DESC_F_WRITE))) {
			VC_LOG_ERR("incorrect descriptor");
			goto error_exit;
		}

		if (desc->len <= offset) {
			offset -= desc->len;
			continue;
		}

		dlen = desc->len;
		dst = IOVA_TO_VVA(uint8_t *, vc_req, desc->addr, &dlen,
				VHOST_ACCESS_RW) + offset;
		if (unlikely(dst == NULL || dlen != desc->len)) {
			VC_LOG_ERR("Failed to map descriptor");
			goto error_exit;
		}

		wb_data->src = src + offset;
		wb_data->dst = dst;
		wb_data->len = RTE_MIN(desc->len - offset, write_back_len);
		write_back_len -= wb_data->len;
		src += wb_data->len;
		offset = 0;

		if (write_back_len) {
			ret = rte_mempool_get(vc_req->wb_pool,
					(void **)&(wb_data->next));
			if (unlikely(ret < 0)) {
				VC_LOG_ERR("no memory");
				goto error_exit;
			}

			wb_data = wb_data->next;
		} else
			wb_data->next = NULL;
	}

	if (unlikely(desc - head_desc == (int)max_n_descs))
		*cur_desc = NULL;
	else
		*cur_desc = desc + 1;

	*end_wb_data = wb_data;

	return head;

error_exit:
	if (head)
		free_wb_data(head, vc_req->wb_pool);

	return NULL;
}

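/*
 * Basic sanity check on guest-supplied cipher request lengths before any
 * data is copied or mapped.
 */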
static __rte_always_inline uint8_t
vhost_crypto_check_cipher_request(struct virtio_crypto_cipher_data_req *req)
{
	if (likely((req->para.iv_len <= VHOST_CRYPTO_MAX_IV_LEN) &&
		(req->para.src_data_len <= RTE_MBUF_DEFAULT_BUF_SIZE) &&
		(req->para.dst_data_len >= req->para.src_data_len) &&
		(req->para.dst_data_len <= RTE_MBUF_DEFAULT_BUF_SIZE)))
		return VIRTIO_CRYPTO_OK;
	return VIRTIO_CRYPTO_BADMSG;
}

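/*
 * Translate a guest cipher-only request into a ready-to-enqueue rte_crypto
 * op: copy the IV into the op, attach the source data (zero-copy mapping or
 * copy into the mbuf, depending on the configured option), locate the
 * write-able destination, and record where the status header lives.
 */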
static __rte_always_inline uint8_t
prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
		struct vhost_crypto_data_req *vc_req,
		struct virtio_crypto_cipher_data_req *cipher,
		struct vhost_crypto_desc *head,
		uint32_t max_n_descs)
{
	struct vhost_crypto_desc *desc = head;
	struct vhost_crypto_writeback_data *ewb = NULL;
	struct rte_mbuf *m_src = op->sym->m_src, *m_dst = op->sym->m_dst;
	uint8_t *iv_data = rte_crypto_op_ctod_offset(op, uint8_t *, IV_OFFSET);
	uint8_t ret = vhost_crypto_check_cipher_request(cipher);

	if (unlikely(ret != VIRTIO_CRYPTO_OK))
		goto error_exit;

	/* prepare */
	/* iv */
	if (unlikely(copy_data(iv_data, vc_req, head, &desc,
			cipher->para.iv_len, max_n_descs))) {
		ret = VIRTIO_CRYPTO_BADMSG;
		goto error_exit;
	}

	/* src */
	switch (vcrypto->option) {
	case RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE:
		m_src->data_len = cipher->para.src_data_len;
		m_src->buf_iova = gpa_to_hpa(vcrypto->dev, desc->addr,
				cipher->para.src_data_len);
		m_src->buf_addr = get_data_ptr(vc_req, desc, VHOST_ACCESS_RO);
		if (unlikely(m_src->buf_iova == 0 ||
				m_src->buf_addr == NULL)) {
			VC_LOG_ERR("zero_copy may fail due to cross page data");
			ret = VIRTIO_CRYPTO_ERR;
			goto error_exit;
		}

		if (unlikely(move_desc(head, &desc, cipher->para.src_data_len,
				max_n_descs) < 0)) {
			VC_LOG_ERR("Incorrect descriptor");
			ret = VIRTIO_CRYPTO_ERR;
			goto error_exit;
		}

		break;
	case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE:
		vc_req->wb_pool = vcrypto->wb_pool;
		m_src->data_len = cipher->para.src_data_len;
		if (unlikely(copy_data(rte_pktmbuf_mtod(m_src, uint8_t *),
				vc_req, head, &desc, cipher->para.src_data_len,
				max_n_descs) < 0)) {
			ret = VIRTIO_CRYPTO_BADMSG;
			goto error_exit;
		}
		break;
	default:
		ret = VIRTIO_CRYPTO_BADMSG;
		goto error_exit;
	}

	/* dst */
	desc = find_write_desc(head, desc, max_n_descs);
	if (unlikely(!desc)) {
		VC_LOG_ERR("Cannot find write location");
		ret = VIRTIO_CRYPTO_BADMSG;
		goto error_exit;
	}

	switch (vcrypto->option) {
	case RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE:
		m_dst->buf_iova = gpa_to_hpa(vcrypto->dev,
				desc->addr, cipher->para.dst_data_len);
		m_dst->buf_addr = get_data_ptr(vc_req, desc, VHOST_ACCESS_RW);
		if (unlikely(m_dst->buf_iova == 0 || m_dst->buf_addr == NULL)) {
			VC_LOG_ERR("zero_copy may fail due to cross page data");
			ret = VIRTIO_CRYPTO_ERR;
			goto error_exit;
		}

		if (unlikely(move_desc(head, &desc, cipher->para.dst_data_len,
				max_n_descs) < 0)) {
			VC_LOG_ERR("Incorrect descriptor");
			ret = VIRTIO_CRYPTO_ERR;
			goto error_exit;
		}

		m_dst->data_len = cipher->para.dst_data_len;
		break;
	case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE:
		vc_req->wb = prepare_write_back_data(vc_req, head, &desc, &ewb,
				rte_pktmbuf_mtod(m_src, uint8_t *), 0,
				cipher->para.dst_data_len, max_n_descs);
		if (unlikely(vc_req->wb == NULL)) {
			ret = VIRTIO_CRYPTO_ERR;
			goto error_exit;
		}
		break;
	default:
		ret = VIRTIO_CRYPTO_BADMSG;
		goto error_exit;
	}

	/* op */
	op->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
	op->sess_type = RTE_CRYPTO_OP_WITH_SESSION;

	op->sym->cipher.data.offset = 0;
	op->sym->cipher.data.length = cipher->para.src_data_len;

	vc_req->inhdr = get_data_ptr(vc_req, desc, VHOST_ACCESS_WO);
	if (unlikely(vc_req->inhdr == NULL)) {
		ret = VIRTIO_CRYPTO_BADMSG;
		goto error_exit;
	}

	vc_req->inhdr->status = VIRTIO_CRYPTO_OK;
	vc_req->len = cipher->para.dst_data_len + INHDR_LEN;

	return 0;

error_exit:
	if (vc_req->wb)
		free_wb_data(vc_req->wb, vc_req->wb_pool);

	vc_req->len = INHDR_LEN;

	return ret;
}

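/*
 * Sanity check on guest-supplied chain request lengths and offsets,
 * including that the cipher and hash regions fall within the source data.
 */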
static __rte_always_inline uint8_t
vhost_crypto_check_chain_request(struct virtio_crypto_alg_chain_data_req *req)
{
	if (likely((req->para.iv_len <= VHOST_CRYPTO_MAX_IV_LEN) &&
		(req->para.src_data_len <= VHOST_CRYPTO_MAX_DATA_SIZE) &&
		(req->para.dst_data_len >= req->para.src_data_len) &&
		(req->para.dst_data_len <= VHOST_CRYPTO_MAX_DATA_SIZE) &&
		(req->para.cipher_start_src_offset <
			VHOST_CRYPTO_MAX_DATA_SIZE) &&
		(req->para.len_to_cipher <= VHOST_CRYPTO_MAX_DATA_SIZE) &&
		(req->para.hash_start_src_offset <
			VHOST_CRYPTO_MAX_DATA_SIZE) &&
		(req->para.len_to_hash <= VHOST_CRYPTO_MAX_DATA_SIZE) &&
		(req->para.cipher_start_src_offset + req->para.len_to_cipher <=
			req->para.src_data_len) &&
		(req->para.hash_start_src_offset + req->para.len_to_hash <=
			req->para.src_data_len) &&
		(req->para.dst_data_len + req->para.hash_result_len <=
			VHOST_CRYPTO_MAX_DATA_SIZE)))
		return VIRTIO_CRYPTO_OK;
	return VIRTIO_CRYPTO_BADMSG;
}

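/*
 * Translate a guest cipher+hash chain request into a ready-to-enqueue
 * rte_crypto op. Like the cipher-only path, but additionally maps (or
 * prepares write-back for) the digest produced by the hash stage.
 */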
static __rte_always_inline uint8_t
prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
		struct vhost_crypto_data_req *vc_req,
		struct virtio_crypto_alg_chain_data_req *chain,
		struct vhost_crypto_desc *head,
		uint32_t max_n_descs)
{
	struct vhost_crypto_desc *desc = head, *digest_desc;
	struct vhost_crypto_writeback_data *ewb = NULL, *ewb2 = NULL;
	struct rte_mbuf *m_src = op->sym->m_src, *m_dst = op->sym->m_dst;
	uint8_t *iv_data = rte_crypto_op_ctod_offset(op, uint8_t *, IV_OFFSET);
	uint32_t digest_offset;
	void *digest_addr;
	uint8_t ret = vhost_crypto_check_chain_request(chain);

	if (unlikely(ret != VIRTIO_CRYPTO_OK))
		goto error_exit;

	/* prepare */
	/* iv */
	if (unlikely(copy_data(iv_data, vc_req, head, &desc,
			chain->para.iv_len, max_n_descs) < 0)) {
		ret = VIRTIO_CRYPTO_BADMSG;
		goto error_exit;
	}

	/* src */
	switch (vcrypto->option) {
	case RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE:
		m_src->data_len = chain->para.src_data_len;
		m_dst->data_len = chain->para.dst_data_len;

		m_src->buf_iova = gpa_to_hpa(vcrypto->dev, desc->addr,
				chain->para.src_data_len);
		m_src->buf_addr = get_data_ptr(vc_req, desc, VHOST_ACCESS_RO);
		if (unlikely(m_src->buf_iova == 0 || m_src->buf_addr == NULL)) {
			VC_LOG_ERR("zero_copy may fail due to cross page data");
			ret = VIRTIO_CRYPTO_ERR;
			goto error_exit;
		}

		if (unlikely(move_desc(head, &desc, chain->para.src_data_len,
				max_n_descs) < 0)) {
			VC_LOG_ERR("Incorrect descriptor");
			ret = VIRTIO_CRYPTO_ERR;
			goto error_exit;
		}
		break;
	case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE:
		vc_req->wb_pool = vcrypto->wb_pool;
		m_src->data_len = chain->para.src_data_len;
		if (unlikely(copy_data(rte_pktmbuf_mtod(m_src, uint8_t *),
				vc_req, head, &desc, chain->para.src_data_len,
				max_n_descs) < 0)) {
			ret = VIRTIO_CRYPTO_BADMSG;
			goto error_exit;
		}
		break;
	default:
		ret = VIRTIO_CRYPTO_BADMSG;
		goto error_exit;
	}

	/* dst */
	desc = find_write_desc(head, desc, max_n_descs);
	if (unlikely(!desc)) {
		VC_LOG_ERR("Cannot find write location");
		ret = VIRTIO_CRYPTO_BADMSG;
		goto error_exit;
	}

	switch (vcrypto->option) {
	case RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE:
		m_dst->buf_iova = gpa_to_hpa(vcrypto->dev,
				desc->addr, chain->para.dst_data_len);
		m_dst->buf_addr = get_data_ptr(vc_req, desc, VHOST_ACCESS_RW);
		if (unlikely(m_dst->buf_iova == 0 || m_dst->buf_addr == NULL)) {
			VC_LOG_ERR("zero_copy may fail due to cross page data");
			ret = VIRTIO_CRYPTO_ERR;
			goto error_exit;
		}

		if (unlikely(move_desc(vc_req->head, &desc,
				chain->para.dst_data_len, max_n_descs) < 0)) {
			VC_LOG_ERR("Incorrect descriptor");
			ret = VIRTIO_CRYPTO_ERR;
			goto error_exit;
		}

		/* digest */
		op->sym->auth.digest.phys_addr = gpa_to_hpa(vcrypto->dev,
				desc->addr, chain->para.hash_result_len);
		op->sym->auth.digest.data = get_data_ptr(vc_req, desc,
				VHOST_ACCESS_RW);
		if (unlikely(op->sym->auth.digest.phys_addr == 0)) {
			VC_LOG_ERR("zero_copy may fail due to cross page data");
			ret = VIRTIO_CRYPTO_ERR;
			goto error_exit;
		}

		if (unlikely(move_desc(head, &desc,
				chain->para.hash_result_len,
				max_n_descs) < 0)) {
			VC_LOG_ERR("Incorrect descriptor");
			ret = VIRTIO_CRYPTO_ERR;
			goto error_exit;
		}
		break;
	case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE:
		vc_req->wb = prepare_write_back_data(vc_req, head, &desc, &ewb,
				rte_pktmbuf_mtod(m_src, uint8_t *),
				chain->para.cipher_start_src_offset,
				chain->para.dst_data_len -
				chain->para.cipher_start_src_offset,
				max_n_descs);
		if (unlikely(vc_req->wb == NULL)) {
			ret = VIRTIO_CRYPTO_ERR;
			goto error_exit;
		}

		digest_desc = desc;
		digest_offset = m_src->data_len;
		digest_addr = rte_pktmbuf_mtod_offset(m_src, void *,
				digest_offset);

		/** create a wb_data for digest */
		ewb->next = prepare_write_back_data(vc_req, head, &desc,
				&ewb2, digest_addr, 0,
				chain->para.hash_result_len, max_n_descs);
		if (unlikely(ewb->next == NULL)) {
			ret = VIRTIO_CRYPTO_ERR;
			goto error_exit;
		}

		if (unlikely(copy_data(digest_addr, vc_req, head, &digest_desc,
				chain->para.hash_result_len,
				max_n_descs) < 0)) {
			ret = VIRTIO_CRYPTO_BADMSG;
			goto error_exit;
		}

		op->sym->auth.digest.data = digest_addr;
		op->sym->auth.digest.phys_addr = rte_pktmbuf_iova_offset(m_src,
				digest_offset);
		break;
	default:
		ret = VIRTIO_CRYPTO_BADMSG;
		goto error_exit;
	}

	/* record inhdr */
	vc_req->inhdr = get_data_ptr(vc_req, desc, VHOST_ACCESS_WO);
	if (unlikely(vc_req->inhdr == NULL)) {
		ret = VIRTIO_CRYPTO_BADMSG;
		goto error_exit;
	}

	vc_req->inhdr->status = VIRTIO_CRYPTO_OK;

	op->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
	op->sess_type = RTE_CRYPTO_OP_WITH_SESSION;

	op->sym->cipher.data.offset = chain->para.cipher_start_src_offset;
	op->sym->cipher.data.length = chain->para.src_data_len -
			chain->para.cipher_start_src_offset;

	op->sym->auth.data.offset = chain->para.hash_start_src_offset;
	op->sym->auth.data.length = chain->para.len_to_hash;

	vc_req->len = chain->para.dst_data_len + chain->para.hash_result_len +
			INHDR_LEN;
	return 0;

error_exit:
	if (vc_req->wb)
		free_wb_data(vc_req->wb, vc_req->wb_pool);
	vc_req->len = INHDR_LEN;
	return ret;
}

/**
 * Process one request from an indirect descriptor chain.
 */
static __rte_always_inline int
vhost_crypto_process_one_req(struct vhost_crypto *vcrypto,
		struct vhost_virtqueue *vq, struct rte_crypto_op *op,
		struct vring_desc *head, struct vhost_crypto_desc *descs,
		uint16_t desc_idx)
{
	struct vhost_crypto_data_req *vc_req = rte_mbuf_to_priv(op->sym->m_src);
	struct rte_cryptodev_sym_session *session;
	struct virtio_crypto_op_data_req req;
	struct virtio_crypto_inhdr *inhdr;
	struct vhost_crypto_desc *desc = descs;
	struct vring_desc *src_desc;
	uint64_t session_id;
	uint64_t dlen;
	uint32_t nb_descs = 0, max_n_descs, i;
	int err;

	vc_req->desc_idx = desc_idx;
	vc_req->dev = vcrypto->dev;
	vc_req->vq = vq;

	if (unlikely((head->flags & VRING_DESC_F_INDIRECT) == 0)) {
		VC_LOG_ERR("Invalid descriptor");
		return -1;
	}

	dlen = head->len;
	src_desc = IOVA_TO_VVA(struct vring_desc *, vc_req, head->addr,
			&dlen, VHOST_ACCESS_RO);
	if (unlikely(!src_desc || dlen != head->len)) {
		VC_LOG_ERR("Invalid descriptor");
		return -1;
	}
	head = src_desc;

	nb_descs = max_n_descs = dlen / sizeof(struct vring_desc);
	if (unlikely(nb_descs > VHOST_CRYPTO_MAX_N_DESC || nb_descs == 0)) {
		err = VIRTIO_CRYPTO_ERR;
		VC_LOG_ERR("Cannot process num of descriptors %u", nb_descs);
		if (nb_descs > 0) {
			struct vring_desc *inhdr_desc = head;
			while (inhdr_desc->flags & VRING_DESC_F_NEXT) {
				if (inhdr_desc->next >= max_n_descs)
					return -1;
				inhdr_desc = &head[inhdr_desc->next];
			}
			if (inhdr_desc->len != sizeof(*inhdr))
				return -1;
			inhdr = IOVA_TO_VVA(struct virtio_crypto_inhdr *,
					vc_req, inhdr_desc->addr, &dlen,
					VHOST_ACCESS_WO);
			if (unlikely(!inhdr || dlen != inhdr_desc->len))
				return -1;
			inhdr->status = VIRTIO_CRYPTO_ERR;
		}
		return -1;
	}

	/* copy descriptors to local variable */
	for (i = 0; i < max_n_descs; i++) {
		desc->addr = src_desc->addr;
		desc->len = src_desc->len;
		desc->flags = src_desc->flags;
		desc++;
		if (unlikely((src_desc->flags & VRING_DESC_F_NEXT) == 0))
			break;
		if (unlikely(src_desc->next >= max_n_descs)) {
			err = VIRTIO_CRYPTO_BADMSG;
			VC_LOG_ERR("Invalid descriptor");
			goto error_exit;
		}
		src_desc = &head[src_desc->next];
	}

	vc_req->head = head;
	vc_req->zero_copy = vcrypto->option;

	nb_descs = desc - descs;
	desc = descs;

	if (unlikely(desc->len < sizeof(req))) {
		err = VIRTIO_CRYPTO_BADMSG;
		VC_LOG_ERR("Invalid descriptor");
		goto error_exit;
	}

	if (unlikely(copy_data(&req, vc_req, descs, &desc, sizeof(req),
			max_n_descs) < 0)) {
		err = VIRTIO_CRYPTO_BADMSG;
		VC_LOG_ERR("Invalid descriptor");
		goto error_exit;
	}

	/* desc is advanced by 1 now */
	max_n_descs -= 1;

	switch (req.header.opcode) {
	case VIRTIO_CRYPTO_CIPHER_ENCRYPT:
	case VIRTIO_CRYPTO_CIPHER_DECRYPT:
		session_id = req.header.session_id;

		/* one branch to avoid unnecessary table lookup */
		if (vcrypto->cache_session_id != session_id) {
			err = rte_hash_lookup_data(vcrypto->session_map,
					&session_id, (void **)&session);
			if (unlikely(err < 0)) {
				err = VIRTIO_CRYPTO_ERR;
				VC_LOG_ERR("Failed to find session %"PRIu64,
						session_id);
				goto error_exit;
			}

			vcrypto->cache_session = session;
			vcrypto->cache_session_id = session_id;
		}

		session = vcrypto->cache_session;

		err = rte_crypto_op_attach_sym_session(op, session);
		if (unlikely(err < 0)) {
			err = VIRTIO_CRYPTO_ERR;
			VC_LOG_ERR("Failed to attach session to op");
			goto error_exit;
		}

		switch (req.u.sym_req.op_type) {
		case VIRTIO_CRYPTO_SYM_OP_NONE:
			err = VIRTIO_CRYPTO_NOTSUPP;
			break;
		case VIRTIO_CRYPTO_SYM_OP_CIPHER:
			err = prepare_sym_cipher_op(vcrypto, op, vc_req,
					&req.u.sym_req.u.cipher, desc,
					max_n_descs);
			break;
		case VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING:
			err = prepare_sym_chain_op(vcrypto, op, vc_req,
					&req.u.sym_req.u.chain, desc,
					max_n_descs);
			break;
		}
		if (unlikely(err != 0)) {
			VC_LOG_ERR("Failed to process sym request");
			goto error_exit;
		}
		break;
	default:
		err = VIRTIO_CRYPTO_ERR;
		VC_LOG_ERR("Unsupported symmetric crypto request type %u",
				req.header.opcode);
		goto error_exit;
	}

	return 0;

error_exit:

	inhdr = reach_inhdr(vc_req, descs, max_n_descs);
	if (likely(inhdr != NULL))
		inhdr->status = (uint8_t)err;

	return -1;
}

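/*
 * Complete one dequeued op: write the status (and, in non-zero-copy mode,
 * the result data) back to the guest and fill the used ring entry. Returns
 * the virtqueue the op belongs to, so the caller can batch ops per queue.
 */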
static __rte_always_inline struct vhost_virtqueue *
vhost_crypto_finalize_one_request(struct rte_crypto_op *op,
		struct vhost_virtqueue *old_vq)
{
	struct rte_mbuf *m_src = op->sym->m_src;
	struct rte_mbuf *m_dst = op->sym->m_dst;
	struct vhost_crypto_data_req *vc_req = rte_mbuf_to_priv(m_src);
	struct vhost_virtqueue *vq;
	uint16_t used_idx, desc_idx;

	if (unlikely(!vc_req)) {
		VC_LOG_ERR("Failed to retrieve vc_req");
		return NULL;
	}

	vq = vc_req->vq;
	used_idx = vc_req->desc_idx;

	if (old_vq && (vq != old_vq))
		return vq;

	if (unlikely(op->status != RTE_CRYPTO_OP_STATUS_SUCCESS))
		vc_req->inhdr->status = VIRTIO_CRYPTO_ERR;
	else {
		if (vc_req->zero_copy == 0)
			write_back_data(vc_req);
	}

	desc_idx = vq->avail->ring[used_idx];
	vq->used->ring[desc_idx].id = vq->avail->ring[desc_idx];
	vq->used->ring[desc_idx].len = vc_req->len;

	rte_mempool_put(m_src->pool, (void *)m_src);

	if (m_dst)
		rte_mempool_put(m_dst->pool, (void *)m_dst);

	return vc_req->vq;
}

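/*
 * Finalize a burst of ops that belong to one virtqueue. Stops early when an
 * op from a different virtqueue is met; returns the number of ops completed
 * and the queue's kick fd through *callfd.
 */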
static __rte_always_inline uint16_t
vhost_crypto_complete_one_vm_requests(struct rte_crypto_op **ops,
		uint16_t nb_ops, int *callfd)
{
	uint16_t processed = 1;
	struct vhost_virtqueue *vq, *tmp_vq;

	if (unlikely(nb_ops == 0))
		return 0;

	vq = vhost_crypto_finalize_one_request(ops[0], NULL);
	if (unlikely(vq == NULL))
		return 0;
	tmp_vq = vq;

	while ((processed < nb_ops)) {
		tmp_vq = vhost_crypto_finalize_one_request(ops[processed],
				tmp_vq);

		if (unlikely(vq != tmp_vq))
			break;

		processed++;
	}

	*callfd = vq->callfd;

	*(volatile uint16_t *)&vq->used->idx += processed;

	return processed;
}

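/*
 * Public API: attach a vhost crypto back end to the vhost device `vid`,
 * binding it to one cryptodev and creating the per-device session map,
 * mbuf pool and write-back pool.
 */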
int
rte_vhost_crypto_create(int vid, uint8_t cryptodev_id,
		struct rte_mempool *sess_pool,
		struct rte_mempool *sess_priv_pool,
		int socket_id)
{
	struct virtio_net *dev = get_device(vid);
	struct rte_hash_parameters params = {0};
	struct vhost_crypto *vcrypto;
	char name[128];
	int ret;

	if (!dev) {
		VC_LOG_ERR("Invalid vid %i", vid);
		return -EINVAL;
	}

	ret = rte_vhost_driver_set_features(dev->ifname,
			VIRTIO_CRYPTO_FEATURES);
	if (ret < 0) {
		VC_LOG_ERR("Error setting features");
		return -1;
	}

	vcrypto = rte_zmalloc_socket(NULL, sizeof(*vcrypto),
			RTE_CACHE_LINE_SIZE, socket_id);
	if (!vcrypto) {
		VC_LOG_ERR("Insufficient memory");
		return -ENOMEM;
	}

	vcrypto->sess_pool = sess_pool;
	vcrypto->sess_priv_pool = sess_priv_pool;
	vcrypto->cid = cryptodev_id;
	vcrypto->cache_session_id = UINT64_MAX;
	vcrypto->last_session_id = 1;
	vcrypto->dev = dev;
	vcrypto->option = RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE;

	snprintf(name, 127, "HASH_VHOST_CRYPT_%u", (uint32_t)vid);
	params.name = name;
	params.entries = VHOST_CRYPTO_SESSION_MAP_ENTRIES;
	params.hash_func = rte_jhash;
	params.key_len = sizeof(uint64_t);
	params.socket_id = socket_id;
	vcrypto->session_map = rte_hash_create(&params);
	if (!vcrypto->session_map) {
		VC_LOG_ERR("Failed to create session map");
		ret = -ENOMEM;
		goto error_exit;
	}

	snprintf(name, 127, "MBUF_POOL_VM_%u", (uint32_t)vid);
	vcrypto->mbuf_pool = rte_pktmbuf_pool_create(name,
			VHOST_CRYPTO_MBUF_POOL_SIZE, 512,
			sizeof(struct vhost_crypto_data_req),
			VHOST_CRYPTO_MAX_DATA_SIZE + RTE_PKTMBUF_HEADROOM,
			rte_socket_id());
	if (!vcrypto->mbuf_pool) {
		VC_LOG_ERR("Failed to create mbuf pool");
		ret = -ENOMEM;
		goto error_exit;
	}

	snprintf(name, 127, "WB_POOL_VM_%u", (uint32_t)vid);
	vcrypto->wb_pool = rte_mempool_create(name,
			VHOST_CRYPTO_MBUF_POOL_SIZE,
			sizeof(struct vhost_crypto_writeback_data),
			128, 0, NULL, NULL, NULL, NULL,
			rte_socket_id(), 0);
	if (!vcrypto->wb_pool) {
		VC_LOG_ERR("Failed to create write-back pool");
		ret = -ENOMEM;
		goto error_exit;
	}

	dev->extern_data = vcrypto;
	dev->extern_ops.pre_msg_handle = NULL;
	dev->extern_ops.post_msg_handle = vhost_crypto_msg_post_handler;

	return 0;

error_exit:
	if (vcrypto->session_map)
		rte_hash_free(vcrypto->session_map);
	if (vcrypto->mbuf_pool)
		rte_mempool_free(vcrypto->mbuf_pool);

	rte_free(vcrypto);

	return ret;
}

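/*
 * Public API: detach and free the vhost crypto back end previously created
 * for the vhost device `vid`.
 */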
int
rte_vhost_crypto_free(int vid)
{
	struct virtio_net *dev = get_device(vid);
	struct vhost_crypto *vcrypto;

	if (unlikely(dev == NULL)) {
		VC_LOG_ERR("Invalid vid %i", vid);
		return -EINVAL;
	}

	vcrypto = dev->extern_data;
	if (unlikely(vcrypto == NULL)) {
		VC_LOG_ERR("Cannot find required data, is it initialized?");
		return -ENOENT;
	}

	rte_hash_free(vcrypto->session_map);
	rte_mempool_free(vcrypto->mbuf_pool);
	rte_mempool_free(vcrypto->wb_pool);
	rte_free(vcrypto);

	dev->extern_data = NULL;
	dev->extern_ops.pre_msg_handle = NULL;
	dev->extern_ops.post_msg_handle = NULL;

	return 0;
}

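/*
 * Public API: switch the device between zero-copy and copy mode. Only safe
 * while no requests are in flight, which is approximated here by requiring
 * both pools to be full.
 */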
int
rte_vhost_crypto_set_zero_copy(int vid, enum rte_vhost_crypto_zero_copy option)
{
	struct virtio_net *dev = get_device(vid);
	struct vhost_crypto *vcrypto;

	if (unlikely(dev == NULL)) {
		VC_LOG_ERR("Invalid vid %i", vid);
		return -EINVAL;
	}

	if (unlikely((uint32_t)option >=
			RTE_VHOST_CRYPTO_MAX_ZERO_COPY_OPTIONS)) {
		VC_LOG_ERR("Invalid option %i", option);
		return -EINVAL;
	}

	vcrypto = (struct vhost_crypto *)dev->extern_data;
	if (unlikely(vcrypto == NULL)) {
		VC_LOG_ERR("Cannot find required data, is it initialized?");
		return -ENOENT;
	}

	if (vcrypto->option == (uint8_t)option)
		return 0;

	if (!(rte_mempool_full(vcrypto->mbuf_pool)) ||
			!(rte_mempool_full(vcrypto->wb_pool))) {
		VC_LOG_ERR("Cannot update zero copy as mempool is not full");
		return -EINVAL;
	}

	if (option == RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE) {
		char name[128];

		snprintf(name, 127, "WB_POOL_VM_%u", (uint32_t)vid);
		vcrypto->wb_pool = rte_mempool_create(name,
				VHOST_CRYPTO_MBUF_POOL_SIZE,
				sizeof(struct vhost_crypto_writeback_data),
				128, 0, NULL, NULL, NULL, NULL,
				rte_socket_id(), 0);
		if (!vcrypto->wb_pool) {
			VC_LOG_ERR("Failed to create write-back pool");
			return -ENOMEM;
		}
	} else {
		rte_mempool_free(vcrypto->wb_pool);
		vcrypto->wb_pool = NULL;
	}

	vcrypto->option = (uint8_t)option;

	return 0;
}

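/*
 * Public API: fetch up to nb_ops requests from virtqueue `qid` and turn
 * them into crypto ops ready to be enqueued to the cryptodev. Zero-copy
 * mode consumes two mbufs per request (src and dst); copy mode uses one.
 */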
uint16_t
rte_vhost_crypto_fetch_requests(int vid, uint32_t qid,
		struct rte_crypto_op **ops, uint16_t nb_ops)
{
	struct rte_mbuf *mbufs[VHOST_CRYPTO_MAX_BURST_SIZE * 2];
	struct vhost_crypto_desc descs[VHOST_CRYPTO_MAX_N_DESC];
	struct virtio_net *dev = get_device(vid);
	struct vhost_crypto *vcrypto;
	struct vhost_virtqueue *vq;
	uint16_t avail_idx;
	uint16_t start_idx;
	uint16_t count;
	uint16_t i = 0;

	if (unlikely(dev == NULL)) {
		VC_LOG_ERR("Invalid vid %i", vid);
		return 0;
	}

	if (unlikely(qid >= VHOST_MAX_QUEUE_PAIRS)) {
		VC_LOG_ERR("Invalid qid %u", qid);
		return 0;
	}

	vcrypto = (struct vhost_crypto *)dev->extern_data;
	if (unlikely(vcrypto == NULL)) {
		VC_LOG_ERR("Cannot find required data, is it initialized?");
		return 0;
	}

	vq = dev->virtqueue[qid];

	avail_idx = *((volatile uint16_t *)&vq->avail->idx);
	start_idx = vq->last_used_idx;
	count = avail_idx - start_idx;
	count = RTE_MIN(count, VHOST_CRYPTO_MAX_BURST_SIZE);
	count = RTE_MIN(count, nb_ops);

	if (unlikely(count == 0))
		return 0;

	/* for zero copy, we need 2 empty mbufs for src and dst, otherwise
	 * we need only 1 mbuf as src and dst
	 */
	switch (vcrypto->option) {
	case RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE:
		if (unlikely(rte_mempool_get_bulk(vcrypto->mbuf_pool,
				(void **)mbufs, count * 2) < 0)) {
			VC_LOG_ERR("Insufficient memory");
			return 0;
		}

		for (i = 0; i < count; i++) {
			uint16_t used_idx = (start_idx + i) & (vq->size - 1);
			uint16_t desc_idx = vq->avail->ring[used_idx];
			struct vring_desc *head = &vq->desc[desc_idx];
			struct rte_crypto_op *op = ops[i];

			op->sym->m_src = mbufs[i * 2];
			op->sym->m_dst = mbufs[i * 2 + 1];
			op->sym->m_src->data_off = 0;
			op->sym->m_dst->data_off = 0;

			if (unlikely(vhost_crypto_process_one_req(vcrypto, vq,
					op, head, descs, used_idx) < 0))
				break;
		}

		if (unlikely(i < count))
			rte_mempool_put_bulk(vcrypto->mbuf_pool,
					(void **)&mbufs[i * 2],
					(count - i) * 2);

		break;

	case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE:
		if (unlikely(rte_mempool_get_bulk(vcrypto->mbuf_pool,
				(void **)mbufs, count) < 0)) {
			VC_LOG_ERR("Insufficient memory");
			return 0;
		}

		for (i = 0; i < count; i++) {
			uint16_t used_idx = (start_idx + i) & (vq->size - 1);
			uint16_t desc_idx = vq->avail->ring[used_idx];
			struct vring_desc *head = &vq->desc[desc_idx];
			struct rte_crypto_op *op = ops[i];

			op->sym->m_src = mbufs[i];
			op->sym->m_dst = NULL;
			op->sym->m_src->data_off = 0;

			if (unlikely(vhost_crypto_process_one_req(vcrypto, vq,
					op, head, descs, used_idx) < 0))
				break;
		}

		if (unlikely(i < count))
			rte_mempool_put_bulk(vcrypto->mbuf_pool,
					(void **)&mbufs[i],
					count - i);

		break;
	}

	vq->last_used_idx += i;

	return i;
}

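/*
 * Public API: write finished ops back to their guests, grouped per
 * virtqueue. The kick fds that need to be signalled are collected in
 * callfds / *nb_callfds.
 */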
uint16_t
rte_vhost_crypto_finalize_requests(struct rte_crypto_op **ops,
		uint16_t nb_ops, int *callfds, uint16_t *nb_callfds)
{
	struct rte_crypto_op **tmp_ops = ops;
	uint16_t count = 0, left = nb_ops;
	int callfd;
	uint16_t idx = 0;

	while (left) {
		count = vhost_crypto_complete_one_vm_requests(tmp_ops, left,
				&callfd);
		if (unlikely(count == 0))
			break;

		tmp_ops = &tmp_ops[count];
		left -= count;

		callfds[idx++] = callfd;

		if (unlikely(idx >= VIRTIO_CRYPTO_MAX_NUM_BURST_VQS)) {
			VC_LOG_ERR("Too many vqs");
			break;
		}
	}

	*nb_callfds = idx;

	return nb_ops - left;
}