/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017-2018 Intel Corporation
 */
#include <inttypes.h>

#include <rte_malloc.h>
#include <rte_cryptodev.h>

#include "vhost_user.h"
#include "virtio_crypto.h"
/* IV is carried in the private area trailing each crypto op, right after the
 * symmetric op descriptor.
 */
#define IV_OFFSET		(sizeof(struct rte_crypto_op) + \
		sizeof(struct rte_crypto_sym_op))

#ifdef RTE_LIBRTE_VHOST_DEBUG
/* Debug build: log with function name and line number for traceability. */
#define VC_LOG_ERR(fmt, args...)				\
	RTE_LOG(ERR, USER1, "[%s] %s() line %u: " fmt "\n",	\
		"Vhost-Crypto", __func__, __LINE__, ## args)
#define VC_LOG_INFO(fmt, args...)				\
	RTE_LOG(INFO, USER1, "[%s] %s() line %u: " fmt "\n",	\
		"Vhost-Crypto", __func__, __LINE__, ## args)

#define VC_LOG_DBG(fmt, args...)				\
	RTE_LOG(DEBUG, USER1, "[%s] %s() line %u: " fmt "\n",	\
		"Vhost-Crypto", __func__, __LINE__, ## args)
#else
/* Release build: terse prefix only; debug logging compiled out entirely. */
#define VC_LOG_ERR(fmt, args...)				\
	RTE_LOG(ERR, USER1, "[VHOST-Crypto]: " fmt "\n", ## args)
#define VC_LOG_INFO(fmt, args...)				\
	RTE_LOG(INFO, USER1, "[VHOST-Crypto]: " fmt "\n", ## args)
#define VC_LOG_DBG(fmt, args...)
#endif
36 cipher_algo_transform(uint32_t virtio_cipher_algo)
40 switch (virtio_cipher_algo) {
41 case VIRTIO_CRYPTO_CIPHER_AES_CBC:
42 ret = RTE_CRYPTO_CIPHER_AES_CBC;
44 case VIRTIO_CRYPTO_CIPHER_AES_CTR:
45 ret = RTE_CRYPTO_CIPHER_AES_CTR;
47 case VIRTIO_CRYPTO_CIPHER_DES_ECB:
48 ret = -VIRTIO_CRYPTO_NOTSUPP;
50 case VIRTIO_CRYPTO_CIPHER_DES_CBC:
51 ret = RTE_CRYPTO_CIPHER_DES_CBC;
53 case VIRTIO_CRYPTO_CIPHER_3DES_ECB:
54 ret = RTE_CRYPTO_CIPHER_3DES_ECB;
56 case VIRTIO_CRYPTO_CIPHER_3DES_CBC:
57 ret = RTE_CRYPTO_CIPHER_3DES_CBC;
59 case VIRTIO_CRYPTO_CIPHER_3DES_CTR:
60 ret = RTE_CRYPTO_CIPHER_3DES_CTR;
62 case VIRTIO_CRYPTO_CIPHER_KASUMI_F8:
63 ret = RTE_CRYPTO_CIPHER_KASUMI_F8;
65 case VIRTIO_CRYPTO_CIPHER_SNOW3G_UEA2:
66 ret = RTE_CRYPTO_CIPHER_SNOW3G_UEA2;
68 case VIRTIO_CRYPTO_CIPHER_AES_F8:
69 ret = RTE_CRYPTO_CIPHER_AES_F8;
71 case VIRTIO_CRYPTO_CIPHER_AES_XTS:
72 ret = RTE_CRYPTO_CIPHER_AES_XTS;
74 case VIRTIO_CRYPTO_CIPHER_ZUC_EEA3:
75 ret = RTE_CRYPTO_CIPHER_ZUC_EEA3;
78 ret = -VIRTIO_CRYPTO_BADMSG;
86 auth_algo_transform(uint32_t virtio_auth_algo)
90 switch (virtio_auth_algo) {
92 case VIRTIO_CRYPTO_NO_MAC:
93 ret = RTE_CRYPTO_AUTH_NULL;
95 case VIRTIO_CRYPTO_MAC_HMAC_MD5:
96 ret = RTE_CRYPTO_AUTH_MD5_HMAC;
98 case VIRTIO_CRYPTO_MAC_HMAC_SHA1:
99 ret = RTE_CRYPTO_AUTH_SHA1_HMAC;
101 case VIRTIO_CRYPTO_MAC_HMAC_SHA_224:
102 ret = RTE_CRYPTO_AUTH_SHA224_HMAC;
104 case VIRTIO_CRYPTO_MAC_HMAC_SHA_256:
105 ret = RTE_CRYPTO_AUTH_SHA256_HMAC;
107 case VIRTIO_CRYPTO_MAC_HMAC_SHA_384:
108 ret = RTE_CRYPTO_AUTH_SHA384_HMAC;
110 case VIRTIO_CRYPTO_MAC_HMAC_SHA_512:
111 ret = RTE_CRYPTO_AUTH_SHA512_HMAC;
113 case VIRTIO_CRYPTO_MAC_CMAC_3DES:
114 ret = -VIRTIO_CRYPTO_NOTSUPP;
116 case VIRTIO_CRYPTO_MAC_CMAC_AES:
117 ret = RTE_CRYPTO_AUTH_AES_CMAC;
119 case VIRTIO_CRYPTO_MAC_KASUMI_F9:
120 ret = RTE_CRYPTO_AUTH_KASUMI_F9;
122 case VIRTIO_CRYPTO_MAC_SNOW3G_UIA2:
123 ret = RTE_CRYPTO_AUTH_SNOW3G_UIA2;
125 case VIRTIO_CRYPTO_MAC_GMAC_AES:
126 ret = RTE_CRYPTO_AUTH_AES_GMAC;
128 case VIRTIO_CRYPTO_MAC_GMAC_TWOFISH:
129 ret = -VIRTIO_CRYPTO_NOTSUPP;
131 case VIRTIO_CRYPTO_MAC_CBCMAC_AES:
132 ret = RTE_CRYPTO_AUTH_AES_CBC_MAC;
134 case VIRTIO_CRYPTO_MAC_CBCMAC_KASUMI_F9:
135 ret = -VIRTIO_CRYPTO_NOTSUPP;
137 case VIRTIO_CRYPTO_MAC_XCBC_AES:
138 ret = RTE_CRYPTO_AUTH_AES_XCBC_MAC;
141 ret = -VIRTIO_CRYPTO_BADMSG;
148 static int get_iv_len(enum rte_crypto_cipher_algorithm algo)
153 case RTE_CRYPTO_CIPHER_3DES_CBC:
156 case RTE_CRYPTO_CIPHER_3DES_CTR:
159 case RTE_CRYPTO_CIPHER_3DES_ECB:
162 case RTE_CRYPTO_CIPHER_AES_CBC:
166 /* TODO: add common algos */
/**
 * vhost_crypto struct is used to maintain a number of virtio_cryptos and
 * one DPDK crypto device that deals with all crypto workloads. It is declared
 * here and defined in vhost_crypto.c
 */
181 struct vhost_crypto {
182 /** Used to lookup DPDK Cryptodev Session based on VIRTIO crypto
185 struct rte_hash *session_map;
186 struct rte_mempool *mbuf_pool;
187 struct rte_mempool *sess_pool;
189 /** DPDK cryptodev ID */
193 uint64_t last_session_id;
195 uint64_t cache_session_id;
196 struct rte_cryptodev_sym_session *cache_session;
197 /** socket id for the device */
200 struct virtio_net *dev;
203 } __rte_cache_aligned;
206 transform_cipher_param(struct rte_crypto_sym_xform *xform,
207 VhostUserCryptoSessionParam *param)
211 ret = cipher_algo_transform(param->cipher_algo);
212 if (unlikely(ret < 0))
215 xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
216 xform->cipher.algo = (uint32_t)ret;
217 xform->cipher.key.length = param->cipher_key_len;
218 if (xform->cipher.key.length > 0)
219 xform->cipher.key.data = param->cipher_key_buf;
220 if (param->dir == VIRTIO_CRYPTO_OP_ENCRYPT)
221 xform->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
222 else if (param->dir == VIRTIO_CRYPTO_OP_DECRYPT)
223 xform->cipher.op = RTE_CRYPTO_CIPHER_OP_DECRYPT;
225 VC_LOG_DBG("Bad operation type");
226 return -VIRTIO_CRYPTO_BADMSG;
229 ret = get_iv_len(xform->cipher.algo);
230 if (unlikely(ret < 0))
232 xform->cipher.iv.length = (uint16_t)ret;
233 xform->cipher.iv.offset = IV_OFFSET;
238 transform_chain_param(struct rte_crypto_sym_xform *xforms,
239 VhostUserCryptoSessionParam *param)
241 struct rte_crypto_sym_xform *xform_cipher, *xform_auth;
244 switch (param->chaining_dir) {
245 case VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_HASH_THEN_CIPHER:
247 xform_cipher = xforms->next;
248 xform_cipher->cipher.op = RTE_CRYPTO_CIPHER_OP_DECRYPT;
249 xform_auth->auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
251 case VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_CIPHER_THEN_HASH:
252 xform_cipher = xforms;
253 xform_auth = xforms->next;
254 xform_cipher->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
255 xform_auth->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
258 return -VIRTIO_CRYPTO_BADMSG;
262 ret = cipher_algo_transform(param->cipher_algo);
263 if (unlikely(ret < 0))
265 xform_cipher->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
266 xform_cipher->cipher.algo = (uint32_t)ret;
267 xform_cipher->cipher.key.length = param->cipher_key_len;
268 xform_cipher->cipher.key.data = param->cipher_key_buf;
269 ret = get_iv_len(xform_cipher->cipher.algo);
270 if (unlikely(ret < 0))
272 xform_cipher->cipher.iv.length = (uint16_t)ret;
273 xform_cipher->cipher.iv.offset = IV_OFFSET;
276 xform_auth->type = RTE_CRYPTO_SYM_XFORM_AUTH;
277 ret = auth_algo_transform(param->hash_algo);
278 if (unlikely(ret < 0))
280 xform_auth->auth.algo = (uint32_t)ret;
281 xform_auth->auth.digest_length = param->digest_len;
282 xform_auth->auth.key.length = param->auth_key_len;
283 xform_auth->auth.key.data = param->auth_key_buf;
289 vhost_crypto_create_sess(struct vhost_crypto *vcrypto,
290 VhostUserCryptoSessionParam *sess_param)
292 struct rte_crypto_sym_xform xform1 = {0}, xform2 = {0};
293 struct rte_cryptodev_sym_session *session;
296 switch (sess_param->op_type) {
297 case VIRTIO_CRYPTO_SYM_OP_NONE:
298 case VIRTIO_CRYPTO_SYM_OP_CIPHER:
299 ret = transform_cipher_param(&xform1, sess_param);
301 VC_LOG_ERR("Error transform session msg (%i)", ret);
302 sess_param->session_id = ret;
306 case VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING:
307 if (unlikely(sess_param->hash_mode !=
308 VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH)) {
309 sess_param->session_id = -VIRTIO_CRYPTO_NOTSUPP;
310 VC_LOG_ERR("Error transform session message (%i)",
311 -VIRTIO_CRYPTO_NOTSUPP);
315 xform1.next = &xform2;
317 ret = transform_chain_param(&xform1, sess_param);
319 VC_LOG_ERR("Error transform session message (%i)", ret);
320 sess_param->session_id = ret;
326 VC_LOG_ERR("Algorithm not yet supported");
327 sess_param->session_id = -VIRTIO_CRYPTO_NOTSUPP;
331 session = rte_cryptodev_sym_session_create(vcrypto->sess_pool);
333 VC_LOG_ERR("Failed to create session");
334 sess_param->session_id = -VIRTIO_CRYPTO_ERR;
338 if (rte_cryptodev_sym_session_init(vcrypto->cid, session, &xform1,
339 vcrypto->sess_pool) < 0) {
340 VC_LOG_ERR("Failed to initialize session");
341 sess_param->session_id = -VIRTIO_CRYPTO_ERR;
345 /* insert hash to map */
346 if (rte_hash_add_key_data(vcrypto->session_map,
347 &vcrypto->last_session_id, session) < 0) {
348 VC_LOG_ERR("Failed to insert session to hash table");
350 if (rte_cryptodev_sym_session_clear(vcrypto->cid, session) < 0)
351 VC_LOG_ERR("Failed to clear session");
353 if (rte_cryptodev_sym_session_free(session) < 0)
354 VC_LOG_ERR("Failed to free session");
356 sess_param->session_id = -VIRTIO_CRYPTO_ERR;
360 VC_LOG_INFO("Session %"PRIu64" created for vdev %i.",
361 vcrypto->last_session_id, vcrypto->dev->vid);
363 sess_param->session_id = vcrypto->last_session_id;
364 vcrypto->last_session_id++;
368 vhost_crypto_close_sess(struct vhost_crypto *vcrypto, uint64_t session_id)
370 struct rte_cryptodev_sym_session *session;
371 uint64_t sess_id = session_id;
374 ret = rte_hash_lookup_data(vcrypto->session_map, &sess_id,
377 if (unlikely(ret < 0)) {
378 VC_LOG_ERR("Failed to delete session %"PRIu64".", session_id);
379 return -VIRTIO_CRYPTO_INVSESS;
382 if (rte_cryptodev_sym_session_clear(vcrypto->cid, session) < 0) {
383 VC_LOG_DBG("Failed to clear session");
384 return -VIRTIO_CRYPTO_ERR;
387 if (rte_cryptodev_sym_session_free(session) < 0) {
388 VC_LOG_DBG("Failed to free session");
389 return -VIRTIO_CRYPTO_ERR;
392 if (rte_hash_del_key(vcrypto->session_map, &sess_id) < 0) {
393 VC_LOG_DBG("Failed to delete session from hash table.");
394 return -VIRTIO_CRYPTO_ERR;
397 VC_LOG_INFO("Session %"PRIu64" deleted for vdev %i.", sess_id,
404 vhost_crypto_msg_post_handler(int vid, void *msg, uint32_t *require_reply)
406 struct virtio_net *dev = get_device(vid);
407 struct vhost_crypto *vcrypto;
408 VhostUserMsg *vmsg = msg;
411 if (dev == NULL || require_reply == NULL) {
412 VC_LOG_ERR("Invalid vid %i", vid);
416 vcrypto = dev->extern_data;
417 if (vcrypto == NULL) {
418 VC_LOG_ERR("Cannot find required data, is it initialized?");
424 if (vmsg->request.master == VHOST_USER_CRYPTO_CREATE_SESS) {
425 vhost_crypto_create_sess(vcrypto,
426 &vmsg->payload.crypto_session);
428 } else if (vmsg->request.master == VHOST_USER_CRYPTO_CLOSE_SESS)
429 ret = vhost_crypto_close_sess(vcrypto, vmsg->payload.u64);