lib/librte_vhost/vhost_crypto.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017-2018 Intel Corporation
 */
#include <rte_malloc.h>
#include <rte_hash.h>
#include <rte_jhash.h>
#include <rte_mbuf.h>
#include <rte_cryptodev.h>

#include "rte_vhost_crypto.h"
#include "vhost.h"
#include "vhost_user.h"
#include "virtio_crypto.h"

#define INHDR_LEN		(sizeof(struct virtio_crypto_inhdr))
#define IV_OFFSET		(sizeof(struct rte_crypto_op) + \
				sizeof(struct rte_crypto_sym_op))

#ifdef RTE_LIBRTE_VHOST_DEBUG
#define VC_LOG_ERR(fmt, args...)				\
	RTE_LOG(ERR, USER1, "[%s] %s() line %u: " fmt "\n",	\
		"Vhost-Crypto", __func__, __LINE__, ## args)
#define VC_LOG_INFO(fmt, args...)				\
	RTE_LOG(INFO, USER1, "[%s] %s() line %u: " fmt "\n",	\
		"Vhost-Crypto", __func__, __LINE__, ## args)

#define VC_LOG_DBG(fmt, args...)				\
	RTE_LOG(DEBUG, USER1, "[%s] %s() line %u: " fmt "\n",	\
		"Vhost-Crypto", __func__, __LINE__, ## args)
#else
#define VC_LOG_ERR(fmt, args...)				\
	RTE_LOG(ERR, USER1, "[VHOST-Crypto]: " fmt "\n", ## args)
#define VC_LOG_INFO(fmt, args...)				\
	RTE_LOG(INFO, USER1, "[VHOST-Crypto]: " fmt "\n", ## args)
#define VC_LOG_DBG(fmt, args...)
#endif

#define VIRTIO_CRYPTO_FEATURES ((1 << VIRTIO_F_NOTIFY_ON_EMPTY) |	\
		(1 << VIRTIO_RING_F_INDIRECT_DESC) |			\
		(1 << VIRTIO_RING_F_EVENT_IDX) |			\
		(1 << VIRTIO_CRYPTO_SERVICE_CIPHER) |			\
		(1 << VIRTIO_CRYPTO_SERVICE_MAC) |			\
		(1 << VIRTIO_NET_F_CTRL_VQ))

#define IOVA_TO_VVA(t, r, a, l, p)					\
	((t)(uintptr_t)vhost_iova_to_vva(r->dev, r->vq, a, l, p))

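/*
 * Note the length argument of vhost_iova_to_vva() is in/out: the call may
 * return a shorter mapping than requested when the guest buffer is not
 * contiguous in host virtual address space. A sketch of the calling
 * convention used throughout this file:
 *
 *	uint64_t dlen = desc->len;
 *	void *va = IOVA_TO_VVA(void *, vc_req, desc->addr, &dlen, perm);
 *	if (!va || dlen != desc->len)
 *		return error; // unmapped, or crosses a guest memory region
 */
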
static int
cipher_algo_transform(uint32_t virtio_cipher_algo,
		enum rte_crypto_cipher_algorithm *algo)
{
	switch (virtio_cipher_algo) {
	case VIRTIO_CRYPTO_CIPHER_AES_CBC:
		*algo = RTE_CRYPTO_CIPHER_AES_CBC;
		break;
	case VIRTIO_CRYPTO_CIPHER_AES_CTR:
		*algo = RTE_CRYPTO_CIPHER_AES_CTR;
		break;
	case VIRTIO_CRYPTO_CIPHER_DES_ECB:
		*algo = -VIRTIO_CRYPTO_NOTSUPP;
		break;
	case VIRTIO_CRYPTO_CIPHER_DES_CBC:
		*algo = RTE_CRYPTO_CIPHER_DES_CBC;
		break;
	case VIRTIO_CRYPTO_CIPHER_3DES_ECB:
		*algo = RTE_CRYPTO_CIPHER_3DES_ECB;
		break;
	case VIRTIO_CRYPTO_CIPHER_3DES_CBC:
		*algo = RTE_CRYPTO_CIPHER_3DES_CBC;
		break;
	case VIRTIO_CRYPTO_CIPHER_3DES_CTR:
		*algo = RTE_CRYPTO_CIPHER_3DES_CTR;
		break;
	case VIRTIO_CRYPTO_CIPHER_KASUMI_F8:
		*algo = RTE_CRYPTO_CIPHER_KASUMI_F8;
		break;
	case VIRTIO_CRYPTO_CIPHER_SNOW3G_UEA2:
		*algo = RTE_CRYPTO_CIPHER_SNOW3G_UEA2;
		break;
	case VIRTIO_CRYPTO_CIPHER_AES_F8:
		*algo = RTE_CRYPTO_CIPHER_AES_F8;
		break;
	case VIRTIO_CRYPTO_CIPHER_AES_XTS:
		*algo = RTE_CRYPTO_CIPHER_AES_XTS;
		break;
	case VIRTIO_CRYPTO_CIPHER_ZUC_EEA3:
		*algo = RTE_CRYPTO_CIPHER_ZUC_EEA3;
		break;
	default:
		return -VIRTIO_CRYPTO_BADMSG;
	}

	return 0;
}

static int
auth_algo_transform(uint32_t virtio_auth_algo,
		enum rte_crypto_auth_algorithm *algo)
{
	switch (virtio_auth_algo) {
	case VIRTIO_CRYPTO_NO_MAC:
		*algo = RTE_CRYPTO_AUTH_NULL;
		break;
	case VIRTIO_CRYPTO_MAC_HMAC_MD5:
		*algo = RTE_CRYPTO_AUTH_MD5_HMAC;
		break;
	case VIRTIO_CRYPTO_MAC_HMAC_SHA1:
		*algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
		break;
	case VIRTIO_CRYPTO_MAC_HMAC_SHA_224:
		*algo = RTE_CRYPTO_AUTH_SHA224_HMAC;
		break;
	case VIRTIO_CRYPTO_MAC_HMAC_SHA_256:
		*algo = RTE_CRYPTO_AUTH_SHA256_HMAC;
		break;
	case VIRTIO_CRYPTO_MAC_HMAC_SHA_384:
		*algo = RTE_CRYPTO_AUTH_SHA384_HMAC;
		break;
	case VIRTIO_CRYPTO_MAC_HMAC_SHA_512:
		*algo = RTE_CRYPTO_AUTH_SHA512_HMAC;
		break;
	case VIRTIO_CRYPTO_MAC_CMAC_AES:
		*algo = RTE_CRYPTO_AUTH_AES_CMAC;
		break;
	case VIRTIO_CRYPTO_MAC_KASUMI_F9:
		*algo = RTE_CRYPTO_AUTH_KASUMI_F9;
		break;
	case VIRTIO_CRYPTO_MAC_SNOW3G_UIA2:
		*algo = RTE_CRYPTO_AUTH_SNOW3G_UIA2;
		break;
	case VIRTIO_CRYPTO_MAC_GMAC_AES:
		*algo = RTE_CRYPTO_AUTH_AES_GMAC;
		break;
	case VIRTIO_CRYPTO_MAC_CBCMAC_AES:
		*algo = RTE_CRYPTO_AUTH_AES_CBC_MAC;
		break;
	case VIRTIO_CRYPTO_MAC_XCBC_AES:
		*algo = RTE_CRYPTO_AUTH_AES_XCBC_MAC;
		break;
	case VIRTIO_CRYPTO_MAC_CMAC_3DES:
	case VIRTIO_CRYPTO_MAC_GMAC_TWOFISH:
	case VIRTIO_CRYPTO_MAC_CBCMAC_KASUMI_F9:
		return -VIRTIO_CRYPTO_NOTSUPP;
	default:
		return -VIRTIO_CRYPTO_BADMSG;
	}

	return 0;
}

static int get_iv_len(enum rte_crypto_cipher_algorithm algo)
{
	int len;

	switch (algo) {
	case RTE_CRYPTO_CIPHER_3DES_CBC:
	case RTE_CRYPTO_CIPHER_3DES_CTR:
	case RTE_CRYPTO_CIPHER_3DES_ECB:
		len = 8;
		break;
	case RTE_CRYPTO_CIPHER_AES_CBC:
		len = 16;
		break;

	/* TODO: add common algos */

	default:
		len = -1;
		break;
	}

	return len;
}

/**
 * vhost_crypto struct is used to maintain a number of virtio_cryptos and
 * one DPDK crypto device that deals with all crypto workloads. It is
 * defined here and used only within vhost_crypto.c.
 */
struct vhost_crypto {
	/** Used to look up a DPDK cryptodev session based on the VIRTIO
	 *  crypto session ID.
	 */
	struct rte_hash *session_map;
	struct rte_mempool *mbuf_pool;
	struct rte_mempool *sess_pool;
	struct rte_mempool *sess_priv_pool;
	struct rte_mempool *wb_pool;

	/** DPDK cryptodev ID */
	uint8_t cid;
	uint16_t nb_qps;

	uint64_t last_session_id;

	uint64_t cache_session_id;
	struct rte_cryptodev_sym_session *cache_session;
	/** socket id for the device */
	int socket_id;

	struct virtio_net *dev;

	uint8_t option;
} __rte_cache_aligned;

struct vhost_crypto_writeback_data {
	uint8_t *src;
	uint8_t *dst;
	uint64_t len;
	struct vhost_crypto_writeback_data *next;
};

struct vhost_crypto_data_req {
	/** Head of the descriptor chain of this request */
	struct vring_desc *head;
	struct virtio_net *dev;
	/** Guest-visible status header, filled in on completion */
	struct virtio_crypto_inhdr *inhdr;
	struct vhost_virtqueue *vq;
	/** Write-back list, used only when zero copy is disabled */
	struct vhost_crypto_writeback_data *wb;
	struct rte_mempool *wb_pool;
	uint16_t desc_idx;
	/** Number of bytes reported in the used ring entry */
	uint16_t len;
	uint16_t zero_copy;
};

static int
transform_cipher_param(struct rte_crypto_sym_xform *xform,
		VhostUserCryptoSessionParam *param)
{
	int ret;

	ret = cipher_algo_transform(param->cipher_algo, &xform->cipher.algo);
	if (unlikely(ret < 0))
		return ret;

	xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
	xform->cipher.key.length = param->cipher_key_len;
	if (xform->cipher.key.length > 0)
		xform->cipher.key.data = param->cipher_key_buf;
	if (param->dir == VIRTIO_CRYPTO_OP_ENCRYPT)
		xform->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
	else if (param->dir == VIRTIO_CRYPTO_OP_DECRYPT)
		xform->cipher.op = RTE_CRYPTO_CIPHER_OP_DECRYPT;
	else {
		VC_LOG_DBG("Bad operation type");
		return -VIRTIO_CRYPTO_BADMSG;
	}

	ret = get_iv_len(xform->cipher.algo);
	if (unlikely(ret < 0))
		return ret;
	xform->cipher.iv.length = (uint16_t)ret;
	xform->cipher.iv.offset = IV_OFFSET;
	return 0;
}

static int
transform_chain_param(struct rte_crypto_sym_xform *xforms,
		VhostUserCryptoSessionParam *param)
{
	struct rte_crypto_sym_xform *xform_cipher, *xform_auth;
	int ret;

	switch (param->chaining_dir) {
	case VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_HASH_THEN_CIPHER:
		xform_auth = xforms;
		xform_cipher = xforms->next;
		xform_cipher->cipher.op = RTE_CRYPTO_CIPHER_OP_DECRYPT;
		xform_auth->auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
		break;
	case VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_CIPHER_THEN_HASH:
		xform_cipher = xforms;
		xform_auth = xforms->next;
		xform_cipher->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
		xform_auth->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
		break;
	default:
		return -VIRTIO_CRYPTO_BADMSG;
	}

	/* cipher */
	ret = cipher_algo_transform(param->cipher_algo,
			&xform_cipher->cipher.algo);
	if (unlikely(ret < 0))
		return ret;
	xform_cipher->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
	xform_cipher->cipher.key.length = param->cipher_key_len;
	xform_cipher->cipher.key.data = param->cipher_key_buf;
	ret = get_iv_len(xform_cipher->cipher.algo);
	if (unlikely(ret < 0))
		return ret;
	xform_cipher->cipher.iv.length = (uint16_t)ret;
	xform_cipher->cipher.iv.offset = IV_OFFSET;

	/* auth */
	xform_auth->type = RTE_CRYPTO_SYM_XFORM_AUTH;
	ret = auth_algo_transform(param->hash_algo, &xform_auth->auth.algo);
	if (unlikely(ret < 0))
		return ret;
	xform_auth->auth.digest_length = param->digest_len;
	xform_auth->auth.key.length = param->auth_key_len;
	xform_auth->auth.key.data = param->auth_key_buf;

	return 0;
}
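
/*
 * For example (illustrative only), a CIPHER_THEN_HASH request combining
 * AES-CBC encryption with HMAC-SHA1 yields the following xform chain
 * (the caller links xforms[0].next = &xforms[1] beforehand):
 *
 *	xforms[0]: type = RTE_CRYPTO_SYM_XFORM_CIPHER, algo = AES_CBC,
 *		   op = ENCRYPT, iv.offset = IV_OFFSET
 *	xforms[1]: type = RTE_CRYPTO_SYM_XFORM_AUTH, algo = SHA1_HMAC,
 *		   op = GENERATE
 */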

static void
vhost_crypto_create_sess(struct vhost_crypto *vcrypto,
		VhostUserCryptoSessionParam *sess_param)
{
	struct rte_crypto_sym_xform xform1 = {0}, xform2 = {0};
	struct rte_cryptodev_sym_session *session;
	int ret;

	switch (sess_param->op_type) {
	case VIRTIO_CRYPTO_SYM_OP_NONE:
	case VIRTIO_CRYPTO_SYM_OP_CIPHER:
		ret = transform_cipher_param(&xform1, sess_param);
		if (unlikely(ret)) {
			VC_LOG_ERR("Error transform session msg (%i)", ret);
			sess_param->session_id = ret;
			return;
		}
		break;
	case VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING:
		if (unlikely(sess_param->hash_mode !=
				VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH)) {
			sess_param->session_id = -VIRTIO_CRYPTO_NOTSUPP;
			VC_LOG_ERR("Error transform session message (%i)",
					-VIRTIO_CRYPTO_NOTSUPP);
			return;
		}

		xform1.next = &xform2;

		ret = transform_chain_param(&xform1, sess_param);
		if (unlikely(ret)) {
			VC_LOG_ERR("Error transform session message (%i)", ret);
			sess_param->session_id = ret;
			return;
		}

		break;
	default:
		VC_LOG_ERR("Algorithm not yet supported");
		sess_param->session_id = -VIRTIO_CRYPTO_NOTSUPP;
		return;
	}

	session = rte_cryptodev_sym_session_create(vcrypto->sess_pool);
	if (!session) {
		VC_LOG_ERR("Failed to create session");
		sess_param->session_id = -VIRTIO_CRYPTO_ERR;
		return;
	}

	if (rte_cryptodev_sym_session_init(vcrypto->cid, session, &xform1,
			vcrypto->sess_priv_pool) < 0) {
		VC_LOG_ERR("Failed to initialize session");
		/* do not leak the session object on init failure */
		if (rte_cryptodev_sym_session_free(session) < 0)
			VC_LOG_ERR("Failed to free session");
		sess_param->session_id = -VIRTIO_CRYPTO_ERR;
		return;
	}

	/* insert hash to map */
	if (rte_hash_add_key_data(vcrypto->session_map,
			&vcrypto->last_session_id, session) < 0) {
		VC_LOG_ERR("Failed to insert session to hash table");

		if (rte_cryptodev_sym_session_clear(vcrypto->cid, session) < 0)
			VC_LOG_ERR("Failed to clear session");
		else {
			if (rte_cryptodev_sym_session_free(session) < 0)
				VC_LOG_ERR("Failed to free session");
		}
		sess_param->session_id = -VIRTIO_CRYPTO_ERR;
		return;
	}

	VC_LOG_INFO("Session %"PRIu64" created for vdev %i.",
			vcrypto->last_session_id, vcrypto->dev->vid);

	sess_param->session_id = vcrypto->last_session_id;
	vcrypto->last_session_id++;
}

static int
vhost_crypto_close_sess(struct vhost_crypto *vcrypto, uint64_t session_id)
{
	struct rte_cryptodev_sym_session *session;
	uint64_t sess_id = session_id;
	int ret;

	ret = rte_hash_lookup_data(vcrypto->session_map, &sess_id,
			(void **)&session);

	if (unlikely(ret < 0)) {
		VC_LOG_ERR("Failed to delete session %"PRIu64".", session_id);
		return -VIRTIO_CRYPTO_INVSESS;
	}

	if (rte_cryptodev_sym_session_clear(vcrypto->cid, session) < 0) {
		VC_LOG_DBG("Failed to clear session");
		return -VIRTIO_CRYPTO_ERR;
	}

	if (rte_cryptodev_sym_session_free(session) < 0) {
		VC_LOG_DBG("Failed to free session");
		return -VIRTIO_CRYPTO_ERR;
	}

	if (rte_hash_del_key(vcrypto->session_map, &sess_id) < 0) {
		VC_LOG_DBG("Failed to delete session from hash table.");
		return -VIRTIO_CRYPTO_ERR;
	}

	VC_LOG_INFO("Session %"PRIu64" deleted for vdev %i.", sess_id,
			vcrypto->dev->vid);

	return 0;
}

static enum rte_vhost_msg_result
vhost_crypto_msg_post_handler(int vid, void *msg)
{
	struct virtio_net *dev = get_device(vid);
	struct vhost_crypto *vcrypto;
	VhostUserMsg *vmsg = msg;
	enum rte_vhost_msg_result ret = RTE_VHOST_MSG_RESULT_OK;

	if (dev == NULL) {
		VC_LOG_ERR("Invalid vid %i", vid);
		return RTE_VHOST_MSG_RESULT_ERR;
	}

	vcrypto = dev->extern_data;
	if (vcrypto == NULL) {
		VC_LOG_ERR("Cannot find required data, is it initialized?");
		return RTE_VHOST_MSG_RESULT_ERR;
	}

	switch (vmsg->request.master) {
	case VHOST_USER_CRYPTO_CREATE_SESS:
		vhost_crypto_create_sess(vcrypto,
				&vmsg->payload.crypto_session);
		vmsg->fd_num = 0;
		ret = RTE_VHOST_MSG_RESULT_REPLY;
		break;
	case VHOST_USER_CRYPTO_CLOSE_SESS:
		if (vhost_crypto_close_sess(vcrypto, vmsg->payload.u64))
			ret = RTE_VHOST_MSG_RESULT_ERR;
		break;
	default:
		ret = RTE_VHOST_MSG_RESULT_NOT_HANDLED;
		break;
	}

	return ret;
}

static __rte_always_inline struct vring_desc *
find_write_desc(struct vring_desc *head, struct vring_desc *desc,
		uint32_t *nb_descs, uint32_t vq_size)
{
	if (desc->flags & VRING_DESC_F_WRITE)
		return desc;

	while (desc->flags & VRING_DESC_F_NEXT) {
		if (unlikely(*nb_descs == 0 || desc->next >= vq_size))
			return NULL;
		(*nb_descs)--;

		desc = &head[desc->next];
		if (desc->flags & VRING_DESC_F_WRITE)
			return desc;
	}

	return NULL;
}

static struct virtio_crypto_inhdr *
reach_inhdr(struct vhost_crypto_data_req *vc_req, struct vring_desc *desc,
		uint32_t *nb_descs, uint32_t vq_size)
{
	uint64_t dlen;
	struct virtio_crypto_inhdr *inhdr;

	while (desc->flags & VRING_DESC_F_NEXT) {
		if (unlikely(*nb_descs == 0 || desc->next >= vq_size))
			return NULL;
		(*nb_descs)--;
		desc = &vc_req->head[desc->next];
	}

	dlen = desc->len;
	inhdr = IOVA_TO_VVA(struct virtio_crypto_inhdr *, vc_req, desc->addr,
			&dlen, VHOST_ACCESS_WO);
	if (unlikely(!inhdr || dlen != desc->len))
		return NULL;

	return inhdr;
}

static __rte_always_inline int
move_desc(struct vring_desc *head, struct vring_desc **cur_desc,
		uint32_t size, uint32_t *nb_descs, uint32_t vq_size)
{
	struct vring_desc *desc = *cur_desc;
	int left = size - desc->len;

	while ((desc->flags & VRING_DESC_F_NEXT) && left > 0) {
		(*nb_descs)--;
		if (unlikely(*nb_descs == 0 || desc->next >= vq_size))
			return -1;

		desc = &head[desc->next];
		rte_prefetch0(&head[desc->next]);
		left -= desc->len;
	}

	if (unlikely(left > 0))
		return -1;

	if (unlikely(*nb_descs == 0))
		*cur_desc = NULL;
	else {
		if (unlikely(desc->next >= vq_size))
			return -1;
		*cur_desc = &head[desc->next];
	}

	return 0;
}

static __rte_always_inline void *
get_data_ptr(struct vhost_crypto_data_req *vc_req, struct vring_desc *cur_desc,
		uint8_t perm)
{
	void *data;
	uint64_t dlen = cur_desc->len;

	data = IOVA_TO_VVA(void *, vc_req, cur_desc->addr, &dlen, perm);
	if (unlikely(!data || dlen != cur_desc->len)) {
		VC_LOG_ERR("Failed to map object");
		return NULL;
	}

	return data;
}

static int
copy_data(void *dst_data, struct vhost_crypto_data_req *vc_req,
		struct vring_desc **cur_desc, uint32_t size,
		uint32_t *nb_descs, uint32_t vq_size)
{
	struct vring_desc *desc = *cur_desc;
	uint64_t remain, addr, dlen, len;
	uint32_t to_copy;
	uint8_t *data = dst_data;
	uint8_t *src;
	int left = size;

	to_copy = RTE_MIN(desc->len, (uint32_t)left);
	dlen = to_copy;
	src = IOVA_TO_VVA(uint8_t *, vc_req, desc->addr, &dlen,
			VHOST_ACCESS_RO);
	if (unlikely(!src || !dlen))
		return -1;

	rte_memcpy(data, src, dlen);
	data += dlen;

	if (unlikely(dlen < to_copy)) {
		remain = to_copy - dlen;
		addr = desc->addr + dlen;

		while (remain) {
			len = remain;
			src = IOVA_TO_VVA(uint8_t *, vc_req, addr, &len,
					VHOST_ACCESS_RO);
			if (unlikely(!src || !len)) {
				VC_LOG_ERR("Failed to map descriptor");
				return -1;
			}

			rte_memcpy(data, src, len);
			addr += len;
			remain -= len;
			data += len;
		}
	}

	left -= to_copy;

	while ((desc->flags & VRING_DESC_F_NEXT) && left > 0) {
		if (unlikely(*nb_descs == 0 || desc->next >= vq_size)) {
			VC_LOG_ERR("Invalid descriptors");
			return -1;
		}
		(*nb_descs)--;

		desc = &vc_req->head[desc->next];
		rte_prefetch0(&vc_req->head[desc->next]);
		to_copy = RTE_MIN(desc->len, (uint32_t)left);
		dlen = desc->len;
		src = IOVA_TO_VVA(uint8_t *, vc_req, desc->addr, &dlen,
				VHOST_ACCESS_RO);
		if (unlikely(!src || !dlen)) {
			VC_LOG_ERR("Failed to map descriptor");
			return -1;
		}

		rte_memcpy(data, src, dlen);
		data += dlen;

		if (unlikely(dlen < to_copy)) {
			remain = to_copy - dlen;
			addr = desc->addr + dlen;

			while (remain) {
				len = remain;
				src = IOVA_TO_VVA(uint8_t *, vc_req, addr, &len,
						VHOST_ACCESS_RO);
				if (unlikely(!src || !len)) {
					VC_LOG_ERR("Failed to map descriptor");
					return -1;
				}

				rte_memcpy(data, src, len);
				addr += len;
				remain -= len;
				data += len;
			}
		}

		left -= to_copy;
	}

	if (unlikely(left > 0)) {
		VC_LOG_ERR("Incorrect virtio descriptor");
		return -1;
	}

	if (unlikely(*nb_descs == 0))
		*cur_desc = NULL;
	else {
		if (unlikely(desc->next >= vq_size))
			return -1;
		*cur_desc = &vc_req->head[desc->next];
	}

	return 0;
}

static void
write_back_data(struct vhost_crypto_data_req *vc_req)
{
	struct vhost_crypto_writeback_data *wb_data = vc_req->wb, *wb_last;

	while (wb_data) {
		rte_memcpy(wb_data->dst, wb_data->src, wb_data->len);
		wb_last = wb_data;
		wb_data = wb_data->next;
		rte_mempool_put(vc_req->wb_pool, wb_last);
	}
}

static void
free_wb_data(struct vhost_crypto_writeback_data *wb_data,
		struct rte_mempool *mp)
{
	/* walk the list iteratively; save the next pointer before each
	 * element is returned to the mempool
	 */
	while (wb_data != NULL) {
		struct vhost_crypto_writeback_data *next = wb_data->next;

		rte_mempool_put(mp, wb_data);
		wb_data = next;
	}
}

/**
 * The function will allocate a vhost_crypto_writeback_data linked list
 * containing the source and destination data pointers for the write back
 * operation after dequeued from Cryptodev PMD queues.
 *
 * @param vc_req
 *   The vhost crypto data request pointer
 * @param cur_desc
 *   Pointer to the current in-use descriptor pointer. The content of
 *   cur_desc is expected to be updated after the function execution.
 * @param end_wb_data
 *   The last write back data element to be returned. It is used only in
 *   cipher and hash chain operations.
 * @param src
 *   The source data pointer
 * @param offset
 *   The offset to both source and destination data. For source data the
 *   offset is the number of bytes between src and the start point of the
 *   cipher operation. For destination data the offset is the number of
 *   bytes from *cur_desc->addr to the point where the src will be written
 *   to.
 * @param write_back_len
 *   The number of bytes to write back.
 * @return
 *   The pointer to the start of the write back data linked list.
 */
static struct vhost_crypto_writeback_data *
prepare_write_back_data(struct vhost_crypto_data_req *vc_req,
		struct vring_desc **cur_desc,
		struct vhost_crypto_writeback_data **end_wb_data,
		uint8_t *src,
		uint32_t offset,
		uint64_t write_back_len,
		uint32_t *nb_descs, uint32_t vq_size)
{
	/* initialize head so error_exit never frees a garbage pointer */
	struct vhost_crypto_writeback_data *wb_data, *head = NULL;
	struct vring_desc *desc = *cur_desc;
	uint64_t dlen;
	uint8_t *dst;
	int ret;

	ret = rte_mempool_get(vc_req->wb_pool, (void **)&head);
	if (unlikely(ret < 0)) {
		VC_LOG_ERR("no memory");
		goto error_exit;
	}

	wb_data = head;

	if (likely(desc->len > offset)) {
		wb_data->src = src + offset;
		dlen = desc->len;
		/* validate the mapping before applying the offset */
		dst = IOVA_TO_VVA(uint8_t *, vc_req, desc->addr,
			&dlen, VHOST_ACCESS_RW);
		if (unlikely(!dst || dlen != desc->len)) {
			VC_LOG_ERR("Failed to map descriptor");
			goto error_exit;
		}
		dst += offset;

		wb_data->dst = dst;
		wb_data->len = desc->len - offset;
		write_back_len -= wb_data->len;
		src += offset + wb_data->len;
		offset = 0;

		if (unlikely(write_back_len)) {
			ret = rte_mempool_get(vc_req->wb_pool,
					(void **)&(wb_data->next));
			if (unlikely(ret < 0)) {
				VC_LOG_ERR("no memory");
				goto error_exit;
			}

			wb_data = wb_data->next;
		} else
			wb_data->next = NULL;
	} else
		offset -= desc->len;

	while (write_back_len) {
		if (unlikely(*nb_descs == 0 || desc->next >= vq_size)) {
			VC_LOG_ERR("Invalid descriptors");
			goto error_exit;
		}
		(*nb_descs)--;

		desc = &vc_req->head[desc->next];
		if (unlikely(!(desc->flags & VRING_DESC_F_WRITE))) {
			VC_LOG_ERR("incorrect descriptor");
			goto error_exit;
		}

		if (desc->len <= offset) {
			offset -= desc->len;
			continue;
		}

		dlen = desc->len;
		dst = IOVA_TO_VVA(uint8_t *, vc_req, desc->addr, &dlen,
				VHOST_ACCESS_RW);
		if (unlikely(dst == NULL || dlen != desc->len)) {
			VC_LOG_ERR("Failed to map descriptor");
			goto error_exit;
		}
		dst += offset;

		wb_data->src = src;
		wb_data->dst = dst;
		wb_data->len = RTE_MIN(desc->len - offset, write_back_len);
		write_back_len -= wb_data->len;
		src += wb_data->len;
		offset = 0;

		if (write_back_len) {
			ret = rte_mempool_get(vc_req->wb_pool,
					(void **)&(wb_data->next));
			if (unlikely(ret < 0)) {
				VC_LOG_ERR("no memory");
				goto error_exit;
			}

			wb_data = wb_data->next;
		} else
			wb_data->next = NULL;
	}

	if (unlikely(*nb_descs == 0))
		*cur_desc = NULL;
	else {
		if (unlikely(desc->next >= vq_size))
			goto error_exit;
		*cur_desc = &vc_req->head[desc->next];
	}

	*end_wb_data = wb_data;

	return head;

error_exit:
	if (head)
		free_wb_data(head, vc_req->wb_pool);

	return NULL;
}
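
/*
 * Illustrative example: writing back 100 bytes at offset 0 into two
 * 64-byte write descriptors produces the two-element list
 *
 *	head:       src = src,      dst = vva(desc[0]), len = 64
 *	head->next: src = src + 64, dst = vva(desc[1]), len = 36
 *
 * with *end_wb_data left pointing at the second element.
 */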

static uint8_t
prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
		struct vhost_crypto_data_req *vc_req,
		struct virtio_crypto_cipher_data_req *cipher,
		struct vring_desc *cur_desc,
		uint32_t *nb_descs, uint32_t vq_size)
{
	struct vring_desc *desc = cur_desc;
	struct vhost_crypto_writeback_data *ewb = NULL;
	struct rte_mbuf *m_src = op->sym->m_src, *m_dst = op->sym->m_dst;
	uint8_t *iv_data = rte_crypto_op_ctod_offset(op, uint8_t *, IV_OFFSET);
	uint8_t ret = 0;

	/* prepare */
	/* iv */
	if (unlikely(copy_data(iv_data, vc_req, &desc, cipher->para.iv_len,
			nb_descs, vq_size) < 0)) {
		ret = VIRTIO_CRYPTO_BADMSG;
		goto error_exit;
	}

	m_src->data_len = cipher->para.src_data_len;

	switch (vcrypto->option) {
	case RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE:
		m_src->buf_iova = gpa_to_hpa(vcrypto->dev, desc->addr,
				cipher->para.src_data_len);
		m_src->buf_addr = get_data_ptr(vc_req, desc, VHOST_ACCESS_RO);
		if (unlikely(m_src->buf_iova == 0 ||
				m_src->buf_addr == NULL)) {
			VC_LOG_ERR("zero_copy may fail due to cross page data");
			ret = VIRTIO_CRYPTO_ERR;
			goto error_exit;
		}

		if (unlikely(move_desc(vc_req->head, &desc,
				cipher->para.src_data_len, nb_descs,
				vq_size) < 0)) {
			VC_LOG_ERR("Incorrect descriptor");
			ret = VIRTIO_CRYPTO_ERR;
			goto error_exit;
		}

		break;
	case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE:
		vc_req->wb_pool = vcrypto->wb_pool;

		if (unlikely(cipher->para.src_data_len >
				RTE_MBUF_DEFAULT_BUF_SIZE)) {
			VC_LOG_ERR("Not enough space to do data copy");
			ret = VIRTIO_CRYPTO_ERR;
			goto error_exit;
		}
		if (unlikely(copy_data(rte_pktmbuf_mtod(m_src, uint8_t *),
				vc_req, &desc, cipher->para.src_data_len,
				nb_descs, vq_size) < 0)) {
			ret = VIRTIO_CRYPTO_BADMSG;
			goto error_exit;
		}
		break;
	default:
		ret = VIRTIO_CRYPTO_BADMSG;
		goto error_exit;
	}

	/* dst */
	desc = find_write_desc(vc_req->head, desc, nb_descs, vq_size);
	if (unlikely(!desc)) {
		VC_LOG_ERR("Cannot find write location");
		ret = VIRTIO_CRYPTO_BADMSG;
		goto error_exit;
	}

	switch (vcrypto->option) {
	case RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE:
		m_dst->buf_iova = gpa_to_hpa(vcrypto->dev,
				desc->addr, cipher->para.dst_data_len);
		m_dst->buf_addr = get_data_ptr(vc_req, desc, VHOST_ACCESS_RW);
		if (unlikely(m_dst->buf_iova == 0 || m_dst->buf_addr == NULL)) {
			VC_LOG_ERR("zero_copy may fail due to cross page data");
			ret = VIRTIO_CRYPTO_ERR;
			goto error_exit;
		}

		if (unlikely(move_desc(vc_req->head, &desc,
				cipher->para.dst_data_len,
				nb_descs, vq_size) < 0)) {
			VC_LOG_ERR("Incorrect descriptor");
			ret = VIRTIO_CRYPTO_ERR;
			goto error_exit;
		}

		m_dst->data_len = cipher->para.dst_data_len;
		break;
	case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE:
		vc_req->wb = prepare_write_back_data(vc_req, &desc, &ewb,
				rte_pktmbuf_mtod(m_src, uint8_t *), 0,
				cipher->para.dst_data_len, nb_descs, vq_size);
		if (unlikely(vc_req->wb == NULL)) {
			ret = VIRTIO_CRYPTO_ERR;
			goto error_exit;
		}

		break;
	default:
		ret = VIRTIO_CRYPTO_BADMSG;
		goto error_exit;
	}

	/* src data */
	op->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
	op->sess_type = RTE_CRYPTO_OP_WITH_SESSION;

	op->sym->cipher.data.offset = 0;
	op->sym->cipher.data.length = cipher->para.src_data_len;

	vc_req->inhdr = get_data_ptr(vc_req, desc, VHOST_ACCESS_WO);
	if (unlikely(vc_req->inhdr == NULL)) {
		ret = VIRTIO_CRYPTO_BADMSG;
		goto error_exit;
	}

	vc_req->inhdr->status = VIRTIO_CRYPTO_OK;
	vc_req->len = cipher->para.dst_data_len + INHDR_LEN;

	return 0;

error_exit:
	if (vc_req->wb)
		free_wb_data(vc_req->wb, vc_req->wb_pool);

	vc_req->len = INHDR_LEN;
	return ret;
}

static uint8_t
prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
		struct vhost_crypto_data_req *vc_req,
		struct virtio_crypto_alg_chain_data_req *chain,
		struct vring_desc *cur_desc,
		uint32_t *nb_descs, uint32_t vq_size)
{
	struct vring_desc *desc = cur_desc, *digest_desc;
	struct vhost_crypto_writeback_data *ewb = NULL, *ewb2 = NULL;
	struct rte_mbuf *m_src = op->sym->m_src, *m_dst = op->sym->m_dst;
	uint8_t *iv_data = rte_crypto_op_ctod_offset(op, uint8_t *, IV_OFFSET);
	uint32_t digest_offset;
	void *digest_addr;
	uint8_t ret = 0;

	/* prepare */
	/* iv */
	if (unlikely(copy_data(iv_data, vc_req, &desc,
			chain->para.iv_len, nb_descs, vq_size) < 0)) {
		ret = VIRTIO_CRYPTO_BADMSG;
		goto error_exit;
	}

	m_src->data_len = chain->para.src_data_len;

	switch (vcrypto->option) {
	case RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE:
		m_dst->data_len = chain->para.dst_data_len;

		m_src->buf_iova = gpa_to_hpa(vcrypto->dev, desc->addr,
				chain->para.src_data_len);
		m_src->buf_addr = get_data_ptr(vc_req, desc, VHOST_ACCESS_RO);
		if (unlikely(m_src->buf_iova == 0 || m_src->buf_addr == NULL)) {
			VC_LOG_ERR("zero_copy may fail due to cross page data");
			ret = VIRTIO_CRYPTO_ERR;
			goto error_exit;
		}

		if (unlikely(move_desc(vc_req->head, &desc,
				chain->para.src_data_len,
				nb_descs, vq_size) < 0)) {
			VC_LOG_ERR("Incorrect descriptor");
			ret = VIRTIO_CRYPTO_ERR;
			goto error_exit;
		}
		break;
	case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE:
		vc_req->wb_pool = vcrypto->wb_pool;

		if (unlikely(chain->para.src_data_len >
				RTE_MBUF_DEFAULT_BUF_SIZE)) {
			VC_LOG_ERR("Not enough space to do data copy");
			ret = VIRTIO_CRYPTO_ERR;
			goto error_exit;
		}
		if (unlikely(copy_data(rte_pktmbuf_mtod(m_src, uint8_t *),
				vc_req, &desc, chain->para.src_data_len,
				nb_descs, vq_size) < 0)) {
			ret = VIRTIO_CRYPTO_BADMSG;
			goto error_exit;
		}

		break;
	default:
		ret = VIRTIO_CRYPTO_BADMSG;
		goto error_exit;
	}

	/* dst */
	desc = find_write_desc(vc_req->head, desc, nb_descs, vq_size);
	if (unlikely(!desc)) {
		VC_LOG_ERR("Cannot find write location");
		ret = VIRTIO_CRYPTO_BADMSG;
		goto error_exit;
	}

	switch (vcrypto->option) {
	case RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE:
		m_dst->buf_iova = gpa_to_hpa(vcrypto->dev,
				desc->addr, chain->para.dst_data_len);
		m_dst->buf_addr = get_data_ptr(vc_req, desc, VHOST_ACCESS_RW);
		if (unlikely(m_dst->buf_iova == 0 || m_dst->buf_addr == NULL)) {
			VC_LOG_ERR("zero_copy may fail due to cross page data");
			ret = VIRTIO_CRYPTO_ERR;
			goto error_exit;
		}

		if (unlikely(move_desc(vc_req->head, &desc,
				chain->para.dst_data_len,
				nb_descs, vq_size) < 0)) {
			VC_LOG_ERR("Incorrect descriptor");
			ret = VIRTIO_CRYPTO_ERR;
			goto error_exit;
		}

		op->sym->auth.digest.phys_addr = gpa_to_hpa(vcrypto->dev,
				desc->addr, chain->para.hash_result_len);
		op->sym->auth.digest.data = get_data_ptr(vc_req, desc,
				VHOST_ACCESS_RW);
		if (unlikely(op->sym->auth.digest.phys_addr == 0)) {
			VC_LOG_ERR("zero_copy may fail due to cross page data");
			ret = VIRTIO_CRYPTO_ERR;
			goto error_exit;
		}

		if (unlikely(move_desc(vc_req->head, &desc,
				chain->para.hash_result_len,
				nb_descs, vq_size) < 0)) {
			VC_LOG_ERR("Incorrect descriptor");
			ret = VIRTIO_CRYPTO_ERR;
			goto error_exit;
		}

		break;
	case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE:
		vc_req->wb = prepare_write_back_data(vc_req, &desc, &ewb,
				rte_pktmbuf_mtod(m_src, uint8_t *),
				chain->para.cipher_start_src_offset,
				chain->para.dst_data_len -
				chain->para.cipher_start_src_offset,
				nb_descs, vq_size);
		if (unlikely(vc_req->wb == NULL)) {
			ret = VIRTIO_CRYPTO_ERR;
			goto error_exit;
		}

		digest_offset = m_src->data_len;
		digest_addr = rte_pktmbuf_mtod_offset(m_src, void *,
				digest_offset);
		digest_desc = desc;

		/** create a wb_data for digest */
		ewb->next = prepare_write_back_data(vc_req, &desc, &ewb2,
				digest_addr, 0, chain->para.hash_result_len,
				nb_descs, vq_size);
		if (unlikely(ewb->next == NULL)) {
			ret = VIRTIO_CRYPTO_ERR;
			goto error_exit;
		}

		if (unlikely(copy_data(digest_addr, vc_req, &digest_desc,
				chain->para.hash_result_len,
				nb_descs, vq_size) < 0)) {
			ret = VIRTIO_CRYPTO_BADMSG;
			goto error_exit;
		}

		op->sym->auth.digest.data = digest_addr;
		op->sym->auth.digest.phys_addr = rte_pktmbuf_iova_offset(m_src,
				digest_offset);
		break;
	default:
		ret = VIRTIO_CRYPTO_BADMSG;
		goto error_exit;
	}

	/* record inhdr */
	vc_req->inhdr = get_data_ptr(vc_req, desc, VHOST_ACCESS_WO);
	if (unlikely(vc_req->inhdr == NULL)) {
		ret = VIRTIO_CRYPTO_BADMSG;
		goto error_exit;
	}

	vc_req->inhdr->status = VIRTIO_CRYPTO_OK;

	op->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
	op->sess_type = RTE_CRYPTO_OP_WITH_SESSION;

	op->sym->cipher.data.offset = chain->para.cipher_start_src_offset;
	op->sym->cipher.data.length = chain->para.src_data_len -
			chain->para.cipher_start_src_offset;

	op->sym->auth.data.offset = chain->para.hash_start_src_offset;
	op->sym->auth.data.length = chain->para.len_to_hash;

	vc_req->len = chain->para.dst_data_len + chain->para.hash_result_len +
			INHDR_LEN;
	return 0;

error_exit:
	if (vc_req->wb)
		free_wb_data(vc_req->wb, vc_req->wb_pool);
	vc_req->len = INHDR_LEN;
	return ret;
}

/**
 * Process one request from one descriptor chain
 */
static __rte_always_inline int
vhost_crypto_process_one_req(struct vhost_crypto *vcrypto,
		struct vhost_virtqueue *vq, struct rte_crypto_op *op,
		struct vring_desc *head, uint16_t desc_idx)
{
	struct vhost_crypto_data_req *vc_req = rte_mbuf_to_priv(op->sym->m_src);
	struct rte_cryptodev_sym_session *session;
	struct virtio_crypto_op_data_req *req, tmp_req;
	struct virtio_crypto_inhdr *inhdr;
	struct vring_desc *desc = NULL;
	uint64_t session_id;
	uint64_t dlen;
	uint32_t nb_descs = vq->size;
	int err = 0;

	vc_req->desc_idx = desc_idx;
	vc_req->dev = vcrypto->dev;
	vc_req->vq = vq;

	if (likely(head->flags & VRING_DESC_F_INDIRECT)) {
		dlen = head->len;
		nb_descs = dlen / sizeof(struct vring_desc);
		/* drop invalid descriptors */
		if (unlikely(nb_descs > vq->size))
			return -1;
		desc = IOVA_TO_VVA(struct vring_desc *, vc_req, head->addr,
				&dlen, VHOST_ACCESS_RO);
		if (unlikely(!desc || dlen != head->len))
			return -1;
		desc_idx = 0;
		head = desc;
	} else {
		desc = head;
	}

	vc_req->head = head;
	vc_req->zero_copy = vcrypto->option;

	req = get_data_ptr(vc_req, desc, VHOST_ACCESS_RO);
	if (unlikely(req == NULL)) {
		switch (vcrypto->option) {
		case RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE:
			err = VIRTIO_CRYPTO_BADMSG;
			VC_LOG_ERR("Invalid descriptor");
			goto error_exit;
		case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE:
			req = &tmp_req;
			if (unlikely(copy_data(req, vc_req, &desc, sizeof(*req),
					&nb_descs, vq->size) < 0)) {
				err = VIRTIO_CRYPTO_BADMSG;
				VC_LOG_ERR("Invalid descriptor");
				goto error_exit;
			}
			break;
		default:
			err = VIRTIO_CRYPTO_ERR;
			VC_LOG_ERR("Invalid option");
			goto error_exit;
		}
	} else {
		if (unlikely(move_desc(vc_req->head, &desc,
				sizeof(*req), &nb_descs, vq->size) < 0)) {
			VC_LOG_ERR("Incorrect descriptor");
			/* make sure the failure reaches the status byte */
			err = VIRTIO_CRYPTO_ERR;
			goto error_exit;
		}
	}

	switch (req->header.opcode) {
	case VIRTIO_CRYPTO_CIPHER_ENCRYPT:
	case VIRTIO_CRYPTO_CIPHER_DECRYPT:
		session_id = req->header.session_id;

		/* one branch to avoid unnecessary table lookup */
		if (vcrypto->cache_session_id != session_id) {
			err = rte_hash_lookup_data(vcrypto->session_map,
					&session_id, (void **)&session);
			if (unlikely(err < 0)) {
				err = VIRTIO_CRYPTO_ERR;
				VC_LOG_ERR("Failed to find session %"PRIu64,
						session_id);
				goto error_exit;
			}

			vcrypto->cache_session = session;
			vcrypto->cache_session_id = session_id;
		}

		session = vcrypto->cache_session;

		err = rte_crypto_op_attach_sym_session(op, session);
		if (unlikely(err < 0)) {
			err = VIRTIO_CRYPTO_ERR;
			VC_LOG_ERR("Failed to attach session to op");
			goto error_exit;
		}

		switch (req->u.sym_req.op_type) {
		case VIRTIO_CRYPTO_SYM_OP_NONE:
			err = VIRTIO_CRYPTO_NOTSUPP;
			break;
		case VIRTIO_CRYPTO_SYM_OP_CIPHER:
			err = prepare_sym_cipher_op(vcrypto, op, vc_req,
					&req->u.sym_req.u.cipher, desc,
					&nb_descs, vq->size);
			break;
		case VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING:
			err = prepare_sym_chain_op(vcrypto, op, vc_req,
					&req->u.sym_req.u.chain, desc,
					&nb_descs, vq->size);
			break;
		}
		if (unlikely(err != 0)) {
			VC_LOG_ERR("Failed to process sym request");
			goto error_exit;
		}
		break;
	default:
		VC_LOG_ERR("Unsupported symmetric crypto request type %u",
				req->header.opcode);
		err = VIRTIO_CRYPTO_NOTSUPP;
		goto error_exit;
	}

	return 0;

error_exit:

	inhdr = reach_inhdr(vc_req, desc, &nb_descs, vq->size);
	if (likely(inhdr != NULL))
		inhdr->status = (uint8_t)err;

	return -1;
}
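
/*
 * Sketch of the descriptor chain consumed above for a symmetric request
 * (each region may itself be split over several descriptors):
 *
 *	desc[0..]:  virtio_crypto_op_data_req   (read-only)
 *	desc[...]:  IV, then source data        (read-only)
 *	desc[...]:  destination data            (write)
 *	desc[...]:  digest (chain requests)     (write)
 *	desc[last]: virtio_crypto_inhdr status  (write)
 */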

static __rte_always_inline struct vhost_virtqueue *
vhost_crypto_finalize_one_request(struct rte_crypto_op *op,
		struct vhost_virtqueue *old_vq)
{
	struct rte_mbuf *m_src = op->sym->m_src;
	struct rte_mbuf *m_dst = op->sym->m_dst;
	struct vhost_crypto_data_req *vc_req = rte_mbuf_to_priv(m_src);
	uint16_t desc_idx;

	if (unlikely(!vc_req)) {
		VC_LOG_ERR("Failed to retrieve vc_req");
		return NULL;
	}

	if (old_vq && (vc_req->vq != old_vq))
		return vc_req->vq;

	desc_idx = vc_req->desc_idx;

	if (unlikely(op->status != RTE_CRYPTO_OP_STATUS_SUCCESS))
		vc_req->inhdr->status = VIRTIO_CRYPTO_ERR;
	else {
		if (vc_req->zero_copy == 0)
			write_back_data(vc_req);
	}

	vc_req->vq->used->ring[desc_idx].id = desc_idx;
	vc_req->vq->used->ring[desc_idx].len = vc_req->len;

	rte_mempool_put(m_src->pool, (void *)m_src);

	if (m_dst)
		rte_mempool_put(m_dst->pool, (void *)m_dst);

	return vc_req->vq;
}

static __rte_always_inline uint16_t
vhost_crypto_complete_one_vm_requests(struct rte_crypto_op **ops,
		uint16_t nb_ops, int *callfd)
{
	uint16_t processed = 1;
	struct vhost_virtqueue *vq, *tmp_vq;

	if (unlikely(nb_ops == 0))
		return 0;

	vq = vhost_crypto_finalize_one_request(ops[0], NULL);
	if (unlikely(vq == NULL))
		return 0;
	tmp_vq = vq;

	while (processed < nb_ops) {
		tmp_vq = vhost_crypto_finalize_one_request(ops[processed],
				tmp_vq);

		if (unlikely(vq != tmp_vq))
			break;

		processed++;
	}

	*callfd = vq->callfd;

	*(volatile uint16_t *)&vq->used->idx += processed;

	return processed;
}
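
/*
 * The callfd collected above is expected to be signalled by the caller to
 * notify the guest of the completed requests. A sketch, assuming the
 * application includes <sys/eventfd.h>:
 *
 *	int callfd;
 *	uint16_t n = vhost_crypto_complete_one_vm_requests(ops, nb_ops,
 *			&callfd);
 *	if (n > 0)
 *		eventfd_write(callfd, (eventfd_t)1);
 */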

int __rte_experimental
rte_vhost_crypto_create(int vid, uint8_t cryptodev_id,
		struct rte_mempool *sess_pool,
		struct rte_mempool *sess_priv_pool,
		int socket_id)
{
	struct virtio_net *dev = get_device(vid);
	struct rte_hash_parameters params = {0};
	struct vhost_crypto *vcrypto;
	char name[128];
	int ret;

	if (!dev) {
		VC_LOG_ERR("Invalid vid %i", vid);
		return -EINVAL;
	}

	ret = rte_vhost_driver_set_features(dev->ifname,
			VIRTIO_CRYPTO_FEATURES);
	if (ret < 0) {
		VC_LOG_ERR("Error setting features");
		return -1;
	}

	vcrypto = rte_zmalloc_socket(NULL, sizeof(*vcrypto),
			RTE_CACHE_LINE_SIZE, socket_id);
	if (!vcrypto) {
		VC_LOG_ERR("Insufficient memory");
		return -ENOMEM;
	}

	vcrypto->sess_pool = sess_pool;
	vcrypto->sess_priv_pool = sess_priv_pool;
	vcrypto->cid = cryptodev_id;
	vcrypto->cache_session_id = UINT64_MAX;
	vcrypto->last_session_id = 1;
	vcrypto->dev = dev;
	vcrypto->option = RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE;

	snprintf(name, 127, "HASH_VHOST_CRYPT_%u", (uint32_t)vid);
	params.name = name;
	params.entries = VHOST_CRYPTO_SESSION_MAP_ENTRIES;
	params.hash_func = rte_jhash;
	params.key_len = sizeof(uint64_t);
	params.socket_id = socket_id;
	vcrypto->session_map = rte_hash_create(&params);
	if (!vcrypto->session_map) {
		VC_LOG_ERR("Failed to create session map");
		ret = -ENOMEM;
		goto error_exit;
	}

	snprintf(name, 127, "MBUF_POOL_VM_%u", (uint32_t)vid);
	vcrypto->mbuf_pool = rte_pktmbuf_pool_create(name,
			VHOST_CRYPTO_MBUF_POOL_SIZE, 512,
			sizeof(struct vhost_crypto_data_req),
			RTE_MBUF_DEFAULT_DATAROOM * 2 + RTE_PKTMBUF_HEADROOM,
			rte_socket_id());
	if (!vcrypto->mbuf_pool) {
		VC_LOG_ERR("Failed to create mbuf pool");
		ret = -ENOMEM;
		goto error_exit;
	}

	snprintf(name, 127, "WB_POOL_VM_%u", (uint32_t)vid);
	vcrypto->wb_pool = rte_mempool_create(name,
			VHOST_CRYPTO_MBUF_POOL_SIZE,
			sizeof(struct vhost_crypto_writeback_data),
			128, 0, NULL, NULL, NULL, NULL,
			rte_socket_id(), 0);
	if (!vcrypto->wb_pool) {
		VC_LOG_ERR("Failed to create mempool");
		ret = -ENOMEM;
		goto error_exit;
	}

	dev->extern_data = vcrypto;
	dev->extern_ops.pre_msg_handle = NULL;
	dev->extern_ops.post_msg_handle = vhost_crypto_msg_post_handler;

	return 0;

error_exit:
	if (vcrypto->session_map)
		rte_hash_free(vcrypto->session_map);
	if (vcrypto->mbuf_pool)
		rte_mempool_free(vcrypto->mbuf_pool);

	rte_free(vcrypto);

	return ret;
}
1442
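/*
 * Tear down the vhost crypto backend created by rte_vhost_crypto_create():
 * free the session map, the mbuf and writeback pools and the per-device
 * context, then detach the vhost-user message handlers.
 */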
int __rte_experimental
rte_vhost_crypto_free(int vid)
{
        struct virtio_net *dev = get_device(vid);
        struct vhost_crypto *vcrypto;

        if (unlikely(dev == NULL)) {
                VC_LOG_ERR("Invalid vid %i", vid);
                return -EINVAL;
        }

        vcrypto = dev->extern_data;
        if (unlikely(vcrypto == NULL)) {
                VC_LOG_ERR("Cannot find required data, is it initialized?");
                return -ENOENT;
        }

        rte_hash_free(vcrypto->session_map);
        rte_mempool_free(vcrypto->mbuf_pool);
        rte_mempool_free(vcrypto->wb_pool);
        rte_free(vcrypto);

        dev->extern_data = NULL;
        dev->extern_ops.pre_msg_handle = NULL;
        dev->extern_ops.post_msg_handle = NULL;

        return 0;
}

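/*
 * Switch the data path between zero copy and copy mode. The switch is only
 * permitted while no requests are in flight (all pool elements returned).
 * Copy mode needs a writeback-descriptor pool, so it is (re)created when
 * zero copy is disabled and freed when zero copy is enabled.
 */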
int __rte_experimental
rte_vhost_crypto_set_zero_copy(int vid, enum rte_vhost_crypto_zero_copy option)
{
        struct virtio_net *dev = get_device(vid);
        struct vhost_crypto *vcrypto;

        if (unlikely(dev == NULL)) {
                VC_LOG_ERR("Invalid vid %i", vid);
                return -EINVAL;
        }

        if (unlikely((uint32_t)option >=
                                RTE_VHOST_CRYPTO_MAX_ZERO_COPY_OPTIONS)) {
                VC_LOG_ERR("Invalid option %i", option);
                return -EINVAL;
        }

        vcrypto = (struct vhost_crypto *)dev->extern_data;
        if (unlikely(vcrypto == NULL)) {
                VC_LOG_ERR("Cannot find required data, is it initialized?");
                return -ENOENT;
        }

        if (vcrypto->option == (uint8_t)option)
                return 0;

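        /* A safe switch requires every mbuf and writeback element to be back
         * in its pool, i.e. no request is currently in flight.
         */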
        if (!(rte_mempool_full(vcrypto->mbuf_pool)) ||
                        (vcrypto->wb_pool != NULL &&
                        !(rte_mempool_full(vcrypto->wb_pool)))) {
                VC_LOG_ERR("Cannot update zero copy as mempool is not full");
                return -EINVAL;
        }

        if (option == RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE) {
                char name[128];

                snprintf(name, 127, "WB_POOL_VM_%u", (uint32_t)vid);
                vcrypto->wb_pool = rte_mempool_create(name,
                                VHOST_CRYPTO_MBUF_POOL_SIZE,
                                sizeof(struct vhost_crypto_writeback_data),
                                128, 0, NULL, NULL, NULL, NULL,
                                rte_socket_id(), 0);
                if (!vcrypto->wb_pool) {
                        VC_LOG_ERR("Failed to create wb mempool");
                        return -ENOMEM;
                }
        } else {
                /* Zero copy writes straight to guest memory, so the
                 * writeback pool is not needed while it is enabled.
                 */
                rte_mempool_free(vcrypto->wb_pool);
                vcrypto->wb_pool = NULL;
        }

        vcrypto->option = (uint8_t)option;

        return 0;
}

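/*
 * Dequeue up to nb_ops requests from the guest's virtqueue and translate
 * them into rte_crypto_ops. In zero copy mode each request borrows two
 * mbufs (src and dst) from the pool; in copy mode it borrows one. The
 * caller then enqueues the returned ops to the cryptodev. A minimal
 * polling sketch, assuming ops[] was allocated from a crypto op pool and
 * cryptodev_id/qid match the configured device:
 *
 *      uint16_t n = rte_vhost_crypto_fetch_requests(vid, qid, ops, BURST);
 *      n = rte_cryptodev_enqueue_burst(cryptodev_id, 0, ops, n);
 */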
uint16_t __rte_experimental
rte_vhost_crypto_fetch_requests(int vid, uint32_t qid,
                struct rte_crypto_op **ops, uint16_t nb_ops)
{
        struct rte_mbuf *mbufs[VHOST_CRYPTO_MAX_BURST_SIZE * 2];
        struct virtio_net *dev = get_device(vid);
        struct vhost_crypto *vcrypto;
        struct vhost_virtqueue *vq;
        uint16_t avail_idx;
        uint16_t start_idx;
        uint16_t count;
        uint16_t i = 0;

        /* The return type is unsigned, so report errors as zero fetched
         * requests rather than as a negative errno, which the caller would
         * misread as a huge count.
         */
        if (unlikely(dev == NULL)) {
                VC_LOG_ERR("Invalid vid %i", vid);
                return 0;
        }

        if (unlikely(qid >= VHOST_MAX_QUEUE_PAIRS)) {
                VC_LOG_ERR("Invalid qid %u", qid);
                return 0;
        }

        vcrypto = (struct vhost_crypto *)dev->extern_data;
        if (unlikely(vcrypto == NULL)) {
                VC_LOG_ERR("Cannot find required data, is it initialized?");
                return 0;
        }

        vq = dev->virtqueue[qid];

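        /* Snapshot the guest's avail index once, then clamp the batch to the
         * burst size and to the number of ops the caller provided.
         */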
        avail_idx = *((volatile uint16_t *)&vq->avail->idx);
        start_idx = vq->last_used_idx;
        count = avail_idx - start_idx;
        count = RTE_MIN(count, VHOST_CRYPTO_MAX_BURST_SIZE);
        count = RTE_MIN(count, nb_ops);

        if (unlikely(count == 0))
                return 0;

        /* for zero copy, we need 2 empty mbufs for src and dst, otherwise
         * we need only 1 mbuf as src and dst
         */
        switch (vcrypto->option) {
        case RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE:
                if (unlikely(rte_mempool_get_bulk(vcrypto->mbuf_pool,
                                (void **)mbufs, count * 2) < 0)) {
                        VC_LOG_ERR("Insufficient memory");
                        return 0;
                }

                for (i = 0; i < count; i++) {
                        uint16_t used_idx = (start_idx + i) & (vq->size - 1);
                        uint16_t desc_idx = vq->avail->ring[used_idx];
                        struct vring_desc *head = &vq->desc[desc_idx];
                        struct rte_crypto_op *op = ops[i];

                        op->sym->m_src = mbufs[i * 2];
                        op->sym->m_dst = mbufs[i * 2 + 1];
                        op->sym->m_src->data_off = 0;
                        op->sym->m_dst->data_off = 0;

                        if (unlikely(vhost_crypto_process_one_req(vcrypto, vq,
                                        op, head, desc_idx) < 0))
                                break;
                }

                /* Return the mbufs of any requests that failed to parse. */
                if (unlikely(i < count))
                        rte_mempool_put_bulk(vcrypto->mbuf_pool,
                                        (void **)&mbufs[i * 2],
                                        (count - i) * 2);

                break;

        case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE:
                if (unlikely(rte_mempool_get_bulk(vcrypto->mbuf_pool,
                                (void **)mbufs, count) < 0)) {
                        VC_LOG_ERR("Insufficient memory");
                        return 0;
                }

                for (i = 0; i < count; i++) {
                        uint16_t used_idx = (start_idx + i) & (vq->size - 1);
                        uint16_t desc_idx = vq->avail->ring[used_idx];
                        struct vring_desc *head = &vq->desc[desc_idx];
                        struct rte_crypto_op *op = ops[i];

                        op->sym->m_src = mbufs[i];
                        op->sym->m_dst = NULL;
                        op->sym->m_src->data_off = 0;

                        if (unlikely(vhost_crypto_process_one_req(vcrypto, vq,
                                        op, head, desc_idx) < 0))
                                break;
                }

                /* Return the mbufs of any requests that failed to parse. */
                if (unlikely(i < count))
                        rte_mempool_put_bulk(vcrypto->mbuf_pool,
                                        (void **)&mbufs[i],
                                        count - i);

                break;

        }

        vq->last_used_idx += i;

        return i;
}

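/*
 * Write the completions of the given (already dequeued) crypto ops back to
 * the guest's used rings. One callfd per touched virtqueue is stored in
 * callfds so the caller can kick the guest, typically via eventfd_write().
 * A minimal completion-side sketch, under the same assumptions as the
 * fetch example above:
 *
 *      uint16_t n, nb_fds, j;
 *
 *      n = rte_cryptodev_dequeue_burst(cryptodev_id, 0, ops, BURST);
 *      n = rte_vhost_crypto_finalize_requests(ops, n, callfds, &nb_fds);
 *      for (j = 0; j < nb_fds; j++)
 *              eventfd_write(callfds[j], (eventfd_t)1);
 */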
uint16_t __rte_experimental
rte_vhost_crypto_finalize_requests(struct rte_crypto_op **ops,
                uint16_t nb_ops, int *callfds, uint16_t *nb_callfds)
{
        struct rte_crypto_op **tmp_ops = ops;
        uint16_t count = 0, left = nb_ops;
        int callfd;
        uint16_t idx = 0;

        while (left) {
                count = vhost_crypto_complete_one_vm_requests(tmp_ops, left,
                                &callfd);
                if (unlikely(count == 0))
                        break;

                tmp_ops = &tmp_ops[count];
                left -= count;

                callfds[idx++] = callfd;

                if (unlikely(idx >= VIRTIO_CRYPTO_MAX_NUM_BURST_VQS)) {
                        VC_LOG_ERR("Too many vqs");
                        break;
                }
        }

        *nb_callfds = idx;

        return nb_ops - left;
}