lib/librte_vhost/vhost_crypto.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2017-2018 Intel Corporation
3  */
4 #include <rte_malloc.h>
5 #include <rte_hash.h>
6 #include <rte_jhash.h>
7 #include <rte_mbuf.h>
8 #include <rte_cryptodev.h>
9
10 #include "rte_vhost_crypto.h"
11 #include "vhost.h"
12 #include "vhost_user.h"
13 #include "virtio_crypto.h"
14
15 #define INHDR_LEN               (sizeof(struct virtio_crypto_inhdr))
16 #define IV_OFFSET               (sizeof(struct rte_crypto_op) + \
17                                 sizeof(struct rte_crypto_sym_op))
18
19 #ifdef RTE_LIBRTE_VHOST_DEBUG
20 #define VC_LOG_ERR(fmt, args...)                                \
21         RTE_LOG(ERR, USER1, "[%s] %s() line %u: " fmt "\n",     \
22                 "Vhost-Crypto", __func__, __LINE__, ## args)
23 #define VC_LOG_INFO(fmt, args...)                               \
24         RTE_LOG(INFO, USER1, "[%s] %s() line %u: " fmt "\n",    \
25                 "Vhost-Crypto", __func__, __LINE__, ## args)
26
27 #define VC_LOG_DBG(fmt, args...)                                \
28         RTE_LOG(DEBUG, USER1, "[%s] %s() line %u: " fmt "\n",   \
29                 "Vhost-Crypto", __func__, __LINE__, ## args)
30 #else
31 #define VC_LOG_ERR(fmt, args...)                                \
32         RTE_LOG(ERR, USER1, "[VHOST-Crypto]: " fmt "\n", ## args)
33 #define VC_LOG_INFO(fmt, args...)                               \
34         RTE_LOG(INFO, USER1, "[VHOST-Crypto]: " fmt "\n", ## args)
35 #define VC_LOG_DBG(fmt, args...)
36 #endif
37
38 #define VIRTIO_CRYPTO_FEATURES ((1 << VIRTIO_F_NOTIFY_ON_EMPTY) |       \
39                 (1 << VIRTIO_RING_F_INDIRECT_DESC) |                    \
40                 (1 << VIRTIO_RING_F_EVENT_IDX) |                        \
41                 (1 << VIRTIO_CRYPTO_SERVICE_CIPHER) |                   \
42                 (1 << VIRTIO_CRYPTO_SERVICE_MAC) |                      \
43                 (1 << VIRTIO_NET_F_CTRL_VQ))
44
45 #define IOVA_TO_VVA(t, r, a, l, p)                                      \
46         ((t)(uintptr_t)vhost_iova_to_vva(r->dev, r->vq, a, l, p))
47
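/* Translate a virtio-crypto cipher algorithm ID into the corresponding
 * rte_crypto_cipher_algorithm, or return a negative VIRTIO_CRYPTO_NOTSUPP /
 * VIRTIO_CRYPTO_BADMSG error code for unsupported or unknown values.
 */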
48 static int
49 cipher_algo_transform(uint32_t virtio_cipher_algo)
50 {
51         int ret;
52
53         switch (virtio_cipher_algo) {
54         case VIRTIO_CRYPTO_CIPHER_AES_CBC:
55                 ret = RTE_CRYPTO_CIPHER_AES_CBC;
56                 break;
57         case VIRTIO_CRYPTO_CIPHER_AES_CTR:
58                 ret = RTE_CRYPTO_CIPHER_AES_CTR;
59                 break;
60         case VIRTIO_CRYPTO_CIPHER_DES_ECB:
61                 ret = -VIRTIO_CRYPTO_NOTSUPP;
62                 break;
63         case VIRTIO_CRYPTO_CIPHER_DES_CBC:
64                 ret = RTE_CRYPTO_CIPHER_DES_CBC;
65                 break;
66         case VIRTIO_CRYPTO_CIPHER_3DES_ECB:
67                 ret = RTE_CRYPTO_CIPHER_3DES_ECB;
68                 break;
69         case VIRTIO_CRYPTO_CIPHER_3DES_CBC:
70                 ret = RTE_CRYPTO_CIPHER_3DES_CBC;
71                 break;
72         case VIRTIO_CRYPTO_CIPHER_3DES_CTR:
73                 ret = RTE_CRYPTO_CIPHER_3DES_CTR;
74                 break;
75         case VIRTIO_CRYPTO_CIPHER_KASUMI_F8:
76                 ret = RTE_CRYPTO_CIPHER_KASUMI_F8;
77                 break;
78         case VIRTIO_CRYPTO_CIPHER_SNOW3G_UEA2:
79                 ret = RTE_CRYPTO_CIPHER_SNOW3G_UEA2;
80                 break;
81         case VIRTIO_CRYPTO_CIPHER_AES_F8:
82                 ret = RTE_CRYPTO_CIPHER_AES_F8;
83                 break;
84         case VIRTIO_CRYPTO_CIPHER_AES_XTS:
85                 ret = RTE_CRYPTO_CIPHER_AES_XTS;
86                 break;
87         case VIRTIO_CRYPTO_CIPHER_ZUC_EEA3:
88                 ret = RTE_CRYPTO_CIPHER_ZUC_EEA3;
89                 break;
90         default:
91                 ret = -VIRTIO_CRYPTO_BADMSG;
92                 break;
93         }
94
95         return ret;
96 }
97
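/* Translate a virtio-crypto MAC algorithm ID into the corresponding
 * rte_crypto_auth_algorithm, or return a negative virtio-crypto error code.
 */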
98 static int
99 auth_algo_transform(uint32_t virtio_auth_algo)
100 {
101         int ret;
102
103         switch (virtio_auth_algo) {
104
105         case VIRTIO_CRYPTO_NO_MAC:
106                 ret = RTE_CRYPTO_AUTH_NULL;
107                 break;
108         case VIRTIO_CRYPTO_MAC_HMAC_MD5:
109                 ret = RTE_CRYPTO_AUTH_MD5_HMAC;
110                 break;
111         case VIRTIO_CRYPTO_MAC_HMAC_SHA1:
112                 ret = RTE_CRYPTO_AUTH_SHA1_HMAC;
113                 break;
114         case VIRTIO_CRYPTO_MAC_HMAC_SHA_224:
115                 ret = RTE_CRYPTO_AUTH_SHA224_HMAC;
116                 break;
117         case VIRTIO_CRYPTO_MAC_HMAC_SHA_256:
118                 ret = RTE_CRYPTO_AUTH_SHA256_HMAC;
119                 break;
120         case VIRTIO_CRYPTO_MAC_HMAC_SHA_384:
121                 ret = RTE_CRYPTO_AUTH_SHA384_HMAC;
122                 break;
123         case VIRTIO_CRYPTO_MAC_HMAC_SHA_512:
124                 ret = RTE_CRYPTO_AUTH_SHA512_HMAC;
125                 break;
126         case VIRTIO_CRYPTO_MAC_CMAC_3DES:
127                 ret = -VIRTIO_CRYPTO_NOTSUPP;
128                 break;
129         case VIRTIO_CRYPTO_MAC_CMAC_AES:
130                 ret = RTE_CRYPTO_AUTH_AES_CMAC;
131                 break;
132         case VIRTIO_CRYPTO_MAC_KASUMI_F9:
133                 ret = RTE_CRYPTO_AUTH_KASUMI_F9;
134                 break;
135         case VIRTIO_CRYPTO_MAC_SNOW3G_UIA2:
136                 ret = RTE_CRYPTO_AUTH_SNOW3G_UIA2;
137                 break;
138         case VIRTIO_CRYPTO_MAC_GMAC_AES:
139                 ret = RTE_CRYPTO_AUTH_AES_GMAC;
140                 break;
141         case VIRTIO_CRYPTO_MAC_GMAC_TWOFISH:
142                 ret = -VIRTIO_CRYPTO_NOTSUPP;
143                 break;
144         case VIRTIO_CRYPTO_MAC_CBCMAC_AES:
145                 ret = RTE_CRYPTO_AUTH_AES_CBC_MAC;
146                 break;
147         case VIRTIO_CRYPTO_MAC_CBCMAC_KASUMI_F9:
148                 ret = -VIRTIO_CRYPTO_NOTSUPP;
149                 break;
150         case VIRTIO_CRYPTO_MAC_XCBC_AES:
151                 ret = RTE_CRYPTO_AUTH_AES_XCBC_MAC;
152                 break;
153         default:
154                 ret = -VIRTIO_CRYPTO_BADMSG;
155                 break;
156         }
157
158         return ret;
159 }
160
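/* Return the IV length in bytes required by the cipher algorithm, or -1 for
 * algorithms this table does not cover yet.
 */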
161 static int get_iv_len(enum rte_crypto_cipher_algorithm algo)
162 {
163         int len;
164
165         switch (algo) {
166         case RTE_CRYPTO_CIPHER_3DES_CBC:
167                 len = 8;
168                 break;
169         case RTE_CRYPTO_CIPHER_3DES_CTR:
170                 len = 8;
171                 break;
172         case RTE_CRYPTO_CIPHER_3DES_ECB:
173                 len = 8;
174                 break;
175         case RTE_CRYPTO_CIPHER_AES_CBC:
176                 len = 16;
177                 break;
178
179         /* TODO: add common algos */
180
181         default:
182                 len = -1;
183                 break;
184         }
185
186         return len;
187 }
188
189 /**
190  * The vhost_crypto structure maintains the state of one virtio-crypto
191  * device and the single DPDK cryptodev that processes all of its crypto
192  * workloads.
193  */
194 struct vhost_crypto {
195         /** Used to lookup DPDK Cryptodev Session based on VIRTIO crypto
196          *  session ID.
197          */
198         struct rte_hash *session_map;
199         struct rte_mempool *mbuf_pool;
200         struct rte_mempool *sess_pool;
201         struct rte_mempool *sess_priv_pool;
202         struct rte_mempool *wb_pool;
203
204         /** DPDK cryptodev ID */
205         uint8_t cid;
206         uint16_t nb_qps;
207
208         uint64_t last_session_id;
209
210         uint64_t cache_session_id;
211         struct rte_cryptodev_sym_session *cache_session;
212         /** socket id for the device */
213         int socket_id;
214
215         struct virtio_net *dev;
216
217         uint8_t option;
218 } __rte_cache_aligned;
219
220 struct vhost_crypto_writeback_data {
221         uint8_t *src;
222         uint8_t *dst;
223         uint64_t len;
224         struct vhost_crypto_writeback_data *next;
225 };
226
227 struct vhost_crypto_data_req {
228         struct vring_desc *head;
229         struct virtio_net *dev;
230         struct virtio_crypto_inhdr *inhdr;
231         struct vhost_virtqueue *vq;
232         struct vhost_crypto_writeback_data *wb;
233         struct rte_mempool *wb_pool;
234         uint16_t desc_idx;
235         uint16_t len;
236         uint16_t zero_copy;
237 };
238
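/* Fill a single cipher transform from the session parameters delivered in
 * the VHOST_USER_CRYPTO_CREATE_SESS message.
 */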
239 static int
240 transform_cipher_param(struct rte_crypto_sym_xform *xform,
241                 VhostUserCryptoSessionParam *param)
242 {
243         int ret;
244
245         ret = cipher_algo_transform(param->cipher_algo);
246         if (unlikely(ret < 0))
247                 return ret;
248
249         xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
250         xform->cipher.algo = (enum rte_crypto_cipher_algorithm)ret;
251         xform->cipher.key.length = param->cipher_key_len;
252         if (xform->cipher.key.length > 0)
253                 xform->cipher.key.data = param->cipher_key_buf;
254         if (param->dir == VIRTIO_CRYPTO_OP_ENCRYPT)
255                 xform->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
256         else if (param->dir == VIRTIO_CRYPTO_OP_DECRYPT)
257                 xform->cipher.op = RTE_CRYPTO_CIPHER_OP_DECRYPT;
258         else {
259                 VC_LOG_DBG("Bad operation type");
260                 return -VIRTIO_CRYPTO_BADMSG;
261         }
262
263         ret = get_iv_len(xform->cipher.algo);
264         if (unlikely(ret < 0))
265                 return ret;
266         xform->cipher.iv.length = (uint16_t)ret;
267         xform->cipher.iv.offset = IV_OFFSET;
268         return 0;
269 }
270
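/* Fill a cipher+auth transform chain from the vhost-user session parameters.
 * The xform order and the encrypt/decrypt and generate/verify directions are
 * derived from the requested chaining order (hash-then-cipher vs
 * cipher-then-hash).
 */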
271 static int
272 transform_chain_param(struct rte_crypto_sym_xform *xforms,
273                 VhostUserCryptoSessionParam *param)
274 {
275         struct rte_crypto_sym_xform *xform_cipher, *xform_auth;
276         int ret;
277
278         switch (param->chaining_dir) {
279         case VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_HASH_THEN_CIPHER:
280                 xform_auth = xforms;
281                 xform_cipher = xforms->next;
282                 xform_cipher->cipher.op = RTE_CRYPTO_CIPHER_OP_DECRYPT;
283                 xform_auth->auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
284                 break;
285         case VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_CIPHER_THEN_HASH:
286                 xform_cipher = xforms;
287                 xform_auth = xforms->next;
288                 xform_cipher->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
289                 xform_auth->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
290                 break;
291         default:
292                 return -VIRTIO_CRYPTO_BADMSG;
293         }
294
295         /* cipher */
296         ret = cipher_algo_transform(param->cipher_algo);
297         if (unlikely(ret < 0))
298                 return ret;
299         xform_cipher->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
300         xform_cipher->cipher.algo = (enum rte_crypto_cipher_algorithm)ret;
301         xform_cipher->cipher.key.length = param->cipher_key_len;
302         xform_cipher->cipher.key.data = param->cipher_key_buf;
303         ret = get_iv_len(xform_cipher->cipher.algo);
304         if (unlikely(ret < 0))
305                 return ret;
306         xform_cipher->cipher.iv.length = (uint16_t)ret;
307         xform_cipher->cipher.iv.offset = IV_OFFSET;
308
309         /* auth */
310         xform_auth->type = RTE_CRYPTO_SYM_XFORM_AUTH;
311         ret = auth_algo_transform(param->hash_algo);
312         if (unlikely(ret < 0))
313                 return ret;
314         xform_auth->auth.algo = (enum rte_crypto_auth_algorithm)ret;
315         xform_auth->auth.digest_length = param->digest_len;
316         xform_auth->auth.key.length = param->auth_key_len;
317         xform_auth->auth.key.data = param->auth_key_buf;
318
319         return 0;
320 }
321
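/* Create a cryptodev session for a guest virtio-crypto session request. The
 * resulting session ID, or a negative virtio-crypto error code, is returned
 * to the master through sess_param->session_id.
 */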
322 static void
323 vhost_crypto_create_sess(struct vhost_crypto *vcrypto,
324                 VhostUserCryptoSessionParam *sess_param)
325 {
326         struct rte_crypto_sym_xform xform1 = {0}, xform2 = {0};
327         struct rte_cryptodev_sym_session *session;
328         int ret;
329
330         switch (sess_param->op_type) {
331         case VIRTIO_CRYPTO_SYM_OP_NONE:
332         case VIRTIO_CRYPTO_SYM_OP_CIPHER:
333                 ret = transform_cipher_param(&xform1, sess_param);
334                 if (unlikely(ret)) {
335                         VC_LOG_ERR("Failed to transform session message (%i)", ret);
336                         sess_param->session_id = ret;
337                         return;
338                 }
339                 break;
340         case VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING:
341                 if (unlikely(sess_param->hash_mode !=
342                                 VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH)) {
343                         sess_param->session_id = -VIRTIO_CRYPTO_NOTSUPP;
344                         VC_LOG_ERR("Failed to transform session message (%i)",
345                                         -VIRTIO_CRYPTO_NOTSUPP);
346                         return;
347                 }
348
349                 xform1.next = &xform2;
350
351                 ret = transform_chain_param(&xform1, sess_param);
352                 if (unlikely(ret)) {
353                         VC_LOG_ERR("Failed to transform session message (%i)", ret);
354                         sess_param->session_id = ret;
355                         return;
356                 }
357
358                 break;
359         default:
360                 VC_LOG_ERR("Algorithm not yet supported");
361                 sess_param->session_id = -VIRTIO_CRYPTO_NOTSUPP;
362                 return;
363         }
364
365         session = rte_cryptodev_sym_session_create(vcrypto->sess_pool);
366         if (!session) {
367                 VC_LOG_ERR("Failed to create session");
368                 sess_param->session_id = -VIRTIO_CRYPTO_ERR;
369                 return;
370         }
371
372         if (rte_cryptodev_sym_session_init(vcrypto->cid, session, &xform1,
373                         vcrypto->sess_priv_pool) < 0) {
374                 VC_LOG_ERR("Failed to initialize session");
375                 sess_param->session_id = -VIRTIO_CRYPTO_ERR;
376                 return;
377         }
378
379         /* insert hash to map */
380         if (rte_hash_add_key_data(vcrypto->session_map,
381                         &vcrypto->last_session_id, session) < 0) {
382                 VC_LOG_ERR("Failed to insert session to hash table");
383
384                 if (rte_cryptodev_sym_session_clear(vcrypto->cid, session) < 0)
385                         VC_LOG_ERR("Failed to clear session");
386                 else {
387                         if (rte_cryptodev_sym_session_free(session) < 0)
388                                 VC_LOG_ERR("Failed to free session");
389                 }
390                 sess_param->session_id = -VIRTIO_CRYPTO_ERR;
391                 return;
392         }
393
394         VC_LOG_INFO("Session %"PRIu64" created for vdev %i.",
395                         vcrypto->last_session_id, vcrypto->dev->vid);
396
397         sess_param->session_id = vcrypto->last_session_id;
398         vcrypto->last_session_id++;
399 }
400
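/* Tear down the cryptodev session bound to a guest session ID and remove it
 * from the session map.
 */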
401 static int
402 vhost_crypto_close_sess(struct vhost_crypto *vcrypto, uint64_t session_id)
403 {
404         struct rte_cryptodev_sym_session *session;
405         uint64_t sess_id = session_id;
406         int ret;
407
408         ret = rte_hash_lookup_data(vcrypto->session_map, &sess_id,
409                         (void **)&session);
410
411         if (unlikely(ret < 0)) {
412                 VC_LOG_ERR("Failed to find session %"PRIu64".", session_id);
413                 return -VIRTIO_CRYPTO_INVSESS;
414         }
415
416         if (rte_cryptodev_sym_session_clear(vcrypto->cid, session) < 0) {
417                 VC_LOG_DBG("Failed to clear session");
418                 return -VIRTIO_CRYPTO_ERR;
419         }
420
421         if (rte_cryptodev_sym_session_free(session) < 0) {
422                 VC_LOG_DBG("Failed to free session");
423                 return -VIRTIO_CRYPTO_ERR;
424         }
425
426         if (rte_hash_del_key(vcrypto->session_map, &sess_id) < 0) {
427                 VC_LOG_DBG("Failed to delete session from hash table.");
428                 return -VIRTIO_CRYPTO_ERR;
429         }
430
431         VC_LOG_INFO("Session %"PRIu64" deleted for vdev %i.", sess_id,
432                         vcrypto->dev->vid);
433
434         return 0;
435 }
436
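/* vhost-user post-message handler registered through dev->extern_ops. It
 * serves the crypto session create/close requests coming from the master.
 */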
437 static enum rte_vhost_msg_result
438 vhost_crypto_msg_post_handler(int vid, void *msg)
439 {
440         struct virtio_net *dev = get_device(vid);
441         struct vhost_crypto *vcrypto;
442         VhostUserMsg *vmsg = msg;
443         enum rte_vhost_msg_result ret = RTE_VHOST_MSG_RESULT_OK;
444
445         if (dev == NULL) {
446                 VC_LOG_ERR("Invalid vid %i", vid);
447                 return RTE_VHOST_MSG_RESULT_ERR;
448         }
449
450         vcrypto = dev->extern_data;
451         if (vcrypto == NULL) {
452                 VC_LOG_ERR("Cannot find required data, is it initialized?");
453                 return RTE_VHOST_MSG_RESULT_ERR;
454         }
455
456         if (vmsg->request.master == VHOST_USER_CRYPTO_CREATE_SESS) {
457                 vhost_crypto_create_sess(vcrypto,
458                                 &vmsg->payload.crypto_session);
459                 vmsg->fd_num = 0;
460                 ret = RTE_VHOST_MSG_RESULT_REPLY;
461         } else if (vmsg->request.master == VHOST_USER_CRYPTO_CLOSE_SESS) {
462                 if (vhost_crypto_close_sess(vcrypto, vmsg->payload.u64))
463                         ret = RTE_VHOST_MSG_RESULT_ERR;
464         }
465
466         return ret;
467 }
468
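/* Walk the descriptor chain and return the first device-writable descriptor,
 * or NULL if none is found within the remaining descriptor budget.
 */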
469 static __rte_always_inline struct vring_desc *
470 find_write_desc(struct vring_desc *head, struct vring_desc *desc,
471                 uint32_t *nb_descs, uint32_t vq_size)
472 {
473         if (desc->flags & VRING_DESC_F_WRITE)
474                 return desc;
475
476         while (desc->flags & VRING_DESC_F_NEXT) {
477                 if (unlikely(*nb_descs == 0 || desc->next >= vq_size))
478                         return NULL;
479                 (*nb_descs)--;
480
481                 desc = &head[desc->next];
482                 if (desc->flags & VRING_DESC_F_WRITE)
483                         return desc;
484         }
485
486         return NULL;
487 }
488
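/* Follow the chain to its last descriptor and map it as the request status
 * header (virtio_crypto_inhdr) to be written back to the guest.
 */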
489 static struct virtio_crypto_inhdr *
490 reach_inhdr(struct vhost_crypto_data_req *vc_req, struct vring_desc *desc,
491                 uint32_t *nb_descs, uint32_t vq_size)
492 {
493         uint64_t dlen;
494         struct virtio_crypto_inhdr *inhdr;
495
496         while (desc->flags & VRING_DESC_F_NEXT) {
497                 if (unlikely(*nb_descs == 0 || desc->next >= vq_size))
498                         return NULL;
499                 (*nb_descs)--;
500                 desc = &vc_req->head[desc->next];
501         }
502
503         dlen = desc->len;
504         inhdr = IOVA_TO_VVA(struct virtio_crypto_inhdr *, vc_req, desc->addr,
505                         &dlen, VHOST_ACCESS_WO);
506         if (unlikely(!inhdr || dlen != desc->len))
507                 return NULL;
508
509         return inhdr;
510 }
511
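/* Advance *cur_desc past `size` bytes of the chain, so the caller can
 * continue parsing from the first descriptor after the skipped region.
 */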
512 static __rte_always_inline int
513 move_desc(struct vring_desc *head, struct vring_desc **cur_desc,
514                 uint32_t size, uint32_t *nb_descs, uint32_t vq_size)
515 {
516         struct vring_desc *desc = *cur_desc;
517         int left = size - desc->len;
518
519         while ((desc->flags & VRING_DESC_F_NEXT) && left > 0) {
520                 (*nb_descs)--;
521                 if (unlikely(*nb_descs == 0 || desc->next >= vq_size))
522                         return -1;
523
524                 desc = &head[desc->next];
525                 rte_prefetch0(&head[desc->next]);
526                 left -= desc->len;
527         }
528
529         if (unlikely(left > 0))
530                 return -1;
531
532         if (unlikely(*nb_descs == 0))
533                 *cur_desc = NULL;
534         else {
535                 if (unlikely(desc->next >= vq_size))
536                         return -1;
537                 *cur_desc = &head[desc->next];
538         }
539
540         return 0;
541 }
542
543 static __rte_always_inline void *
544 get_data_ptr(struct vhost_crypto_data_req *vc_req, struct vring_desc *cur_desc,
545                 uint8_t perm)
546 {
547         void *data;
548         uint64_t dlen = cur_desc->len;
549
550         data = IOVA_TO_VVA(void *, vc_req, cur_desc->addr, &dlen, perm);
551         if (unlikely(!data || dlen != cur_desc->len)) {
552                 VC_LOG_ERR("Failed to map object");
553                 return NULL;
554         }
555
556         return data;
557 }
558
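/* Copy `size` bytes of guest data into dst_data, following the descriptor
 * chain and handling buffers that are not contiguous in host virtual memory.
 */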
559 static int
560 copy_data(void *dst_data, struct vhost_crypto_data_req *vc_req,
561                 struct vring_desc **cur_desc, uint32_t size,
562                 uint32_t *nb_descs, uint32_t vq_size)
563 {
564         struct vring_desc *desc = *cur_desc;
565         uint64_t remain, addr, dlen, len;
566         uint32_t to_copy;
567         uint8_t *data = dst_data;
568         uint8_t *src;
569         int left = size;
570
571         to_copy = RTE_MIN(desc->len, (uint32_t)left);
572         dlen = to_copy;
573         src = IOVA_TO_VVA(uint8_t *, vc_req, desc->addr, &dlen,
574                         VHOST_ACCESS_RO);
575         if (unlikely(!src || !dlen))
576                 return -1;
577
578         rte_memcpy((uint8_t *)data, src, dlen);
579         data += dlen;
580
581         if (unlikely(dlen < to_copy)) {
582                 remain = to_copy - dlen;
583                 addr = desc->addr + dlen;
584
585                 while (remain) {
586                         len = remain;
587                         src = IOVA_TO_VVA(uint8_t *, vc_req, addr, &len,
588                                         VHOST_ACCESS_RO);
589                         if (unlikely(!src || !len)) {
590                                 VC_LOG_ERR("Failed to map descriptor");
591                                 return -1;
592                         }
593
594                         rte_memcpy(data, src, len);
595                         addr += len;
596                         remain -= len;
597                         data += len;
598                 }
599         }
600
601         left -= to_copy;
602
603         while ((desc->flags & VRING_DESC_F_NEXT) && left > 0) {
604                 if (unlikely(*nb_descs == 0 || desc->next >= vq_size)) {
605                         VC_LOG_ERR("Invalid descriptors");
606                         return -1;
607                 }
608                 (*nb_descs)--;
609
610                 desc = &vc_req->head[desc->next];
611                 rte_prefetch0(&vc_req->head[desc->next]);
612                 to_copy = RTE_MIN(desc->len, (uint32_t)left);
613                 dlen = desc->len;
614                 src = IOVA_TO_VVA(uint8_t *, vc_req, desc->addr, &dlen,
615                                 VHOST_ACCESS_RO);
616                 if (unlikely(!src || !dlen)) {
617                         VC_LOG_ERR("Failed to map descriptor");
618                         return -1;
619                 }
620
621                 rte_memcpy(data, src, dlen);
622                 data += dlen;
623
624                 if (unlikely(dlen < to_copy)) {
625                         remain = to_copy - dlen;
626                         addr = desc->addr + dlen;
627
628                         while (remain) {
629                                 len = remain;
630                                 src = IOVA_TO_VVA(uint8_t *, vc_req, addr, &len,
631                                                 VHOST_ACCESS_RO);
632                                 if (unlikely(!src || !len)) {
633                                         VC_LOG_ERR("Failed to map descriptor");
634                                         return -1;
635                                 }
636
637                                 rte_memcpy(data, src, len);
638                                 addr += len;
639                                 remain -= len;
640                                 data += len;
641                         }
642                 }
643
644                 left -= to_copy;
645         }
646
647         if (unlikely(left > 0)) {
648                 VC_LOG_ERR("Incorrect virtio descriptor");
649                 return -1;
650         }
651
652         if (unlikely(*nb_descs == 0))
653                 *cur_desc = NULL;
654         else {
655                 if (unlikely(desc->next >= vq_size))
656                         return -1;
657                 *cur_desc = &vc_req->head[desc->next];
658         }
659
660         return 0;
661 }
662
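/* Copy the processed data from the host-side buffers back into the guest
 * buffers and return the writeback elements to their mempool.
 */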
663 static void
664 write_back_data(struct vhost_crypto_data_req *vc_req)
665 {
666         struct vhost_crypto_writeback_data *wb_data = vc_req->wb, *wb_last;
667
668         while (wb_data) {
669                 rte_memcpy(wb_data->dst, wb_data->src, wb_data->len);
670                 wb_last = wb_data;
671                 wb_data = wb_data->next;
672                 rte_mempool_put(vc_req->wb_pool, wb_last);
673         }
674 }
675
676 static void
677 free_wb_data(struct vhost_crypto_writeback_data *wb_data,
678                 struct rte_mempool *mp)
679 {
680         if (wb_data->next != NULL)
681                 free_wb_data(wb_data->next, mp);
682
683         rte_mempool_put(mp, wb_data);
684 }
685
686 /**
687  * The function will allocate a vhost_crypto_writeback_data linked list
688  * containing the source and destination data pointers for the write back
689  * operation performed after the request is dequeued from the cryptodev PMD.
690  *
691  * @param vc_req
692  *   The vhost crypto data request pointer
693  * @param cur_desc
694  *   The pointer of the current in use descriptor pointer. The content of
695  *   cur_desc is expected to be updated after the function execution.
696  * @param end_wb_data
697  *   The last write back data element to be returned. It is used only in cipher
698  *   and hash chain operations.
699  * @param src
700  *   The source data pointer
701  * @param offset
702  *   The offset to both source and destination data. For source data the offset
703  *   is the number of bytes between src and the start of the cipher operation.
704  *   For destination data it is the number of bytes from *cur_desc->addr to
705  *   the location where src will be written.
706  * @param write_back_len
707  *   The total number of bytes to write back.
708  * @return
709  *   The pointer to the start of the write back data linked list.
710  */
711 static struct vhost_crypto_writeback_data *
712 prepare_write_back_data(struct vhost_crypto_data_req *vc_req,
713                 struct vring_desc **cur_desc,
714                 struct vhost_crypto_writeback_data **end_wb_data,
715                 uint8_t *src,
716                 uint32_t offset,
717                 uint64_t write_back_len,
718                 uint32_t *nb_descs, uint32_t vq_size)
719 {
720         struct vhost_crypto_writeback_data *wb_data, *head = NULL;
721         struct vring_desc *desc = *cur_desc;
722         uint64_t dlen;
723         uint8_t *dst;
724         int ret;
725
726         ret = rte_mempool_get(vc_req->wb_pool, (void **)&head);
727         if (unlikely(ret < 0)) {
728                 VC_LOG_ERR("no memory");
729                 goto error_exit;
730         }
731
732         wb_data = head;
733
734         if (likely(desc->len > offset)) {
735                 wb_data->src = src + offset;
736                 dlen = desc->len;
737                 dst = IOVA_TO_VVA(uint8_t *, vc_req, desc->addr,
738                         &dlen, VHOST_ACCESS_RW) + offset;
739                 if (unlikely(!dst || dlen != desc->len)) {
740                         VC_LOG_ERR("Failed to map descriptor");
741                         goto error_exit;
742                 }
743
744                 wb_data->dst = dst;
745                 wb_data->len = desc->len - offset;
746                 write_back_len -= wb_data->len;
747                 src += offset + wb_data->len;
748                 offset = 0;
749
750                 if (unlikely(write_back_len)) {
751                         ret = rte_mempool_get(vc_req->wb_pool,
752                                         (void **)&(wb_data->next));
753                         if (unlikely(ret < 0)) {
754                                 VC_LOG_ERR("no memory");
755                                 goto error_exit;
756                         }
757
758                         wb_data = wb_data->next;
759                 } else
760                         wb_data->next = NULL;
761         } else
762                 offset -= desc->len;
763
764         while (write_back_len) {
765                 if (unlikely(*nb_descs == 0 || desc->next >= vq_size)) {
766                         VC_LOG_ERR("Invalid descriptors");
767                         goto error_exit;
768                 }
769                 (*nb_descs)--;
770
771                 desc = &vc_req->head[desc->next];
772                 if (unlikely(!(desc->flags & VRING_DESC_F_WRITE))) {
773                         VC_LOG_ERR("incorrect descriptor");
774                         goto error_exit;
775                 }
776
777                 if (desc->len <= offset) {
778                         offset -= desc->len;
779                         continue;
780                 }
781
782                 dlen = desc->len;
783                 dst = IOVA_TO_VVA(uint8_t *, vc_req, desc->addr, &dlen,
784                                 VHOST_ACCESS_RW) + offset;
785                 if (unlikely(dst == NULL || dlen != desc->len)) {
786                         VC_LOG_ERR("Failed to map descriptor");
787                         goto error_exit;
788                 }
789
790                 wb_data->src = src;
791                 wb_data->dst = dst;
792                 wb_data->len = RTE_MIN(desc->len - offset, write_back_len);
793                 write_back_len -= wb_data->len;
794                 src += wb_data->len;
795                 offset = 0;
796
797                 if (write_back_len) {
798                         ret = rte_mempool_get(vc_req->wb_pool,
799                                         (void **)&(wb_data->next));
800                         if (unlikely(ret < 0)) {
801                                 VC_LOG_ERR("no memory");
802                                 goto error_exit;
803                         }
804
805                         wb_data = wb_data->next;
806                 } else
807                         wb_data->next = NULL;
808         }
809
810         if (unlikely(*nb_descs == 0))
811                 *cur_desc = NULL;
812         else {
813                 if (unlikely(desc->next >= vq_size))
814                         goto error_exit;
815                 *cur_desc = &vc_req->head[desc->next];
816         }
817
818         *end_wb_data = wb_data;
819
820         return head;
821
822 error_exit:
823         if (head)
824                 free_wb_data(head, vc_req->wb_pool);
825
826         return NULL;
827 }
828
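/* Translate a virtio-crypto cipher-only data request into a DPDK symmetric
 * crypto op, either pointing the mbufs at the guest buffers (zero-copy) or
 * copying the data into the mbuf and preparing a writeback list.
 */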
829 static uint8_t
830 prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
831                 struct vhost_crypto_data_req *vc_req,
832                 struct virtio_crypto_cipher_data_req *cipher,
833                 struct vring_desc *cur_desc,
834                 uint32_t *nb_descs, uint32_t vq_size)
835 {
836         struct vring_desc *desc = cur_desc;
837         struct vhost_crypto_writeback_data *ewb = NULL;
838         struct rte_mbuf *m_src = op->sym->m_src, *m_dst = op->sym->m_dst;
839         uint8_t *iv_data = rte_crypto_op_ctod_offset(op, uint8_t *, IV_OFFSET);
840         uint8_t ret = 0;
841
842         /* prepare */
843         /* iv */
844         if (unlikely(copy_data(iv_data, vc_req, &desc, cipher->para.iv_len,
845                         nb_descs, vq_size) < 0)) {
846                 ret = VIRTIO_CRYPTO_BADMSG;
847                 goto error_exit;
848         }
849
850         m_src->data_len = cipher->para.src_data_len;
851
852         switch (vcrypto->option) {
853         case RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE:
854                 m_src->buf_iova = gpa_to_hpa(vcrypto->dev, desc->addr,
855                                 cipher->para.src_data_len);
856                 m_src->buf_addr = get_data_ptr(vc_req, desc, VHOST_ACCESS_RO);
857                 if (unlikely(m_src->buf_iova == 0 ||
858                                 m_src->buf_addr == NULL)) {
859                         VC_LOG_ERR("zero_copy may fail due to cross page data");
860                         ret = VIRTIO_CRYPTO_ERR;
861                         goto error_exit;
862                 }
863
864                 if (unlikely(move_desc(vc_req->head, &desc,
865                                 cipher->para.src_data_len, nb_descs,
866                                 vq_size) < 0)) {
867                         VC_LOG_ERR("Incorrect descriptor");
868                         ret = VIRTIO_CRYPTO_ERR;
869                         goto error_exit;
870                 }
871
872                 break;
873         case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE:
874                 vc_req->wb_pool = vcrypto->wb_pool;
875
876                 if (unlikely(cipher->para.src_data_len >
877                                 RTE_MBUF_DEFAULT_BUF_SIZE)) {
878                         VC_LOG_ERR("Not enough space to do data copy");
879                         ret = VIRTIO_CRYPTO_ERR;
880                         goto error_exit;
881                 }
882                 if (unlikely(copy_data(rte_pktmbuf_mtod(m_src, uint8_t *),
883                                 vc_req, &desc, cipher->para.src_data_len,
884                                 nb_descs, vq_size) < 0)) {
885                         ret = VIRTIO_CRYPTO_BADMSG;
886                         goto error_exit;
887                 }
888                 break;
889         default:
890                 ret = VIRTIO_CRYPTO_BADMSG;
891                 goto error_exit;
892         }
893
894         /* dst */
895         desc = find_write_desc(vc_req->head, desc, nb_descs, vq_size);
896         if (unlikely(!desc)) {
897                 VC_LOG_ERR("Cannot find write location");
898                 ret = VIRTIO_CRYPTO_BADMSG;
899                 goto error_exit;
900         }
901
902         switch (vcrypto->option) {
903         case RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE:
904                 m_dst->buf_iova = gpa_to_hpa(vcrypto->dev,
905                                 desc->addr, cipher->para.dst_data_len);
906                 m_dst->buf_addr = get_data_ptr(vc_req, desc, VHOST_ACCESS_RW);
907                 if (unlikely(m_dst->buf_iova == 0 || m_dst->buf_addr == NULL)) {
908                         VC_LOG_ERR("zero_copy may fail due to cross page data");
909                         ret = VIRTIO_CRYPTO_ERR;
910                         goto error_exit;
911                 }
912
913                 if (unlikely(move_desc(vc_req->head, &desc,
914                                 cipher->para.dst_data_len,
915                                 nb_descs, vq_size) < 0)) {
916                         VC_LOG_ERR("Incorrect descriptor");
917                         ret = VIRTIO_CRYPTO_ERR;
918                         goto error_exit;
919                 }
920
921                 m_dst->data_len = cipher->para.dst_data_len;
922                 break;
923         case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE:
924                 vc_req->wb = prepare_write_back_data(vc_req, &desc, &ewb,
925                                 rte_pktmbuf_mtod(m_src, uint8_t *), 0,
926                                 cipher->para.dst_data_len, nb_descs, vq_size);
927                 if (unlikely(vc_req->wb == NULL)) {
928                         ret = VIRTIO_CRYPTO_ERR;
929                         goto error_exit;
930                 }
931
932                 break;
933         default:
934                 ret = VIRTIO_CRYPTO_BADMSG;
935                 goto error_exit;
936         }
937
938         /* src data */
939         op->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
940         op->sess_type = RTE_CRYPTO_OP_WITH_SESSION;
941
942         op->sym->cipher.data.offset = 0;
943         op->sym->cipher.data.length = cipher->para.src_data_len;
944
945         vc_req->inhdr = get_data_ptr(vc_req, desc, VHOST_ACCESS_WO);
946         if (unlikely(vc_req->inhdr == NULL)) {
947                 ret = VIRTIO_CRYPTO_BADMSG;
948                 goto error_exit;
949         }
950
951         vc_req->inhdr->status = VIRTIO_CRYPTO_OK;
952         vc_req->len = cipher->para.dst_data_len + INHDR_LEN;
953
954         return 0;
955
956 error_exit:
957         if (vc_req->wb)
958                 free_wb_data(vc_req->wb, vc_req->wb_pool);
959
960         vc_req->len = INHDR_LEN;
961         return ret;
962 }
963
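/* Translate a virtio-crypto cipher+hash chaining data request into a DPDK
 * symmetric crypto op, including the digest placement, in zero-copy or copy
 * mode.
 */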
964 static uint8_t
965 prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
966                 struct vhost_crypto_data_req *vc_req,
967                 struct virtio_crypto_alg_chain_data_req *chain,
968                 struct vring_desc *cur_desc,
969                 uint32_t *nb_descs, uint32_t vq_size)
970 {
971         struct vring_desc *desc = cur_desc, *digest_desc;
972         struct vhost_crypto_writeback_data *ewb = NULL, *ewb2 = NULL;
973         struct rte_mbuf *m_src = op->sym->m_src, *m_dst = op->sym->m_dst;
974         uint8_t *iv_data = rte_crypto_op_ctod_offset(op, uint8_t *, IV_OFFSET);
975         uint32_t digest_offset;
976         void *digest_addr;
977         uint8_t ret = 0;
978
979         /* prepare */
980         /* iv */
981         if (unlikely(copy_data(iv_data, vc_req, &desc,
982                         chain->para.iv_len, nb_descs, vq_size) < 0)) {
983                 ret = VIRTIO_CRYPTO_BADMSG;
984                 goto error_exit;
985         }
986
987         m_src->data_len = chain->para.src_data_len;
988
989         switch (vcrypto->option) {
990         case RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE:
991                 m_dst->data_len = chain->para.dst_data_len;
992
993                 m_src->buf_iova = gpa_to_hpa(vcrypto->dev, desc->addr,
994                                 chain->para.src_data_len);
995                 m_src->buf_addr = get_data_ptr(vc_req, desc, VHOST_ACCESS_RO);
996                 if (unlikely(m_src->buf_iova == 0 || m_src->buf_addr == NULL)) {
997                         VC_LOG_ERR("zero_copy may fail due to cross page data");
998                         ret = VIRTIO_CRYPTO_ERR;
999                         goto error_exit;
1000                 }
1001
1002                 if (unlikely(move_desc(vc_req->head, &desc,
1003                                 chain->para.src_data_len,
1004                                 nb_descs, vq_size) < 0)) {
1005                         VC_LOG_ERR("Incorrect descriptor");
1006                         ret = VIRTIO_CRYPTO_ERR;
1007                         goto error_exit;
1008                 }
1009                 break;
1010         case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE:
1011                 vc_req->wb_pool = vcrypto->wb_pool;
1012
1013                 if (unlikely(chain->para.src_data_len >
1014                                 RTE_MBUF_DEFAULT_BUF_SIZE)) {
1015                         VC_LOG_ERR("Not enough space to do data copy");
1016                         ret = VIRTIO_CRYPTO_ERR;
1017                         goto error_exit;
1018                 }
1019                 if (unlikely(copy_data(rte_pktmbuf_mtod(m_src, uint8_t *),
1020                                 vc_req, &desc, chain->para.src_data_len,
1021                                 nb_descs, vq_size) < 0)) {
1022                         ret = VIRTIO_CRYPTO_BADMSG;
1023                         goto error_exit;
1024                 }
1025
1026                 break;
1027         default:
1028                 ret = VIRTIO_CRYPTO_BADMSG;
1029                 goto error_exit;
1030         }
1031
1032         /* dst */
1033         desc = find_write_desc(vc_req->head, desc, nb_descs, vq_size);
1034         if (unlikely(!desc)) {
1035                 VC_LOG_ERR("Cannot find write location");
1036                 ret = VIRTIO_CRYPTO_BADMSG;
1037                 goto error_exit;
1038         }
1039
1040         switch (vcrypto->option) {
1041         case RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE:
1042                 m_dst->buf_iova = gpa_to_hpa(vcrypto->dev,
1043                                 desc->addr, chain->para.dst_data_len);
1044                 m_dst->buf_addr = get_data_ptr(vc_req, desc, VHOST_ACCESS_RW);
1045                 if (unlikely(m_dst->buf_iova == 0 || m_dst->buf_addr == NULL)) {
1046                         VC_LOG_ERR("zero_copy may fail due to cross page data");
1047                         ret = VIRTIO_CRYPTO_ERR;
1048                         goto error_exit;
1049                 }
1050
1051                 if (unlikely(move_desc(vc_req->head, &desc,
1052                                 chain->para.dst_data_len,
1053                                 nb_descs, vq_size) < 0)) {
1054                         VC_LOG_ERR("Incorrect descriptor");
1055                         ret = VIRTIO_CRYPTO_ERR;
1056                         goto error_exit;
1057                 }
1058
1059                 op->sym->auth.digest.phys_addr = gpa_to_hpa(vcrypto->dev,
1060                                 desc->addr, chain->para.hash_result_len);
1061                 op->sym->auth.digest.data = get_data_ptr(vc_req, desc,
1062                                 VHOST_ACCESS_RW);
1063                 if (unlikely(op->sym->auth.digest.phys_addr == 0)) {
1064                         VC_LOG_ERR("zero_copy may fail due to cross page data");
1065                         ret = VIRTIO_CRYPTO_ERR;
1066                         goto error_exit;
1067                 }
1068
1069                 if (unlikely(move_desc(vc_req->head, &desc,
1070                                 chain->para.hash_result_len,
1071                                 nb_descs, vq_size) < 0)) {
1072                         VC_LOG_ERR("Incorrect descriptor");
1073                         ret = VIRTIO_CRYPTO_ERR;
1074                         goto error_exit;
1075                 }
1076
1077                 break;
1078         case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE:
1079                 vc_req->wb = prepare_write_back_data(vc_req, &desc, &ewb,
1080                                 rte_pktmbuf_mtod(m_src, uint8_t *),
1081                                 chain->para.cipher_start_src_offset,
1082                                 chain->para.dst_data_len -
1083                                 chain->para.cipher_start_src_offset,
1084                                 nb_descs, vq_size);
1085                 if (unlikely(vc_req->wb == NULL)) {
1086                         ret = VIRTIO_CRYPTO_ERR;
1087                         goto error_exit;
1088                 }
1089
1090                 digest_offset = m_src->data_len;
1091                 digest_addr = rte_pktmbuf_mtod_offset(m_src, void *,
1092                                 digest_offset);
1093                 digest_desc = desc;
1094
1095                 /** create a wb_data for digest */
1096                 ewb->next = prepare_write_back_data(vc_req, &desc, &ewb2,
1097                                 digest_addr, 0, chain->para.hash_result_len,
1098                                 nb_descs, vq_size);
1099                 if (unlikely(ewb->next == NULL)) {
1100                         ret = VIRTIO_CRYPTO_ERR;
1101                         goto error_exit;
1102                 }
1103
1104                 if (unlikely(copy_data(digest_addr, vc_req, &digest_desc,
1105                                 chain->para.hash_result_len,
1106                                 nb_descs, vq_size) < 0)) {
1107                         ret = VIRTIO_CRYPTO_BADMSG;
1108                         goto error_exit;
1109                 }
1110
1111                 op->sym->auth.digest.data = digest_addr;
1112                 op->sym->auth.digest.phys_addr = rte_pktmbuf_iova_offset(m_src,
1113                                 digest_offset);
1114                 break;
1115         default:
1116                 ret = VIRTIO_CRYPTO_BADMSG;
1117                 goto error_exit;
1118         }
1119
1120         /* record inhdr */
1121         vc_req->inhdr = get_data_ptr(vc_req, desc, VHOST_ACCESS_WO);
1122         if (unlikely(vc_req->inhdr == NULL)) {
1123                 ret = VIRTIO_CRYPTO_BADMSG;
1124                 goto error_exit;
1125         }
1126
1127         vc_req->inhdr->status = VIRTIO_CRYPTO_OK;
1128
1129         op->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
1130         op->sess_type = RTE_CRYPTO_OP_WITH_SESSION;
1131
1132         op->sym->cipher.data.offset = chain->para.cipher_start_src_offset;
1133         op->sym->cipher.data.length = chain->para.src_data_len -
1134                         chain->para.cipher_start_src_offset;
1135
1136         op->sym->auth.data.offset = chain->para.hash_start_src_offset;
1137         op->sym->auth.data.length = chain->para.len_to_hash;
1138
1139         vc_req->len = chain->para.dst_data_len + chain->para.hash_result_len +
1140                         INHDR_LEN;
1141         return 0;
1142
1143 error_exit:
1144         if (vc_req->wb)
1145                 free_wb_data(vc_req->wb, vc_req->wb_pool);
1146         vc_req->len = INHDR_LEN;
1147         return ret;
1148 }
1149
1150 /**
1151  * Process one virtio-crypto request from a single descriptor chain.
1152  */
1153 static __rte_always_inline int
1154 vhost_crypto_process_one_req(struct vhost_crypto *vcrypto,
1155                 struct vhost_virtqueue *vq, struct rte_crypto_op *op,
1156                 struct vring_desc *head, uint16_t desc_idx)
1157 {
1158         struct vhost_crypto_data_req *vc_req = rte_mbuf_to_priv(op->sym->m_src);
1159         struct rte_cryptodev_sym_session *session;
1160         struct virtio_crypto_op_data_req *req, tmp_req;
1161         struct virtio_crypto_inhdr *inhdr;
1162         struct vring_desc *desc = NULL;
1163         uint64_t session_id;
1164         uint64_t dlen;
1165         uint32_t nb_descs = vq->size;
1166         int err = 0;
1167
1168         vc_req->desc_idx = desc_idx;
1169         vc_req->dev = vcrypto->dev;
1170         vc_req->vq = vq;
1171
1172         if (likely(head->flags & VRING_DESC_F_INDIRECT)) {
1173                 dlen = head->len;
1174                 nb_descs = dlen / sizeof(struct vring_desc);
1175                 /* drop invalid descriptors */
1176                 if (unlikely(nb_descs > vq->size))
1177                         return -1;
1178                 desc = IOVA_TO_VVA(struct vring_desc *, vc_req, head->addr,
1179                                 &dlen, VHOST_ACCESS_RO);
1180                 if (unlikely(!desc || dlen != head->len))
1181                         return -1;
1182                 desc_idx = 0;
1183                 head = desc;
1184         } else {
1185                 desc = head;
1186         }
1187
1188         vc_req->head = head;
1189         vc_req->zero_copy = vcrypto->option;
1190
1191         req = get_data_ptr(vc_req, desc, VHOST_ACCESS_RO);
1192         if (unlikely(req == NULL)) {
1193                 switch (vcrypto->option) {
1194                 case RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE:
1195                         err = VIRTIO_CRYPTO_BADMSG;
1196                         VC_LOG_ERR("Invalid descriptor");
1197                         goto error_exit;
1198                 case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE:
1199                         req = &tmp_req;
1200                         if (unlikely(copy_data(req, vc_req, &desc, sizeof(*req),
1201                                         &nb_descs, vq->size) < 0)) {
1202                                 err = VIRTIO_CRYPTO_BADMSG;
1203                                 VC_LOG_ERR("Invalid descriptor");
1204                                 goto error_exit;
1205                         }
1206                         break;
1207                 default:
1208                         err = VIRTIO_CRYPTO_ERR;
1209                         VC_LOG_ERR("Invalid option");
1210                         goto error_exit;
1211                 }
1212         } else {
1213                 if (unlikely(move_desc(vc_req->head, &desc,
1214                                 sizeof(*req), &nb_descs, vq->size) < 0)) {
1215                         VC_LOG_ERR("Incorrect descriptor");
                             err = VIRTIO_CRYPTO_ERR;
1216                         goto error_exit;
1217                 }
1218         }
1219
1220         switch (req->header.opcode) {
1221         case VIRTIO_CRYPTO_CIPHER_ENCRYPT:
1222         case VIRTIO_CRYPTO_CIPHER_DECRYPT:
1223                 session_id = req->header.session_id;
1224
1225                 /* one branch to avoid unnecessary table lookup */
1226                 if (vcrypto->cache_session_id != session_id) {
1227                         err = rte_hash_lookup_data(vcrypto->session_map,
1228                                         &session_id, (void **)&session);
1229                         if (unlikely(err < 0)) {
1230                                 err = VIRTIO_CRYPTO_ERR;
1231                                 VC_LOG_ERR("Failed to find session %"PRIu64,
1232                                                 session_id);
1233                                 goto error_exit;
1234                         }
1235
1236                         vcrypto->cache_session = session;
1237                         vcrypto->cache_session_id = session_id;
1238                 }
1239
1240                 session = vcrypto->cache_session;
1241
1242                 err = rte_crypto_op_attach_sym_session(op, session);
1243                 if (unlikely(err < 0)) {
1244                         err = VIRTIO_CRYPTO_ERR;
1245                         VC_LOG_ERR("Failed to attach session to op");
1246                         goto error_exit;
1247                 }
1248
1249                 switch (req->u.sym_req.op_type) {
1250                 case VIRTIO_CRYPTO_SYM_OP_NONE:
1251                         err = VIRTIO_CRYPTO_NOTSUPP;
1252                         break;
1253                 case VIRTIO_CRYPTO_SYM_OP_CIPHER:
1254                         err = prepare_sym_cipher_op(vcrypto, op, vc_req,
1255                                         &req->u.sym_req.u.cipher, desc,
1256                                         &nb_descs, vq->size);
1257                         break;
1258                 case VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING:
1259                         err = prepare_sym_chain_op(vcrypto, op, vc_req,
1260                                         &req->u.sym_req.u.chain, desc,
1261                                         &nb_descs, vq->size);
1262                         break;
1263                 }
1264                 if (unlikely(err != 0)) {
1265                         VC_LOG_ERR("Failed to process sym request");
1266                         goto error_exit;
1267                 }
1268                 break;
1269         default:
1270                 VC_LOG_ERR("Unsupported symmetric crypto request type %u",
1271                                 req->header.opcode);
                     err = VIRTIO_CRYPTO_NOTSUPP;
1272                 goto error_exit;
1273         }
1274
1275         return 0;
1276
1277 error_exit:
1278
1279         inhdr = reach_inhdr(vc_req, desc, &nb_descs, vq->size);
1280         if (likely(inhdr != NULL))
1281                 inhdr->status = (uint8_t)err;
1282
1283         return -1;
1284 }
1285
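/* Complete one dequeued crypto op: write data back to the guest in copy mode,
 * record the error status on failure, fill the used ring entry and recycle
 * the mbufs. If old_vq is given and the op belongs to a different virtqueue,
 * the op is left untouched and that virtqueue is returned so the caller can
 * stop the current batch.
 */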
1286 static __rte_always_inline struct vhost_virtqueue *
1287 vhost_crypto_finalize_one_request(struct rte_crypto_op *op,
1288                 struct vhost_virtqueue *old_vq)
1289 {
1290         struct rte_mbuf *m_src = op->sym->m_src;
1291         struct rte_mbuf *m_dst = op->sym->m_dst;
1292         struct vhost_crypto_data_req *vc_req = rte_mbuf_to_priv(m_src);
1293         uint16_t desc_idx;
1294
1295         if (unlikely(!vc_req)) {
1296                 VC_LOG_ERR("Failed to retrieve vc_req");
1297                 return NULL;
1298         }
1299
1300         if (old_vq && (vc_req->vq != old_vq))
1301                 return vc_req->vq;
1302
1303         desc_idx = vc_req->desc_idx;
1304
1305         if (unlikely(op->status != RTE_CRYPTO_OP_STATUS_SUCCESS))
1306                 vc_req->inhdr->status = VIRTIO_CRYPTO_ERR;
1307         else {
1308                 if (vc_req->zero_copy == 0)
1309                         write_back_data(vc_req);
1310         }
1311
1312         vc_req->vq->used->ring[desc_idx].id = desc_idx;
1313         vc_req->vq->used->ring[desc_idx].len = vc_req->len;
1314
1315         rte_mempool_put(m_src->pool, (void *)m_src);
1316
1317         if (m_dst)
1318                 rte_mempool_put(m_dst->pool, (void *)m_dst);
1319
1320         return vc_req->vq;
1321 }
1322
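/* Finalize a burst of dequeued ops belonging to one virtqueue: stop at the
 * first op from a different queue, bump used->idx by the number of finished
 * requests and report the queue's callfd for the caller to kick.
 */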
1323 static __rte_always_inline uint16_t
1324 vhost_crypto_complete_one_vm_requests(struct rte_crypto_op **ops,
1325                 uint16_t nb_ops, int *callfd)
1326 {
1327         uint16_t processed = 1;
1328         struct vhost_virtqueue *vq, *tmp_vq;
1329
1330         if (unlikely(nb_ops == 0))
1331                 return 0;
1332
1333         vq = vhost_crypto_finalize_one_request(ops[0], NULL);
1334         if (unlikely(vq == NULL))
1335                 return 0;
1336         tmp_vq = vq;
1337
1338         while ((processed < nb_ops)) {
1339                 tmp_vq = vhost_crypto_finalize_one_request(ops[processed],
1340                                 tmp_vq);
1341
1342                 if (unlikely(vq != tmp_vq))
1343                         break;
1344
1345                 processed++;
1346         }
1347
1348         *callfd = vq->callfd;
1349
1350         *(volatile uint16_t *)&vq->used->idx += processed;
1351
1352         return processed;
1353 }
1354
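/* Public API: bind the vhost device @vid to DPDK cryptodev @cryptodev_id.
 * Creates the session map, the mbuf pool used to carry requests and the
 * writeback mempool, and registers the vhost-user post-message handler.
 *
 * Rough backend call flow (a sketch only, see rte_vhost_crypto.h and
 * examples/vhost_crypto for complete usage):
 *   rte_vhost_crypto_create() -> rte_vhost_crypto_set_zero_copy() (optional),
 *   then per burst: rte_vhost_crypto_fetch_requests() ->
 *   rte_cryptodev_enqueue_burst() / rte_cryptodev_dequeue_burst() ->
 *   rte_vhost_crypto_finalize_requests(), and rte_vhost_crypto_free() on
 *   device removal.
 */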
1355 int __rte_experimental
1356 rte_vhost_crypto_create(int vid, uint8_t cryptodev_id,
1357                 struct rte_mempool *sess_pool,
1358                 struct rte_mempool *sess_priv_pool,
1359                 int socket_id)
1360 {
1361         struct virtio_net *dev = get_device(vid);
1362         struct rte_hash_parameters params = {0};
1363         struct vhost_crypto *vcrypto;
1364         char name[128];
1365         int ret;
1366
1367         if (!dev) {
1368                 VC_LOG_ERR("Invalid vid %i", vid);
1369                 return -EINVAL;
1370         }
1371
1372         ret = rte_vhost_driver_set_features(dev->ifname,
1373                         VIRTIO_CRYPTO_FEATURES);
1374         if (ret < 0) {
1375                 VC_LOG_ERR("Error setting features");
1376                 return -1;
1377         }
1378
1379         vcrypto = rte_zmalloc_socket(NULL, sizeof(*vcrypto),
1380                         RTE_CACHE_LINE_SIZE, socket_id);
1381         if (!vcrypto) {
1382                 VC_LOG_ERR("Insufficient memory");
1383                 return -ENOMEM;
1384         }
1385
1386         vcrypto->sess_pool = sess_pool;
1387         vcrypto->sess_priv_pool = sess_priv_pool;
1388         vcrypto->cid = cryptodev_id;
1389         vcrypto->cache_session_id = UINT64_MAX;
1390         vcrypto->last_session_id = 1;
1391         vcrypto->dev = dev;
1392         vcrypto->option = RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE;
1393
1394         snprintf(name, 127, "HASH_VHOST_CRYPT_%u", (uint32_t)vid);
1395         params.name = name;
1396         params.entries = VHOST_CRYPTO_SESSION_MAP_ENTRIES;
1397         params.hash_func = rte_jhash;
1398         params.key_len = sizeof(uint64_t);
1399         params.socket_id = socket_id;
1400         vcrypto->session_map = rte_hash_create(&params);
1401         if (!vcrypto->session_map) {
1402                 VC_LOG_ERR("Failed to create session map");
1403                 ret = -ENOMEM;
1404                 goto error_exit;
1405         }
1406
1407         snprintf(name, 127, "MBUF_POOL_VM_%u", (uint32_t)vid);
1408         vcrypto->mbuf_pool = rte_pktmbuf_pool_create(name,
1409                         VHOST_CRYPTO_MBUF_POOL_SIZE, 512,
1410                         sizeof(struct vhost_crypto_data_req),
1411                         RTE_MBUF_DEFAULT_DATAROOM * 2 + RTE_PKTMBUF_HEADROOM,
1412                         rte_socket_id());
1413         if (!vcrypto->mbuf_pool) {
1414                 VC_LOG_ERR("Failed to create mbuf pool");
1415                 ret = -ENOMEM;
1416                 goto error_exit;
1417         }
1418
1419         snprintf(name, 127, "WB_POOL_VM_%u", (uint32_t)vid);
1420         vcrypto->wb_pool = rte_mempool_create(name,
1421                         VHOST_CRYPTO_MBUF_POOL_SIZE,
1422                         sizeof(struct vhost_crypto_writeback_data),
1423                         128, 0, NULL, NULL, NULL, NULL,
1424                         rte_socket_id(), 0);
1425         if (!vcrypto->wb_pool) {
1426                 VC_LOG_ERR("Failed to create write back pool");
1427                 ret = -ENOMEM;
1428                 goto error_exit;
1429         }
1430
1431         dev->extern_data = vcrypto;
1432         dev->extern_ops.pre_msg_handle = NULL;
1433         dev->extern_ops.post_msg_handle = vhost_crypto_msg_post_handler;
1434
1435         return 0;
1436
1437 error_exit:
1438         if (vcrypto->session_map)
1439                 rte_hash_free(vcrypto->session_map);
1440         if (vcrypto->mbuf_pool)
1441                 rte_mempool_free(vcrypto->mbuf_pool);
1442
1443         rte_free(vcrypto);
1444
1445         return ret;
1446 }
1447
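/*
 * Tear down the vhost crypto backend of a vhost device: release the
 * session map, the mbuf and write back pools, and detach the context and
 * message handlers from the device.
 */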
1448 int __rte_experimental
1449 rte_vhost_crypto_free(int vid)
1450 {
1451         struct virtio_net *dev = get_device(vid);
1452         struct vhost_crypto *vcrypto;
1453
1454         if (unlikely(dev == NULL)) {
1455                 VC_LOG_ERR("Invalid vid %i", vid);
1456                 return -EINVAL;
1457         }
1458
1459         vcrypto = dev->extern_data;
1460         if (unlikely(vcrypto == NULL)) {
1461                 VC_LOG_ERR("Cannot find required data, is it initialized?");
1462                 return -ENOENT;
1463         }
1464
1465         rte_hash_free(vcrypto->session_map);
1466         rte_mempool_free(vcrypto->mbuf_pool);
1467         rte_mempool_free(vcrypto->wb_pool);
1468         rte_free(vcrypto);
1469
1470         dev->extern_data = NULL;
1471         dev->extern_ops.pre_msg_handle = NULL;
1472         dev->extern_ops.post_msg_handle = NULL;
1473
1474         return 0;
1475 }
1476
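/*
 * Switch a vhost device between zero copy and non zero copy operation.
 * Disabling zero copy (re)creates the write back data pool; enabling it
 * frees the pool, since the guest buffers are then used directly.
 */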
1477 int __rte_experimental
1478 rte_vhost_crypto_set_zero_copy(int vid, enum rte_vhost_crypto_zero_copy option)
1479 {
1480         struct virtio_net *dev = get_device(vid);
1481         struct vhost_crypto *vcrypto;
1482
1483         if (unlikely(dev == NULL)) {
1484                 VC_LOG_ERR("Invalid vid %i", vid);
1485                 return -EINVAL;
1486         }
1487
1488         if (unlikely((uint32_t)option >=
1489                                 RTE_VHOST_CRYPTO_MAX_ZERO_COPY_OPTIONS)) {
1490                 VC_LOG_ERR("Invalid option %i", option);
1491                 return -EINVAL;
1492         }
1493
1494         vcrypto = (struct vhost_crypto *)dev->extern_data;
1495         if (unlikely(vcrypto == NULL)) {
1496                 VC_LOG_ERR("Cannot find required data, is it initialized?");
1497                 return -ENOENT;
1498         }
1499
1500         if (vcrypto->option == (uint8_t)option)
1501                 return 0;
1502
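        /* a pool that is not full still has requests in flight; the mode
         * must not be changed under them
         */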
1503         if (!(rte_mempool_full(vcrypto->mbuf_pool)) ||
1504                         !(rte_mempool_full(vcrypto->wb_pool))) {
1505                 VC_LOG_ERR("Cannot update zero copy as mempools are not full");
1506                 return -EINVAL;
1507         }
1508
1509         if (option == RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE) {
1510                 char name[128];
1511
1512                 snprintf(name, 127, "WB_POOL_VM_%u", (uint32_t)vid);
1513                 vcrypto->wb_pool = rte_mempool_create(name,
1514                                 VHOST_CRYPTO_MBUF_POOL_SIZE,
1515                                 sizeof(struct vhost_crypto_writeback_data),
1516                                 128, 0, NULL, NULL, NULL, NULL,
1517                                 rte_socket_id(), 0);
1518                 if (!vcrypto->wb_pool) {
1519                         VC_LOG_ERR("Failed to create write back pool");
1520                         return -ENOMEM;
1521                 }
1522         } else {
1523                 rte_mempool_free(vcrypto->wb_pool);
1524                 vcrypto->wb_pool = NULL;
1525         }
1526
1527         vcrypto->option = (uint8_t)option;
1528
1529         return 0;
1530 }
1531
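/*
 * Translate up to nb_ops virtio-crypto requests from virtqueue qid of
 * device vid into the caller supplied crypto ops. The ops are expected to
 * be pre-allocated; this function only attaches mbufs from the internal
 * pool and fills each symmetric operation from the descriptor chain. It
 * returns the number of successfully parsed requests and advances the
 * virtqueue's last used index by the same amount.
 */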
1532 uint16_t __rte_experimental
1533 rte_vhost_crypto_fetch_requests(int vid, uint32_t qid,
1534                 struct rte_crypto_op **ops, uint16_t nb_ops)
1535 {
1536         struct rte_mbuf *mbufs[VHOST_CRYPTO_MAX_BURST_SIZE * 2];
1537         struct virtio_net *dev = get_device(vid);
1538         struct vhost_crypto *vcrypto;
1539         struct vhost_virtqueue *vq;
1540         uint16_t avail_idx;
1541         uint16_t start_idx;
1542         uint16_t count;
1543         uint16_t i = 0;
1544
1545         if (unlikely(dev == NULL)) {
1546                 VC_LOG_ERR("Invalid vid %i", vid);
1547                 return -EINVAL;
1548         }
1549
1550         if (unlikely(qid >= VHOST_MAX_QUEUE_PAIRS)) {
1551                 VC_LOG_ERR("Invalid qid %u", qid);
1552                 return -EINVAL;
1553         }
1554
1555         vcrypto = (struct vhost_crypto *)dev->extern_data;
1556         if (unlikely(vcrypto == NULL)) {
1557                 VC_LOG_ERR("Cannot find required data, is it initialized?");
1558                 return -ENOENT;
1559         }
1560
1561         vq = dev->virtqueue[qid];
1562
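        /* snapshot the guest visible available index and clamp the burst to
         * the ring backlog, the burst limit and the caller's op count
         */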
1563         avail_idx = *((volatile uint16_t *)&vq->avail->idx);
1564         start_idx = vq->last_used_idx;
1565         count = avail_idx - start_idx;
1566         count = RTE_MIN(count, VHOST_CRYPTO_MAX_BURST_SIZE);
1567         count = RTE_MIN(count, nb_ops);
1568
1569         if (unlikely(count == 0))
1570                 return 0;
1571
1572         /* for zero copy, we need 2 empty mbufs to hold the src and dst
1573          * descriptors, otherwise we need only 1 mbuf as src (dst is unused)
1574          */
1575         switch (vcrypto->option) {
1576         case RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE:
1577                 if (unlikely(rte_mempool_get_bulk(vcrypto->mbuf_pool,
1578                                 (void **)mbufs, count * 2) < 0)) {
1579                         VC_LOG_ERR("Insufficient memory");
1580                         return -ENOMEM;
1581                 }
1582
1583                 for (i = 0; i < count; i++) {
1584                         uint16_t used_idx = (start_idx + i) & (vq->size - 1);
1585                         uint16_t desc_idx = vq->avail->ring[used_idx];
1586                         struct vring_desc *head = &vq->desc[desc_idx];
1587                         struct rte_crypto_op *op = ops[i];
1588
1589                         op->sym->m_src = mbufs[i * 2];
1590                         op->sym->m_dst = mbufs[i * 2 + 1];
1591                         op->sym->m_src->data_off = 0;
1592                         op->sym->m_dst->data_off = 0;
1593
1594                         if (unlikely(vhost_crypto_process_one_req(vcrypto, vq,
1595                                         op, head, desc_idx) < 0))
1596                                 break;
1597                 }
1598
1599                 if (unlikely(i < count))
1600                         rte_mempool_put_bulk(vcrypto->mbuf_pool,
1601                                         (void **)&mbufs[i * 2],
1602                                         (count - i) * 2);
1603
1604                 break;
1605
1606         case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE:
1607                 if (unlikely(rte_mempool_get_bulk(vcrypto->mbuf_pool,
1608                                 (void **)mbufs, count) < 0)) {
1609                         VC_LOG_ERR("Insufficient memory");
1610                         return -ENOMEM;
1611                 }
1612
1613                 for (i = 0; i < count; i++) {
1614                         uint16_t used_idx = (start_idx + i) & (vq->size - 1);
1615                         uint16_t desc_idx = vq->avail->ring[used_idx];
1616                         struct vring_desc *head = &vq->desc[desc_idx];
1617                         struct rte_crypto_op *op = ops[i];
1618
1619                         op->sym->m_src = mbufs[i];
1620                         op->sym->m_dst = NULL;
1621                         op->sym->m_src->data_off = 0;
1622
1623                         if (unlikely(vhost_crypto_process_one_req(vcrypto, vq,
1624                                         op, head, desc_idx) < 0))
1625                                 break;
1626                 }
1627
1628                 if (unlikely(i < count))
1629                         rte_mempool_put_bulk(vcrypto->mbuf_pool,
1630                                         (void **)&mbufs[i],
1631                                         count - i);
1632
1633                 break;
1634
1635         }
1636
1637         vq->last_used_idx += i;
1638
1639         return i;
1640 }
1641
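/*
 * Write the results of dequeued crypto ops back to the guest and group
 * them per virtqueue: each entry of callfds is the callfd of one virtqueue
 * whose used index has been updated, so the caller can notify the guest on
 * a per-vq basis.
 *
 * A minimal datapath sketch, assuming the ops were fetched with
 * rte_vhost_crypto_fetch_requests() and pushed through a cryptodev whose
 * id and queue pair (cdev_id, qp) as well as BURST_SIZE are application
 * choices:
 *
 *     nb = rte_vhost_crypto_fetch_requests(vid, qid, ops, BURST_SIZE);
 *     nb = rte_cryptodev_enqueue_burst(cdev_id, qp, ops, nb);
 *     ...
 *     nb = rte_cryptodev_dequeue_burst(cdev_id, qp, ops, BURST_SIZE);
 *     nb = rte_vhost_crypto_finalize_requests(ops, nb, callfds, &nb_callfds);
 *     for (i = 0; i < nb_callfds; i++)
 *             eventfd_write(callfds[i], (eventfd_t)1);
 */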
1642 uint16_t __rte_experimental
1643 rte_vhost_crypto_finalize_requests(struct rte_crypto_op **ops,
1644                 uint16_t nb_ops, int *callfds, uint16_t *nb_callfds)
1645 {
1646         struct rte_crypto_op **tmp_ops = ops;
1647         uint16_t count = 0, left = nb_ops;
1648         int callfd;
1649         uint16_t idx = 0;
1650
1651         while (left) {
1652                 count = vhost_crypto_complete_one_vm_requests(tmp_ops, left,
1653                                 &callfd);
1654                 if (unlikely(count == 0))
1655                         break;
1656
1657                 tmp_ops = &tmp_ops[count];
1658                 left -= count;
1659
1660                 callfds[idx++] = callfd;
1661
1662                 if (unlikely(idx >= VIRTIO_CRYPTO_MAX_NUM_BURST_VQS)) {
1663                         VC_LOG_ERR("Too many vqs");
1664                         break;
1665                 }
1666         }
1667
1668         *nb_callfds = idx;
1669
1670         return nb_ops - left;
1671 }