dpdk.git: drivers/crypto/virtio/virtio_cryptodev.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.
3  */
4 #include <stdbool.h>
5 #include <unistd.h>
6
7 #include <rte_common.h>
8 #include <rte_errno.h>
9 #include <rte_pci.h>
10 #include <rte_bus_pci.h>
11 #include <rte_cryptodev.h>
12 #include <cryptodev_pmd.h>
13 #include <rte_eal.h>
14
15 #include "virtio_cryptodev.h"
16 #include "virtqueue.h"
17 #include "virtio_crypto_algs.h"
18 #include "virtio_crypto_capabilities.h"
19
20 static int virtio_crypto_dev_configure(struct rte_cryptodev *dev,
21                 struct rte_cryptodev_config *config);
22 static int virtio_crypto_dev_start(struct rte_cryptodev *dev);
23 static void virtio_crypto_dev_stop(struct rte_cryptodev *dev);
24 static int virtio_crypto_dev_close(struct rte_cryptodev *dev);
25 static void virtio_crypto_dev_info_get(struct rte_cryptodev *dev,
26                 struct rte_cryptodev_info *dev_info);
27 static void virtio_crypto_dev_stats_get(struct rte_cryptodev *dev,
28                 struct rte_cryptodev_stats *stats);
29 static void virtio_crypto_dev_stats_reset(struct rte_cryptodev *dev);
30 static int virtio_crypto_qp_setup(struct rte_cryptodev *dev,
31                 uint16_t queue_pair_id,
32                 const struct rte_cryptodev_qp_conf *qp_conf,
33                 int socket_id);
34 static int virtio_crypto_qp_release(struct rte_cryptodev *dev,
35                 uint16_t queue_pair_id);
36 static void virtio_crypto_dev_free_mbufs(struct rte_cryptodev *dev);
37 static unsigned int virtio_crypto_sym_get_session_private_size(
38                 struct rte_cryptodev *dev);
39 static void virtio_crypto_sym_clear_session(struct rte_cryptodev *dev,
40                 struct rte_cryptodev_sym_session *sess);
41 static int virtio_crypto_sym_configure_session(struct rte_cryptodev *dev,
42                 struct rte_crypto_sym_xform *xform,
43                 struct rte_cryptodev_sym_session *session,
44                 struct rte_mempool *mp);
45
46 /*
47  * The set of PCI devices this driver supports
48  */
49 static const struct rte_pci_id pci_id_virtio_crypto_map[] = {
50         { RTE_PCI_DEVICE(VIRTIO_CRYPTO_PCI_VENDORID,
51                                 VIRTIO_CRYPTO_PCI_DEVICEID) },
52         { .vendor_id = 0, /* sentinel */ },
53 };
54
55 static const struct rte_cryptodev_capabilities virtio_capabilities[] = {
56         VIRTIO_SYM_CAPABILITIES,
57         RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
58 };
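
/*
 * VIRTIO_SYM_CAPABILITIES is defined in virtio_crypto_capabilities.h and
 * advertises the symmetric algorithms this PMD can offer; it matches the
 * AES-CBC cipher and SHA1-HMAC auth entries handled later in this file.
 */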
59
60 uint8_t cryptodev_virtio_driver_id;
61
62 #define NUM_ENTRY_SYM_CREATE_SESSION 4
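
/*
 * A create-session control request uses at most four indirect descriptor
 * entries: the ctrl request, an optional cipher key, an optional auth key
 * and the device-writable session_input.
 */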
63
64 static int
65 virtio_crypto_send_command(struct virtqueue *vq,
66                 struct virtio_crypto_op_ctrl_req *ctrl, uint8_t *cipher_key,
67                 uint8_t *auth_key, struct virtio_crypto_session *session)
68 {
69         uint8_t idx = 0;
70         uint8_t needed = 1;
71         uint32_t head = 0;
72         uint32_t len_cipher_key = 0;
73         uint32_t len_auth_key = 0;
74         uint32_t len_ctrl_req = sizeof(struct virtio_crypto_op_ctrl_req);
75         uint32_t len_session_input = sizeof(struct virtio_crypto_session_input);
76         uint32_t len_total = 0;
77         uint32_t input_offset = 0;
78         void *virt_addr_started = NULL;
79         phys_addr_t phys_addr_started;
80         struct vring_desc *desc;
81         uint32_t desc_offset;
82         struct virtio_crypto_session_input *input;
83         int ret;
84
85         PMD_INIT_FUNC_TRACE();
86
87         if (session == NULL) {
88                 VIRTIO_CRYPTO_SESSION_LOG_ERR("session is NULL.");
89                 return -EINVAL;
90         }
91         /* a cipher key is mandatory; auth_key may be NULL for cipher-only sessions */
92         if (!cipher_key) {
93                 VIRTIO_CRYPTO_SESSION_LOG_ERR("cipher key is NULL.");
94                 return -EINVAL;
95         }
96
97         head = vq->vq_desc_head_idx;
98         VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_desc_head_idx = %d, vq = %p",
99                                         head, vq);
100
101         if (vq->vq_free_cnt < needed) {
102                 VIRTIO_CRYPTO_SESSION_LOG_ERR("Not enough free descriptors");
103                 return -ENOSPC;
104         }
105
106         /* calculate the length of cipher key */
107         if (cipher_key) {
108                 switch (ctrl->u.sym_create_session.op_type) {
109                 case VIRTIO_CRYPTO_SYM_OP_CIPHER:
110                         len_cipher_key
111                                 = ctrl->u.sym_create_session.u.cipher
112                                                         .para.keylen;
113                         break;
114                 case VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING:
115                         len_cipher_key
116                                 = ctrl->u.sym_create_session.u.chain
117                                         .para.cipher_param.keylen;
118                         break;
119                 default:
120                         VIRTIO_CRYPTO_SESSION_LOG_ERR("invalid op type");
121                         return -EINVAL;
122                 }
123         }
124
125         /* calculate the length of auth key */
126         if (auth_key) {
127                 len_auth_key =
128                         ctrl->u.sym_create_session.u.chain.para.u.mac_param
129                                 .auth_key_len;
130         }
131
132         /*
133          * malloc memory to store indirect vring_desc entries, including
134          * ctrl request, cipher key, auth key, session input and desc vring
135          */
136         desc_offset = len_ctrl_req + len_cipher_key + len_auth_key
137                 + len_session_input;
138         virt_addr_started = rte_malloc(NULL,
139                 desc_offset + NUM_ENTRY_SYM_CREATE_SESSION
140                         * sizeof(struct vring_desc), RTE_CACHE_LINE_SIZE);
141         if (virt_addr_started == NULL) {
142                 VIRTIO_CRYPTO_SESSION_LOG_ERR("not enough heap memory");
143                 return -ENOSPC;
144         }
145         phys_addr_started = rte_malloc_virt2iova(virt_addr_started);
146
147         /* address to store indirect vring desc entries */
148         desc = (struct vring_desc *)
149                 ((uint8_t *)virt_addr_started + desc_offset);
150
151         /*  ctrl req part */
152         memcpy(virt_addr_started, ctrl, len_ctrl_req);
153         desc[idx].addr = phys_addr_started;
154         desc[idx].len = len_ctrl_req;
155         desc[idx].flags = VRING_DESC_F_NEXT;
156         desc[idx].next = idx + 1;
157         idx++;
158         len_total += len_ctrl_req;
159         input_offset += len_ctrl_req;
160
161         /* cipher key part */
162         if (len_cipher_key > 0) {
163                 memcpy((uint8_t *)virt_addr_started + len_total,
164                         cipher_key, len_cipher_key);
165
166                 desc[idx].addr = phys_addr_started + len_total;
167                 desc[idx].len = len_cipher_key;
168                 desc[idx].flags = VRING_DESC_F_NEXT;
169                 desc[idx].next = idx + 1;
170                 idx++;
171                 len_total += len_cipher_key;
172                 input_offset += len_cipher_key;
173         }
174
175         /* auth key part */
176         if (len_auth_key > 0) {
177                 memcpy((uint8_t *)virt_addr_started + len_total,
178                         auth_key, len_auth_key);
179
180                 desc[idx].addr = phys_addr_started + len_total;
181                 desc[idx].len = len_auth_key;
182                 desc[idx].flags = VRING_DESC_F_NEXT;
183                 desc[idx].next = idx + 1;
184                 idx++;
185                 len_total += len_auth_key;
186                 input_offset += len_auth_key;
187         }
188
189         /* input part */
190         input = (struct virtio_crypto_session_input *)
191                 ((uint8_t *)virt_addr_started + input_offset);
192         input->status = VIRTIO_CRYPTO_ERR;
193         input->session_id = ~0ULL;
194         desc[idx].addr = phys_addr_started + len_total;
195         desc[idx].len = len_session_input;
196         desc[idx].flags = VRING_DESC_F_WRITE;
197         idx++;
198
199         /* use a single desc entry */
200         vq->vq_ring.desc[head].addr = phys_addr_started + desc_offset;
201         vq->vq_ring.desc[head].len = idx * sizeof(struct vring_desc);
202         vq->vq_ring.desc[head].flags = VRING_DESC_F_INDIRECT;
203         vq->vq_free_cnt--;
204
205         vq->vq_desc_head_idx = vq->vq_ring.desc[head].next;
206
207         vq_update_avail_ring(vq, head);
208         vq_update_avail_idx(vq);
209
210         VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_queue_index = %d",
211                                         vq->vq_queue_index);
212
213         virtqueue_notify(vq);
214
215         rte_rmb();
216         while (vq->vq_used_cons_idx == vq->vq_ring.used->idx) {
217                 rte_rmb();
218                 usleep(100);
219         }
220
221         while (vq->vq_used_cons_idx != vq->vq_ring.used->idx) {
222                 uint32_t idx, desc_idx, used_idx;
223                 struct vring_used_elem *uep;
224
225                 used_idx = (uint32_t)(vq->vq_used_cons_idx
226                                 & (vq->vq_nentries - 1));
227                 uep = &vq->vq_ring.used->ring[used_idx];
228                 idx = (uint32_t) uep->id;
229                 desc_idx = idx;
230
231                 while (vq->vq_ring.desc[desc_idx].flags & VRING_DESC_F_NEXT) {
232                         desc_idx = vq->vq_ring.desc[desc_idx].next;
233                         vq->vq_free_cnt++;
234                 }
235
236                 vq->vq_ring.desc[desc_idx].next = vq->vq_desc_head_idx;
237                 vq->vq_desc_head_idx = idx;
238
239                 vq->vq_used_cons_idx++;
240                 vq->vq_free_cnt++;
241         }
242
243         VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_free_cnt=%d\n"
244                         "vq->vq_desc_head_idx=%d",
245                         vq->vq_free_cnt, vq->vq_desc_head_idx);
246
247         /* get the result */
248         if (input->status != VIRTIO_CRYPTO_OK) {
249                 VIRTIO_CRYPTO_SESSION_LOG_ERR("Backend failed to create session: "
250                                 "status=%u, session_id=%" PRIu64 "",
251                                 input->status, input->session_id);
252                 rte_free(virt_addr_started);
253                 ret = -1;
254         } else {
255                 session->session_id = input->session_id;
256
257                 VIRTIO_CRYPTO_SESSION_LOG_INFO("Create session successfully, "
258                                 "session_id=%" PRIu64 "", input->session_id);
259                 rte_free(virt_addr_started);
260                 ret = 0;
261         }
262
263         return ret;
264 }
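
/*
 * Layout of the single buffer allocated by virtio_crypto_send_command(),
 * with offsets relative to virt_addr_started/phys_addr_started:
 *
 *   +-----------------------+ 0
 *   | op_ctrl_req           |  desc[0], device-readable
 *   +-----------------------+ len_ctrl_req
 *   | cipher key (optional) |  next desc, device-readable
 *   +-----------------------+
 *   | auth key (optional)   |  next desc, device-readable
 *   +-----------------------+
 *   | session_input         |  last desc, device-writable
 *   +-----------------------+ desc_offset
 *   | indirect vring_desc   |  NUM_ENTRY_SYM_CREATE_SESSION entries
 *   +-----------------------+
 *
 * Only one slot of the control ring is consumed: it points at the indirect
 * table (VRING_DESC_F_INDIRECT), and the function then polls the used ring
 * synchronously until the backend has filled in session_input.
 */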
265
266 void
267 virtio_crypto_queue_release(struct virtqueue *vq)
268 {
269         struct virtio_crypto_hw *hw;
270
271         PMD_INIT_FUNC_TRACE();
272
273         if (vq) {
274                 hw = vq->hw;
275                 /* Select and deactivate the queue */
276                 VTPCI_OPS(hw)->del_queue(hw, vq);
277
278                 rte_memzone_free(vq->mz);
279                 rte_mempool_free(vq->mpool);
280                 rte_free(vq);
281         }
282 }
283
284 #define MPOOL_MAX_NAME_SZ 32
285
286 int
287 virtio_crypto_queue_setup(struct rte_cryptodev *dev,
288                 int queue_type,
289                 uint16_t vtpci_queue_idx,
290                 uint16_t nb_desc,
291                 int socket_id,
292                 struct virtqueue **pvq)
293 {
294         char vq_name[VIRTQUEUE_MAX_NAME_SZ];
295         char mpool_name[MPOOL_MAX_NAME_SZ];
296         const struct rte_memzone *mz;
297         unsigned int vq_size, size;
298         struct virtio_crypto_hw *hw = dev->data->dev_private;
299         struct virtqueue *vq = NULL;
300         uint32_t i = 0;
301         uint32_t j;
302
303         PMD_INIT_FUNC_TRACE();
304
305         VIRTIO_CRYPTO_INIT_LOG_DBG("setting up queue: %u", vtpci_queue_idx);
306
307         /*
308          * Read the virtqueue size from the Queue Size field
309          * Always power of 2 and if 0 virtqueue does not exist
310          */
311         vq_size = VTPCI_OPS(hw)->get_queue_num(hw, vtpci_queue_idx);
312         if (vq_size == 0) {
313                 VIRTIO_CRYPTO_INIT_LOG_ERR("virtqueue does not exist");
314                 return -EINVAL;
315         }
316         VIRTIO_CRYPTO_INIT_LOG_DBG("vq_size: %u", vq_size);
317
318         if (!rte_is_power_of_2(vq_size)) {
319                 VIRTIO_CRYPTO_INIT_LOG_ERR("virtqueue size is not a power of 2");
320                 return -EINVAL;
321         }
322
323         if (queue_type == VTCRYPTO_DATAQ) {
324                 snprintf(vq_name, sizeof(vq_name), "dev%d_dataqueue%d",
325                                 dev->data->dev_id, vtpci_queue_idx);
326                 snprintf(mpool_name, sizeof(mpool_name),
327                                 "dev%d_dataqueue%d_mpool",
328                                 dev->data->dev_id, vtpci_queue_idx);
329         } else if (queue_type == VTCRYPTO_CTRLQ) {
330                 snprintf(vq_name, sizeof(vq_name), "dev%d_controlqueue",
331                                 dev->data->dev_id);
332                 snprintf(mpool_name, sizeof(mpool_name),
333                                 "dev%d_controlqueue_mpool",
334                                 dev->data->dev_id);
335         }
336         size = RTE_ALIGN_CEIL(sizeof(*vq) +
337                                 vq_size * sizeof(struct vq_desc_extra),
338                                 RTE_CACHE_LINE_SIZE);
339         vq = rte_zmalloc_socket(vq_name, size, RTE_CACHE_LINE_SIZE,
340                                 socket_id);
341         if (vq == NULL) {
342                 VIRTIO_CRYPTO_INIT_LOG_ERR("Can not allocate virtqueue");
343                 return -ENOMEM;
344         }
345
346         if (queue_type == VTCRYPTO_DATAQ) {
347                 /* pre-allocate a mempool and use it in the data plane to
348                  * improve performance
349                  */
350                 vq->mpool = rte_mempool_lookup(mpool_name);
351                 if (vq->mpool == NULL)
352                         vq->mpool = rte_mempool_create(mpool_name,
353                                         vq_size,
354                                         sizeof(struct virtio_crypto_op_cookie),
355                                         RTE_CACHE_LINE_SIZE, 0,
356                                         NULL, NULL, NULL, NULL, socket_id,
357                                         0);
358                 if (!vq->mpool) {
359                         VIRTIO_CRYPTO_DRV_LOG_ERR("Virtio Crypto PMD "
360                                         "Cannot create mempool");
361                         goto mpool_create_err;
362                 }
363                 for (i = 0; i < vq_size; i++) {
364                         vq->vq_descx[i].cookie =
365                                 rte_zmalloc("crypto PMD op cookie pointer",
366                                         sizeof(struct virtio_crypto_op_cookie),
367                                         RTE_CACHE_LINE_SIZE);
368                         if (vq->vq_descx[i].cookie == NULL) {
369                                 VIRTIO_CRYPTO_DRV_LOG_ERR("Failed to "
370                                                 "alloc mem for cookie");
371                                 goto cookie_alloc_err;
372                         }
373                 }
374         }
375
376         vq->hw = hw;
377         vq->dev_id = dev->data->dev_id;
378         vq->vq_queue_index = vtpci_queue_idx;
379         vq->vq_nentries = vq_size;
380
381         /*
382          * Using part of the vring entries is permitted, but the maximum
383          * is vq_size
384          */
385         if (nb_desc == 0 || nb_desc > vq_size)
386                 nb_desc = vq_size;
387         vq->vq_free_cnt = nb_desc;
388
389         /*
390          * Reserve a memzone for vring elements
391          */
392         size = vring_size(vq_size, VIRTIO_PCI_VRING_ALIGN);
393         vq->vq_ring_size = RTE_ALIGN_CEIL(size, VIRTIO_PCI_VRING_ALIGN);
394         VIRTIO_CRYPTO_INIT_LOG_DBG("%s vring_size: %d, rounded_vring_size: %d",
395                         (queue_type == VTCRYPTO_DATAQ) ? "dataq" : "ctrlq",
396                         size, vq->vq_ring_size);
397
398         mz = rte_memzone_reserve_aligned(vq_name, vq->vq_ring_size,
399                         socket_id, 0, VIRTIO_PCI_VRING_ALIGN);
400         if (mz == NULL) {
401                 if (rte_errno == EEXIST)
402                         mz = rte_memzone_lookup(vq_name);
403                 if (mz == NULL) {
404                         VIRTIO_CRYPTO_INIT_LOG_ERR("not enough memory");
405                         goto mz_reserve_err;
406                 }
407         }
408
409         /*
410          * The legacy virtio PCI VIRTIO_PCI_QUEUE_PFN register is 32 bits wide
411          * and only accepts a 32-bit page frame number.
412          * Check that the allocated physical memory does not exceed 16TB.
413          */
414         if ((mz->iova + vq->vq_ring_size - 1)
415                                 >> (VIRTIO_PCI_QUEUE_ADDR_SHIFT + 32)) {
416                 VIRTIO_CRYPTO_INIT_LOG_ERR("vring address shouldn't be "
417                                         "above 16TB!");
418                 goto vring_addr_err;
419         }
420
421         memset(mz->addr, 0, mz->len);
422         vq->mz = mz;
423         vq->vq_ring_mem = mz->iova;
424         vq->vq_ring_virt_mem = mz->addr;
425         VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_ring_mem(physical): 0x%"PRIx64,
426                                         (uint64_t)mz->iova);
427         VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_ring_virt_mem: 0x%"PRIx64,
428                                         (uint64_t)(uintptr_t)mz->addr);
429
430         *pvq = vq;
431
432         return 0;
433
434 vring_addr_err:
435         rte_memzone_free(mz);
436 mz_reserve_err:
437 cookie_alloc_err:
438         rte_mempool_free(vq->mpool);
439         if (i != 0) {
440                 for (j = 0; j < i; j++)
441                         rte_free(vq->vq_descx[j].cookie);
442         }
443 mpool_create_err:
444         rte_free(vq);
445         return -ENOMEM;
446 }
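
/*
 * virtio_crypto_queue_setup() in short:
 *   1. read the queue size from the device and require a power of 2;
 *   2. allocate the virtqueue plus per-descriptor extra data (and, for
 *      data queues, a cookie mempool with one cookie per descriptor);
 *   3. reserve a memzone for the vring and verify its IOVA fits the
 *      32-bit page frame number used by the legacy queue register;
 *   4. record the ring addresses in the virtqueue and return it.
 * The error labels at the end unwind these steps in reverse order.
 */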
447
448 static int
449 virtio_crypto_ctrlq_setup(struct rte_cryptodev *dev, uint16_t queue_idx)
450 {
451         int ret;
452         struct virtqueue *vq;
453         struct virtio_crypto_hw *hw = dev->data->dev_private;
454
455         /* if virtio device has started, do not touch the virtqueues */
456         if (dev->data->dev_started)
457                 return 0;
458
459         PMD_INIT_FUNC_TRACE();
460
461         ret = virtio_crypto_queue_setup(dev, VTCRYPTO_CTRLQ, queue_idx,
462                         0, SOCKET_ID_ANY, &vq);
463         if (ret < 0) {
464                 VIRTIO_CRYPTO_INIT_LOG_ERR("control vq initialization failed");
465                 return ret;
466         }
467
468         hw->cvq = vq;
469
470         return 0;
471 }
472
473 static void
474 virtio_crypto_free_queues(struct rte_cryptodev *dev)
475 {
476         unsigned int i;
477         struct virtio_crypto_hw *hw = dev->data->dev_private;
478
479         PMD_INIT_FUNC_TRACE();
480
481         /* control queue release */
482         virtio_crypto_queue_release(hw->cvq);
483
484         /* data queue release */
485         for (i = 0; i < hw->max_dataqueues; i++)
486                 virtio_crypto_queue_release(dev->data->queue_pairs[i]);
487 }
488
489 static int
490 virtio_crypto_dev_close(struct rte_cryptodev *dev __rte_unused)
491 {
492         return 0;
493 }
494
495 /*
496  * dev_ops for virtio, bare necessities for basic operation
497  */
498 static struct rte_cryptodev_ops virtio_crypto_dev_ops = {
499         /* Device related operations */
500         .dev_configure                   = virtio_crypto_dev_configure,
501         .dev_start                       = virtio_crypto_dev_start,
502         .dev_stop                        = virtio_crypto_dev_stop,
503         .dev_close                       = virtio_crypto_dev_close,
504         .dev_infos_get                   = virtio_crypto_dev_info_get,
505
506         .stats_get                       = virtio_crypto_dev_stats_get,
507         .stats_reset                     = virtio_crypto_dev_stats_reset,
508
509         .queue_pair_setup                = virtio_crypto_qp_setup,
510         .queue_pair_release              = virtio_crypto_qp_release,
511
512         /* Crypto related operations */
513         .sym_session_get_size           = virtio_crypto_sym_get_session_private_size,
514         .sym_session_configure          = virtio_crypto_sym_configure_session,
515         .sym_session_clear              = virtio_crypto_sym_clear_session
516 };
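
/*
 * These callbacks are never invoked directly; applications reach them
 * through the generic cryptodev API. A minimal, hedged sketch of that
 * flow (device id, descriptor count and socket are placeholders, and
 * qp_conf fields other than nb_descriptors are omitted):
 *
 *     struct rte_cryptodev_config conf = {
 *             .socket_id = rte_socket_id(),
 *             .nb_queue_pairs = 1,
 *     };
 *     struct rte_cryptodev_qp_conf qp_conf = { .nb_descriptors = 128 };
 *
 *     rte_cryptodev_configure(dev_id, &conf);          // dev_configure
 *     rte_cryptodev_queue_pair_setup(dev_id, 0,
 *             &qp_conf, rte_socket_id());              // queue_pair_setup
 *     rte_cryptodev_start(dev_id);                     // dev_start
 */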
517
518 static void
519 virtio_crypto_update_stats(struct rte_cryptodev *dev,
520                 struct rte_cryptodev_stats *stats)
521 {
522         unsigned int i;
523         struct virtio_crypto_hw *hw = dev->data->dev_private;
524
525         PMD_INIT_FUNC_TRACE();
526
527         if (stats == NULL) {
528                 VIRTIO_CRYPTO_DRV_LOG_ERR("invalid pointer");
529                 return;
530         }
531
532         for (i = 0; i < hw->max_dataqueues; i++) {
533                 const struct virtqueue *data_queue
534                         = dev->data->queue_pairs[i];
535                 if (data_queue == NULL)
536                         continue;
537
538                 stats->enqueued_count += data_queue->packets_sent_total;
539                 stats->enqueue_err_count += data_queue->packets_sent_failed;
540
541                 stats->dequeued_count += data_queue->packets_received_total;
542                 stats->dequeue_err_count
543                         += data_queue->packets_received_failed;
544         }
545 }
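
/*
 * Device-level statistics are simply the sum of the per-virtqueue
 * packets_sent/packets_received counters maintained by the data path;
 * no device register is queried.
 */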
546
547 static void
548 virtio_crypto_dev_stats_get(struct rte_cryptodev *dev,
549                 struct rte_cryptodev_stats *stats)
550 {
551         PMD_INIT_FUNC_TRACE();
552
553         virtio_crypto_update_stats(dev, stats);
554 }
555
556 static void
557 virtio_crypto_dev_stats_reset(struct rte_cryptodev *dev)
558 {
559         unsigned int i;
560         struct virtio_crypto_hw *hw = dev->data->dev_private;
561
562         PMD_INIT_FUNC_TRACE();
563
564         for (i = 0; i < hw->max_dataqueues; i++) {
565                 struct virtqueue *data_queue = dev->data->queue_pairs[i];
566                 if (data_queue == NULL)
567                         continue;
568
569                 data_queue->packets_sent_total = 0;
570                 data_queue->packets_sent_failed = 0;
571
572                 data_queue->packets_received_total = 0;
573                 data_queue->packets_received_failed = 0;
574         }
575 }
576
577 static int
578 virtio_crypto_qp_setup(struct rte_cryptodev *dev, uint16_t queue_pair_id,
579                 const struct rte_cryptodev_qp_conf *qp_conf,
580                 int socket_id)
581 {
582         int ret;
583         struct virtqueue *vq;
584
585         PMD_INIT_FUNC_TRACE();
586
587         /* if virtio dev is started, do not touch the virtqueues */
588         if (dev->data->dev_started)
589                 return 0;
590
591         ret = virtio_crypto_queue_setup(dev, VTCRYPTO_DATAQ, queue_pair_id,
592                         qp_conf->nb_descriptors, socket_id, &vq);
593         if (ret < 0) {
594                 VIRTIO_CRYPTO_INIT_LOG_ERR(
595                         "virtio crypto data queue initialization failed\n");
596                 return ret;
597         }
598
599         dev->data->queue_pairs[queue_pair_id] = vq;
600
601         return 0;
602 }
603
604 static int
605 virtio_crypto_qp_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
606 {
607         struct virtqueue *vq
608                 = (struct virtqueue *)dev->data->queue_pairs[queue_pair_id];
609
610         PMD_INIT_FUNC_TRACE();
611
612         if (vq == NULL) {
613                 VIRTIO_CRYPTO_DRV_LOG_DBG("vq already freed");
614                 return 0;
615         }
616
617         virtio_crypto_queue_release(vq);
618         return 0;
619 }
620
621 static int
622 virtio_negotiate_features(struct virtio_crypto_hw *hw, uint64_t req_features)
623 {
624         uint64_t host_features;
625
626         PMD_INIT_FUNC_TRACE();
627
628         /* Prepare guest_features: features the driver wants to support */
629         VIRTIO_CRYPTO_INIT_LOG_DBG("guest_features before negotiate = %" PRIx64,
630                 req_features);
631
632         /* Read device(host) feature bits */
633         host_features = VTPCI_OPS(hw)->get_features(hw);
634         VIRTIO_CRYPTO_INIT_LOG_DBG("host_features before negotiate = %" PRIx64,
635                 host_features);
636
637         /*
638          * Negotiate features: the accepted subset of the device feature bits
639          * is written back as the guest feature bits.
640          */
641         hw->guest_features = req_features;
642         hw->guest_features = vtpci_cryptodev_negotiate_features(hw,
643                                                         host_features);
644         VIRTIO_CRYPTO_INIT_LOG_DBG("features after negotiate = %" PRIx64,
645                 hw->guest_features);
646
647         if (hw->modern) {
648                 if (!vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) {
649                         VIRTIO_CRYPTO_INIT_LOG_ERR(
650                                 "VIRTIO_F_VERSION_1 feature is not enabled.");
651                         return -1;
652                 }
653                 vtpci_cryptodev_set_status(hw,
654                         VIRTIO_CONFIG_STATUS_FEATURES_OK);
655                 if (!(vtpci_cryptodev_get_status(hw) &
656                         VIRTIO_CONFIG_STATUS_FEATURES_OK)) {
657                         VIRTIO_CRYPTO_INIT_LOG_ERR("failed to set FEATURES_OK "
658                                                 "status!");
659                         return -1;
660                 }
661         }
662
663         hw->req_guest_features = req_features;
664
665         return 0;
666 }
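
/*
 * Feature negotiation follows the usual virtio handshake: read the device
 * (host) feature bits, intersect them with the features the driver
 * requested, write the result back as the guest features and, for modern
 * devices, confirm acceptance by re-reading FEATURES_OK from the status
 * register before the device is later driven to DRIVER_OK.
 */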
667
668 /* reset device and renegotiate features if needed */
669 static int
670 virtio_crypto_init_device(struct rte_cryptodev *cryptodev,
671         uint64_t req_features)
672 {
673         struct virtio_crypto_hw *hw = cryptodev->data->dev_private;
674         struct virtio_crypto_config local_config;
675         struct virtio_crypto_config *config = &local_config;
676
677         PMD_INIT_FUNC_TRACE();
678
679         /* Reset the device although not necessary at startup */
680         vtpci_cryptodev_reset(hw);
681
682         /* Tell the host we've noticed this device. */
683         vtpci_cryptodev_set_status(hw, VIRTIO_CONFIG_STATUS_ACK);
684
685         /* Tell the host we know how to drive the device. */
686         vtpci_cryptodev_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER);
687         if (virtio_negotiate_features(hw, req_features) < 0)
688                 return -1;
689
690         /* Get status of the device */
691         vtpci_read_cryptodev_config(hw,
692                 offsetof(struct virtio_crypto_config, status),
693                 &config->status, sizeof(config->status));
694         if (config->status != VIRTIO_CRYPTO_S_HW_READY) {
695                 VIRTIO_CRYPTO_DRV_LOG_ERR("accelerator hardware is "
696                                 "not ready");
697                 return -1;
698         }
699
700         /* Get number of data queues */
701         vtpci_read_cryptodev_config(hw,
702                 offsetof(struct virtio_crypto_config, max_dataqueues),
703                 &config->max_dataqueues,
704                 sizeof(config->max_dataqueues));
705         hw->max_dataqueues = config->max_dataqueues;
706
707         VIRTIO_CRYPTO_INIT_LOG_DBG("hw->max_dataqueues=%d",
708                 hw->max_dataqueues);
709
710         return 0;
711 }
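
/*
 * Only two fields of the virtio_crypto_config space are consumed here:
 * "status", which must report VIRTIO_CRYPTO_S_HW_READY, and
 * "max_dataqueues", which bounds the number of queue pairs exposed to
 * applications through dev_info_get().
 */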
712
713 /*
714  * This function does the work of the PCI probe() callback.
715  * It returns 0 on success.
716  */
717 static int
718 crypto_virtio_create(const char *name, struct rte_pci_device *pci_dev,
719                 struct rte_cryptodev_pmd_init_params *init_params)
720 {
721         struct rte_cryptodev *cryptodev;
722         struct virtio_crypto_hw *hw;
723
724         PMD_INIT_FUNC_TRACE();
725
726         cryptodev = rte_cryptodev_pmd_create(name, &pci_dev->device,
727                                         init_params);
728         if (cryptodev == NULL)
729                 return -ENODEV;
730
731         cryptodev->driver_id = cryptodev_virtio_driver_id;
732         cryptodev->dev_ops = &virtio_crypto_dev_ops;
733
734         cryptodev->enqueue_burst = virtio_crypto_pkt_tx_burst;
735         cryptodev->dequeue_burst = virtio_crypto_pkt_rx_burst;
736
737         cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
738                 RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
739                 RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
740
741         hw = cryptodev->data->dev_private;
742         hw->dev_id = cryptodev->data->dev_id;
743         hw->virtio_dev_capabilities = virtio_capabilities;
744
745         VIRTIO_CRYPTO_INIT_LOG_DBG("dev %d vendorID=0x%x deviceID=0x%x",
746                 cryptodev->data->dev_id, pci_dev->id.vendor_id,
747                 pci_dev->id.device_id);
748
749         /* pci device init */
750         if (vtpci_cryptodev_init(pci_dev, hw))
751                 return -1;
752
753         if (virtio_crypto_init_device(cryptodev,
754                         VIRTIO_CRYPTO_PMD_GUEST_FEATURES) < 0)
755                 return -1;
756
757         rte_cryptodev_pmd_probing_finish(cryptodev);
758
759         return 0;
760 }
761
762 static int
763 virtio_crypto_dev_uninit(struct rte_cryptodev *cryptodev)
764 {
765         struct virtio_crypto_hw *hw = cryptodev->data->dev_private;
766
767         PMD_INIT_FUNC_TRACE();
768
769         if (rte_eal_process_type() == RTE_PROC_SECONDARY)
770                 return -EPERM;
771
772         if (cryptodev->data->dev_started) {
773                 virtio_crypto_dev_stop(cryptodev);
774                 virtio_crypto_dev_close(cryptodev);
775         }
776
777         cryptodev->dev_ops = NULL;
778         cryptodev->enqueue_burst = NULL;
779         cryptodev->dequeue_burst = NULL;
780
781         /* release control queue */
782         virtio_crypto_queue_release(hw->cvq);
783
784         rte_free(cryptodev->data);
785         cryptodev->data = NULL;
786
787         VIRTIO_CRYPTO_DRV_LOG_INFO("dev_uninit completed");
788
789         return 0;
790 }
791
792 static int
793 virtio_crypto_dev_configure(struct rte_cryptodev *cryptodev,
794         struct rte_cryptodev_config *config __rte_unused)
795 {
796         struct virtio_crypto_hw *hw = cryptodev->data->dev_private;
797
798         PMD_INIT_FUNC_TRACE();
799
800         if (virtio_crypto_init_device(cryptodev,
801                         VIRTIO_CRYPTO_PMD_GUEST_FEATURES) < 0)
802                 return -1;
803
804         /* setup control queue:
805          * queue indexes [0, 1, ..., (config->max_dataqueues - 1)] are data queues;
806          * queue index config->max_dataqueues is the control queue
807          */
808         if (virtio_crypto_ctrlq_setup(cryptodev, hw->max_dataqueues) < 0) {
809                 VIRTIO_CRYPTO_INIT_LOG_ERR("control queue setup error");
810                 return -1;
811         }
812         virtio_crypto_ctrlq_start(cryptodev);
813
814         return 0;
815 }
816
817 static void
818 virtio_crypto_dev_stop(struct rte_cryptodev *dev)
819 {
820         struct virtio_crypto_hw *hw = dev->data->dev_private;
821
822         PMD_INIT_FUNC_TRACE();
823         VIRTIO_CRYPTO_DRV_LOG_DBG("virtio_dev_stop");
824
825         vtpci_cryptodev_reset(hw);
826
827         virtio_crypto_dev_free_mbufs(dev);
828         virtio_crypto_free_queues(dev);
829
830         dev->data->dev_started = 0;
831 }
832
833 static int
834 virtio_crypto_dev_start(struct rte_cryptodev *dev)
835 {
836         struct virtio_crypto_hw *hw = dev->data->dev_private;
837
838         if (dev->data->dev_started)
839                 return 0;
840
841         /* Do final configuration before queue engine starts */
842         virtio_crypto_dataq_start(dev);
843         vtpci_cryptodev_reinit_complete(hw);
844
845         dev->data->dev_started = 1;
846
847         return 0;
848 }
849
850 static void
851 virtio_crypto_dev_free_mbufs(struct rte_cryptodev *dev)
852 {
853         uint32_t i;
854         struct virtio_crypto_hw *hw = dev->data->dev_private;
855
856         for (i = 0; i < hw->max_dataqueues; i++) {
857                 VIRTIO_CRYPTO_INIT_LOG_DBG("Before freeing dataq[%d] used "
858                         "and unused buf", i);
859                 VIRTQUEUE_DUMP((struct virtqueue *)
860                         dev->data->queue_pairs[i]);
861
862                 VIRTIO_CRYPTO_INIT_LOG_DBG("queue_pairs[%d]=%p",
863                                 i, dev->data->queue_pairs[i]);
864
865                 virtqueue_detatch_unused(dev->data->queue_pairs[i]);
866
867                 VIRTIO_CRYPTO_INIT_LOG_DBG("After freeing dataq[%d] used and "
868                                         "unused buf", i);
869                 VIRTQUEUE_DUMP(
870                         (struct virtqueue *)dev->data->queue_pairs[i]);
871         }
872 }
873
874 static unsigned int
875 virtio_crypto_sym_get_session_private_size(
876                 struct rte_cryptodev *dev __rte_unused)
877 {
878         PMD_INIT_FUNC_TRACE();
879
880         return RTE_ALIGN_CEIL(sizeof(struct virtio_crypto_session), 16);
881 }
882
883 static int
884 virtio_crypto_check_sym_session_paras(
885                 struct rte_cryptodev *dev)
886 {
887         struct virtio_crypto_hw *hw;
888
889         PMD_INIT_FUNC_TRACE();
890
891         if (unlikely(dev == NULL)) {
892                 VIRTIO_CRYPTO_SESSION_LOG_ERR("dev is NULL");
893                 return -1;
894         }
895         if (unlikely(dev->data == NULL)) {
896                 VIRTIO_CRYPTO_SESSION_LOG_ERR("dev->data is NULL");
897                 return -1;
898         }
899         hw = dev->data->dev_private;
900         if (unlikely(hw == NULL)) {
901                 VIRTIO_CRYPTO_SESSION_LOG_ERR("hw is NULL");
902                 return -1;
903         }
904         if (unlikely(hw->cvq == NULL)) {
905                 VIRTIO_CRYPTO_SESSION_LOG_ERR("vq is NULL");
906                 return -1;
907         }
908
909         return 0;
910 }
911
912 static int
913 virtio_crypto_check_sym_clear_session_paras(
914                 struct rte_cryptodev *dev,
915                 struct rte_cryptodev_sym_session *sess)
916 {
917         PMD_INIT_FUNC_TRACE();
918
919         if (sess == NULL) {
920                 VIRTIO_CRYPTO_SESSION_LOG_ERR("sym_session is NULL");
921                 return -1;
922         }
923
924         return virtio_crypto_check_sym_session_paras(dev);
925 }
926
927 #define NUM_ENTRY_SYM_CLEAR_SESSION 2
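
/*
 * A destroy-session request needs only two indirect entries: the ctrl
 * request itself and a device-writable virtio_crypto_inhdr that carries
 * the completion status.
 */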
928
929 static void
930 virtio_crypto_sym_clear_session(
931                 struct rte_cryptodev *dev,
932                 struct rte_cryptodev_sym_session *sess)
933 {
934         struct virtio_crypto_hw *hw;
935         struct virtqueue *vq;
936         struct virtio_crypto_session *session;
937         struct virtio_crypto_op_ctrl_req *ctrl;
938         struct vring_desc *desc;
939         uint8_t *status;
940         uint8_t needed = 1;
941         uint32_t head;
942         uint8_t *malloc_virt_addr;
943         uint64_t malloc_phys_addr;
944         uint8_t len_inhdr = sizeof(struct virtio_crypto_inhdr);
945         uint32_t len_op_ctrl_req = sizeof(struct virtio_crypto_op_ctrl_req);
946         uint32_t desc_offset = len_op_ctrl_req + len_inhdr;
947
948         PMD_INIT_FUNC_TRACE();
949
950         if (virtio_crypto_check_sym_clear_session_paras(dev, sess) < 0)
951                 return;
952
953         hw = dev->data->dev_private;
954         vq = hw->cvq;
955         session = (struct virtio_crypto_session *)get_sym_session_private_data(
956                 sess, cryptodev_virtio_driver_id);
957         if (session == NULL) {
958                 VIRTIO_CRYPTO_SESSION_LOG_ERR("Invalid session parameter");
959                 return;
960         }
961
962         VIRTIO_CRYPTO_SESSION_LOG_INFO("vq->vq_desc_head_idx = %d, "
963                         "vq = %p", vq->vq_desc_head_idx, vq);
964
965         if (vq->vq_free_cnt < needed) {
966                 VIRTIO_CRYPTO_SESSION_LOG_ERR(
967                                 "vq->vq_free_cnt = %d is less than %d, "
968                                 "not enough", vq->vq_free_cnt, needed);
969                 return;
970         }
971
972         /*
973          * malloc memory to store information of ctrl request op,
974          * returned status and desc vring
975          */
976         malloc_virt_addr = rte_malloc(NULL, len_op_ctrl_req + len_inhdr
977                 + NUM_ENTRY_SYM_CLEAR_SESSION
978                 * sizeof(struct vring_desc), RTE_CACHE_LINE_SIZE);
979         if (malloc_virt_addr == NULL) {
980                 VIRTIO_CRYPTO_SESSION_LOG_ERR("not enough heap room");
981                 return;
982         }
983         malloc_phys_addr = rte_malloc_virt2iova(malloc_virt_addr);
984
985         /* assign ctrl request op part */
986         ctrl = (struct virtio_crypto_op_ctrl_req *)malloc_virt_addr;
987         ctrl->header.opcode = VIRTIO_CRYPTO_CIPHER_DESTROY_SESSION;
988         /* default data virtqueue is 0 */
989         ctrl->header.queue_id = 0;
990         ctrl->u.destroy_session.session_id = session->session_id;
991
992         /* status part */
993         status = &(((struct virtio_crypto_inhdr *)
994                 ((uint8_t *)malloc_virt_addr + len_op_ctrl_req))->status);
995         *status = VIRTIO_CRYPTO_ERR;
996
997         /* indirect desc vring part */
998         desc = (struct vring_desc *)((uint8_t *)malloc_virt_addr
999                 + desc_offset);
1000
1001         /* ctrl request part */
1002         desc[0].addr = malloc_phys_addr;
1003         desc[0].len = len_op_ctrl_req;
1004         desc[0].flags = VRING_DESC_F_NEXT;
1005         desc[0].next = 1;
1006
1007         /* status part */
1008         desc[1].addr = malloc_phys_addr + len_op_ctrl_req;
1009         desc[1].len = len_inhdr;
1010         desc[1].flags = VRING_DESC_F_WRITE;
1011
1012         /* use only a single desc entry */
1013         head = vq->vq_desc_head_idx;
1014         vq->vq_ring.desc[head].flags = VRING_DESC_F_INDIRECT;
1015         vq->vq_ring.desc[head].addr = malloc_phys_addr + desc_offset;
1016         vq->vq_ring.desc[head].len
1017                 = NUM_ENTRY_SYM_CLEAR_SESSION
1018                 * sizeof(struct vring_desc);
1019         vq->vq_free_cnt -= needed;
1020
1021         vq->vq_desc_head_idx = vq->vq_ring.desc[head].next;
1022
1023         vq_update_avail_ring(vq, head);
1024         vq_update_avail_idx(vq);
1025
1026         VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_queue_index = %d",
1027                                         vq->vq_queue_index);
1028
1029         virtqueue_notify(vq);
1030
1031         rte_rmb();
1032         while (vq->vq_used_cons_idx == vq->vq_ring.used->idx) {
1033                 rte_rmb();
1034                 usleep(100);
1035         }
1036
1037         while (vq->vq_used_cons_idx != vq->vq_ring.used->idx) {
1038                 uint32_t idx, desc_idx, used_idx;
1039                 struct vring_used_elem *uep;
1040
1041                 used_idx = (uint32_t)(vq->vq_used_cons_idx
1042                                 & (vq->vq_nentries - 1));
1043                 uep = &vq->vq_ring.used->ring[used_idx];
1044                 idx = (uint32_t) uep->id;
1045                 desc_idx = idx;
1046                 while (vq->vq_ring.desc[desc_idx].flags
1047                                 & VRING_DESC_F_NEXT) {
1048                         desc_idx = vq->vq_ring.desc[desc_idx].next;
1049                         vq->vq_free_cnt++;
1050                 }
1051
1052                 vq->vq_ring.desc[desc_idx].next = vq->vq_desc_head_idx;
1053                 vq->vq_desc_head_idx = idx;
1054                 vq->vq_used_cons_idx++;
1055                 vq->vq_free_cnt++;
1056         }
1057
1058         if (*status != VIRTIO_CRYPTO_OK) {
1059                 VIRTIO_CRYPTO_SESSION_LOG_ERR("Close session failed "
1060                                 "status=%"PRIu32", session_id=%"PRIu64"",
1061                                 *status, session->session_id);
1062                 rte_free(malloc_virt_addr);
1063                 return;
1064         }
1065
1066         VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_free_cnt=%d\n"
1067                         "vq->vq_desc_head_idx=%d",
1068                         vq->vq_free_cnt, vq->vq_desc_head_idx);
1069
1070         VIRTIO_CRYPTO_SESSION_LOG_INFO("Close session %"PRIu64" successfully ",
1071                         session->session_id);
1072
1073         memset(session, 0, sizeof(struct virtio_crypto_session));
1074         struct rte_mempool *sess_mp = rte_mempool_from_obj(session);
1075         set_sym_session_private_data(sess, cryptodev_virtio_driver_id, NULL);
1076         rte_mempool_put(sess_mp, session);
1077         rte_free(malloc_virt_addr);
1078 }
1079
1080 static struct rte_crypto_cipher_xform *
1081 virtio_crypto_get_cipher_xform(struct rte_crypto_sym_xform *xform)
1082 {
1083         do {
1084                 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
1085                         return &xform->cipher;
1086
1087                 xform = xform->next;
1088         } while (xform);
1089
1090         return NULL;
1091 }
1092
1093 static struct rte_crypto_auth_xform *
1094 virtio_crypto_get_auth_xform(struct rte_crypto_sym_xform *xform)
1095 {
1096         do {
1097                 if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH)
1098                         return &xform->auth;
1099
1100                 xform = xform->next;
1101         } while (xform);
1102
1103         return NULL;
1104 }
1105
1106 /** Get xform chain order */
1107 static int
1108 virtio_crypto_get_chain_order(struct rte_crypto_sym_xform *xform)
1109 {
1110         if (xform == NULL)
1111                 return -1;
1112
1113         /* Cipher Only */
1114         if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
1115                         xform->next == NULL)
1116                 return VIRTIO_CRYPTO_CMD_CIPHER;
1117
1118         /* Authentication Only */
1119         if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
1120                         xform->next == NULL)
1121                 return VIRTIO_CRYPTO_CMD_AUTH;
1122
1123         /* Authenticate then Cipher */
1124         if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
1125                         xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
1126                 return VIRTIO_CRYPTO_CMD_HASH_CIPHER;
1127
1128         /* Cipher then Authenticate */
1129         if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
1130                         xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
1131                 return VIRTIO_CRYPTO_CMD_CIPHER_HASH;
1132
1133         return -1;
1134 }
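
/*
 * Example of the mapping above for a cipher-then-auth chain (a hedged
 * sketch; only the fields relevant to chain-order detection are set):
 *
 *     struct rte_crypto_sym_xform auth = {
 *             .type = RTE_CRYPTO_SYM_XFORM_AUTH,
 *             .next = NULL,
 *             .auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
 *     };
 *     struct rte_crypto_sym_xform cipher = {
 *             .type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *             .next = &auth,
 *             .cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC,
 *     };
 *
 *     virtio_crypto_get_chain_order(&cipher) returns
 *     VIRTIO_CRYPTO_CMD_CIPHER_HASH.
 */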
1135
1136 static int
1137 virtio_crypto_sym_pad_cipher_param(
1138                 struct virtio_crypto_cipher_session_para *para,
1139                 struct rte_crypto_cipher_xform *cipher_xform)
1140 {
1141         switch (cipher_xform->algo) {
1142         case RTE_CRYPTO_CIPHER_AES_CBC:
1143                 para->algo = VIRTIO_CRYPTO_CIPHER_AES_CBC;
1144                 break;
1145         default:
1146                 VIRTIO_CRYPTO_SESSION_LOG_ERR("Crypto: Unsupported "
1147                                 "Cipher alg %u", cipher_xform->algo);
1148                 return -1;
1149         }
1150
1151         para->keylen = cipher_xform->key.length;
1152         switch (cipher_xform->op) {
1153         case RTE_CRYPTO_CIPHER_OP_ENCRYPT:
1154                 para->op = VIRTIO_CRYPTO_OP_ENCRYPT;
1155                 break;
1156         case RTE_CRYPTO_CIPHER_OP_DECRYPT:
1157                 para->op = VIRTIO_CRYPTO_OP_DECRYPT;
1158                 break;
1159         default:
1160                 VIRTIO_CRYPTO_SESSION_LOG_ERR("Unsupported cipher operation "
1161                                         "parameter");
1162                 return -1;
1163         }
1164
1165         return 0;
1166 }
1167
1168 static int
1169 virtio_crypto_sym_pad_auth_param(
1170                 struct virtio_crypto_op_ctrl_req *ctrl,
1171                 struct rte_crypto_auth_xform *auth_xform)
1172 {
1173         uint32_t *algo;
1174         struct virtio_crypto_alg_chain_session_para *para =
1175                 &(ctrl->u.sym_create_session.u.chain.para);
1176
1177         switch (ctrl->u.sym_create_session.u.chain.para.hash_mode) {
1178         case VIRTIO_CRYPTO_SYM_HASH_MODE_PLAIN:
1179                 algo = &(para->u.hash_param.algo);
1180                 break;
1181         case VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH:
1182                 algo = &(para->u.mac_param.algo);
1183                 break;
1184         default:
1185                 VIRTIO_CRYPTO_SESSION_LOG_ERR("Unsupported hash mode %u "
1186                         "specified",
1187                         ctrl->u.sym_create_session.u.chain.para.hash_mode);
1188                 return -1;
1189         }
1190
1191         switch (auth_xform->algo) {
1192         case RTE_CRYPTO_AUTH_SHA1_HMAC:
1193                 *algo = VIRTIO_CRYPTO_MAC_HMAC_SHA1;
1194                 break;
1195         default:
1196                 VIRTIO_CRYPTO_SESSION_LOG_ERR(
1197                         "Crypto: Undefined Hash algo %u specified",
1198                         auth_xform->algo);
1199                 return -1;
1200         }
1201
1202         return 0;
1203 }
1204
1205 static int
1206 virtio_crypto_sym_pad_op_ctrl_req(
1207                 struct virtio_crypto_op_ctrl_req *ctrl,
1208                 struct rte_crypto_sym_xform *xform, bool is_chainned,
1209                 uint8_t *cipher_key_data, uint8_t *auth_key_data,
1210                 struct virtio_crypto_session *session)
1211 {
1212         int ret;
1213         struct rte_crypto_auth_xform *auth_xform = NULL;
1214         struct rte_crypto_cipher_xform *cipher_xform = NULL;
1215
1216         /* Get cipher xform from crypto xform chain */
1217         cipher_xform = virtio_crypto_get_cipher_xform(xform);
1218         if (cipher_xform) {
1219                 if (cipher_xform->key.length > VIRTIO_CRYPTO_MAX_KEY_SIZE) {
1220                         VIRTIO_CRYPTO_SESSION_LOG_ERR(
1221                                 "cipher key size cannot be longer than %u",
1222                                 VIRTIO_CRYPTO_MAX_KEY_SIZE);
1223                         return -1;
1224                 }
1225                 if (cipher_xform->iv.length > VIRTIO_CRYPTO_MAX_IV_SIZE) {
1226                         VIRTIO_CRYPTO_SESSION_LOG_ERR(
1227                                 "cipher IV size cannot be longer than %u",
1228                                 VIRTIO_CRYPTO_MAX_IV_SIZE);
1229                         return -1;
1230                 }
1231                 if (is_chainned)
1232                         ret = virtio_crypto_sym_pad_cipher_param(
1233                                 &ctrl->u.sym_create_session.u.chain.para
1234                                                 .cipher_param, cipher_xform);
1235                 else
1236                         ret = virtio_crypto_sym_pad_cipher_param(
1237                                 &ctrl->u.sym_create_session.u.cipher.para,
1238                                 cipher_xform);
1239
1240                 if (ret < 0) {
1241                         VIRTIO_CRYPTO_SESSION_LOG_ERR(
1242                                 "pad cipher parameter failed");
1243                         return -1;
1244                 }
1245
1246                 memcpy(cipher_key_data, cipher_xform->key.data,
1247                                 cipher_xform->key.length);
1248
1249                 session->iv.offset = cipher_xform->iv.offset;
1250                 session->iv.length = cipher_xform->iv.length;
1251         }
1252
1253         /* Get auth xform from crypto xform chain */
1254         auth_xform = virtio_crypto_get_auth_xform(xform);
1255         if (auth_xform) {
1256                 /* FIXME: support VIRTIO_CRYPTO_SYM_HASH_MODE_NESTED */
1257                 struct virtio_crypto_alg_chain_session_para *para =
1258                         &(ctrl->u.sym_create_session.u.chain.para);
1259                 if (auth_xform->key.length) {
1260                         if (auth_xform->key.length >
1261                                         VIRTIO_CRYPTO_MAX_KEY_SIZE) {
1262                                 VIRTIO_CRYPTO_SESSION_LOG_ERR(
1263                                 "auth key size cannot be longer than %u",
1264                                         VIRTIO_CRYPTO_MAX_KEY_SIZE);
1265                                 return -1;
1266                         }
1267                         para->hash_mode = VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH;
1268                         para->u.mac_param.auth_key_len =
1269                                 (uint32_t)auth_xform->key.length;
1270                         para->u.mac_param.hash_result_len =
1271                                 auth_xform->digest_length;
1272                         memcpy(auth_key_data, auth_xform->key.data,
1273                                         auth_xform->key.length);
1274                 } else {
1275                         para->hash_mode = VIRTIO_CRYPTO_SYM_HASH_MODE_PLAIN;
1276                         para->u.hash_param.hash_result_len =
1277                                 auth_xform->digest_length;
1278                 }
1279
1280                 ret = virtio_crypto_sym_pad_auth_param(ctrl, auth_xform);
1281                 if (ret < 0) {
1282                         VIRTIO_CRYPTO_SESSION_LOG_ERR("pad auth parameter "
1283                                                 "failed");
1284                         return -1;
1285                 }
1286         }
1287
1288         return 0;
1289 }
1290
1291 static int
1292 virtio_crypto_check_sym_configure_session_paras(
1293                 struct rte_cryptodev *dev,
1294                 struct rte_crypto_sym_xform *xform,
1295                 struct rte_cryptodev_sym_session *sym_sess,
1296                 struct rte_mempool *mempool)
1297 {
1298         if (unlikely(xform == NULL) || unlikely(sym_sess == NULL) ||
1299                 unlikely(mempool == NULL)) {
1300                 VIRTIO_CRYPTO_SESSION_LOG_ERR("NULL pointer");
1301                 return -1;
1302         }
1303
1304         if (virtio_crypto_check_sym_session_paras(dev) < 0)
1305                 return -1;
1306
1307         return 0;
1308 }
1309
1310 static int
1311 virtio_crypto_sym_configure_session(
1312                 struct rte_cryptodev *dev,
1313                 struct rte_crypto_sym_xform *xform,
1314                 struct rte_cryptodev_sym_session *sess,
1315                 struct rte_mempool *mempool)
1316 {
1317         int ret;
1318         struct virtio_crypto_session crypto_sess;
1319         void *session_private = &crypto_sess;
1320         struct virtio_crypto_session *session;
1321         struct virtio_crypto_op_ctrl_req *ctrl_req;
1322         enum virtio_crypto_cmd_id cmd_id;
1323         uint8_t cipher_key_data[VIRTIO_CRYPTO_MAX_KEY_SIZE] = {0};
1324         uint8_t auth_key_data[VIRTIO_CRYPTO_MAX_KEY_SIZE] = {0};
1325         struct virtio_crypto_hw *hw;
1326         struct virtqueue *control_vq;
1327
1328         PMD_INIT_FUNC_TRACE();
1329
1330         ret = virtio_crypto_check_sym_configure_session_paras(dev, xform,
1331                         sess, mempool);
1332         if (ret < 0) {
1333                 VIRTIO_CRYPTO_SESSION_LOG_ERR("Invalid parameters");
1334                 return ret;
1335         }
1336
1337         if (rte_mempool_get(mempool, &session_private)) {
1338                 VIRTIO_CRYPTO_SESSION_LOG_ERR(
1339                         "Couldn't get object from session mempool");
1340                 return -ENOMEM;
1341         }
1342
1343         session = (struct virtio_crypto_session *)session_private;
1344         memset(session, 0, sizeof(struct virtio_crypto_session));
1345         ctrl_req = &session->ctrl;
1346         ctrl_req->header.opcode = VIRTIO_CRYPTO_CIPHER_CREATE_SESSION;
1347         /* FIXME: support multiqueue */
1348         ctrl_req->header.queue_id = 0;
1349
1350         hw = dev->data->dev_private;
1351         control_vq = hw->cvq;
1352
1353         cmd_id = virtio_crypto_get_chain_order(xform);
1354         if (cmd_id == VIRTIO_CRYPTO_CMD_CIPHER_HASH)
1355                 ctrl_req->u.sym_create_session.u.chain.para.alg_chain_order
1356                         = VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_CIPHER_THEN_HASH;
1357         if (cmd_id == VIRTIO_CRYPTO_CMD_HASH_CIPHER)
1358                 ctrl_req->u.sym_create_session.u.chain.para.alg_chain_order
1359                         = VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_HASH_THEN_CIPHER;
1360
1361         switch (cmd_id) {
1362         case VIRTIO_CRYPTO_CMD_CIPHER_HASH:
1363         case VIRTIO_CRYPTO_CMD_HASH_CIPHER:
1364                 ctrl_req->u.sym_create_session.op_type
1365                         = VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING;
1366
1367                 ret = virtio_crypto_sym_pad_op_ctrl_req(ctrl_req,
1368                         xform, true, cipher_key_data, auth_key_data, session);
1369                 if (ret < 0) {
1370                         VIRTIO_CRYPTO_SESSION_LOG_ERR(
1371                                 "padding sym op ctrl req failed");
1372                         goto error_out;
1373                 }
1374                 ret = virtio_crypto_send_command(control_vq, ctrl_req,
1375                         cipher_key_data, auth_key_data, session);
1376                 if (ret < 0) {
1377                         VIRTIO_CRYPTO_SESSION_LOG_ERR(
1378                                 "create session failed: %d", ret);
1379                         goto error_out;
1380                 }
1381                 break;
1382         case VIRTIO_CRYPTO_CMD_CIPHER:
1383                 ctrl_req->u.sym_create_session.op_type
1384                         = VIRTIO_CRYPTO_SYM_OP_CIPHER;
1385                 ret = virtio_crypto_sym_pad_op_ctrl_req(ctrl_req, xform,
1386                         false, cipher_key_data, auth_key_data, session);
1387                 if (ret < 0) {
1388                         VIRTIO_CRYPTO_SESSION_LOG_ERR(
1389                                 "padding sym op ctrl req failed");
1390                         goto error_out;
1391                 }
1392                 ret = virtio_crypto_send_command(control_vq, ctrl_req,
1393                         cipher_key_data, NULL, session);
1394                 if (ret < 0) {
1395                         VIRTIO_CRYPTO_SESSION_LOG_ERR(
1396                                 "create session failed: %d", ret);
1397                         goto error_out;
1398                 }
1399                 break;
1400         default:
1401                 VIRTIO_CRYPTO_SESSION_LOG_ERR(
1402                         "Unsupported operation chain order parameter");
1403                 goto error_out;
1404         }
1405
1406         set_sym_session_private_data(sess, dev->driver_id,
1407                 session_private);
1408
1409         return 0;
1410
1411 error_out:
        /* give the unused session object back to its mempool */
        rte_mempool_put(mempool, session_private);
1412         return -1;
1413 }
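
/*
 * From the application side this callback is reached through the two-step
 * session API this driver was written against (pre-22.11); a hedged
 * sketch, with mempool creation and xform setup omitted:
 *
 *     struct rte_cryptodev_sym_session *sess =
 *             rte_cryptodev_sym_session_create(sess_mp);
 *     if (sess == NULL ||
 *         rte_cryptodev_sym_session_init(dev_id, sess,
 *                 &cipher_xform, sess_priv_mp) < 0)
 *             rte_exit(EXIT_FAILURE, "session setup failed\n");
 */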
1414
1415 static void
1416 virtio_crypto_dev_info_get(struct rte_cryptodev *dev,
1417                 struct rte_cryptodev_info *info)
1418 {
1419         struct virtio_crypto_hw *hw = dev->data->dev_private;
1420
1421         PMD_INIT_FUNC_TRACE();
1422
1423         if (info != NULL) {
1424                 info->driver_id = cryptodev_virtio_driver_id;
1425                 info->feature_flags = dev->feature_flags;
1426                 info->max_nb_queue_pairs = hw->max_dataqueues;
1427                 /* No limit on the number of sessions */
1428                 info->sym.max_nb_sessions = 0;
1429                 info->capabilities = hw->virtio_dev_capabilities;
1430         }
1431 }
1432
1433 static int
1434 crypto_virtio_pci_probe(
1435         struct rte_pci_driver *pci_drv __rte_unused,
1436         struct rte_pci_device *pci_dev)
1437 {
1438         struct rte_cryptodev_pmd_init_params init_params = {
1439                 .name = "",
1440                 .socket_id = pci_dev->device.numa_node,
1441                 .private_data_size = sizeof(struct virtio_crypto_hw)
1442         };
1443         char name[RTE_CRYPTODEV_NAME_MAX_LEN];
1444
1445         VIRTIO_CRYPTO_DRV_LOG_DBG("Found Crypto device at %02x:%02x.%x",
1446                         pci_dev->addr.bus,
1447                         pci_dev->addr.devid,
1448                         pci_dev->addr.function);
1449
1450         rte_pci_device_name(&pci_dev->addr, name, sizeof(name));
1451
1452         return crypto_virtio_create(name, pci_dev, &init_params);
1453 }
1454
1455 static int
1456 crypto_virtio_pci_remove(
1457         struct rte_pci_device *pci_dev __rte_unused)
1458 {
1459         struct rte_cryptodev *cryptodev;
1460         char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
1461
1462         if (pci_dev == NULL)
1463                 return -EINVAL;
1464
1465         rte_pci_device_name(&pci_dev->addr, cryptodev_name,
1466                         sizeof(cryptodev_name));
1467
1468         cryptodev = rte_cryptodev_pmd_get_named_dev(cryptodev_name);
1469         if (cryptodev == NULL)
1470                 return -ENODEV;
1471
1472         return virtio_crypto_dev_uninit(cryptodev);
1473 }
1474
1475 static struct rte_pci_driver rte_virtio_crypto_driver = {
1476         .id_table = pci_id_virtio_crypto_map,
1477         .drv_flags = 0,
1478         .probe = crypto_virtio_pci_probe,
1479         .remove = crypto_virtio_pci_remove
1480 };
1481
1482 static struct cryptodev_driver virtio_crypto_drv;
1483
1484 RTE_PMD_REGISTER_PCI(CRYPTODEV_NAME_VIRTIO_PMD, rte_virtio_crypto_driver);
1485 RTE_PMD_REGISTER_CRYPTO_DRIVER(virtio_crypto_drv,
1486         rte_virtio_crypto_driver.driver,
1487         cryptodev_virtio_driver_id);
1488 RTE_LOG_REGISTER_SUFFIX(virtio_crypto_logtype_init, init, NOTICE);
1489 RTE_LOG_REGISTER_SUFFIX(virtio_crypto_logtype_session, session, NOTICE);
1490 RTE_LOG_REGISTER_SUFFIX(virtio_crypto_logtype_rx, rx, NOTICE);
1491 RTE_LOG_REGISTER_SUFFIX(virtio_crypto_logtype_tx, tx, NOTICE);
1492 RTE_LOG_REGISTER_SUFFIX(virtio_crypto_logtype_driver, driver, NOTICE);