dpdk.git: drivers/crypto/virtio/virtio_cryptodev.c (commit ce5816b751f805b6ad0d7a0c8fbf6572e8409e18)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.
3  */
4 #include <stdbool.h>
5 #include <unistd.h>
6
7 #include <rte_common.h>
8 #include <rte_errno.h>
9 #include <rte_pci.h>
10 #include <rte_bus_pci.h>
11 #include <rte_cryptodev.h>
12 #include <rte_cryptodev_pmd.h>
13 #include <rte_eal.h>
14
15 #include "virtio_cryptodev.h"
16 #include "virtqueue.h"
17 #include "virtio_crypto_algs.h"
18 #include "virtio_crypto_capabilities.h"
19
20 int virtio_crypto_logtype_init;
21 int virtio_crypto_logtype_session;
22 int virtio_crypto_logtype_rx;
23 int virtio_crypto_logtype_tx;
24 int virtio_crypto_logtype_driver;
25
26 static int virtio_crypto_dev_configure(struct rte_cryptodev *dev,
27                 struct rte_cryptodev_config *config);
28 static int virtio_crypto_dev_start(struct rte_cryptodev *dev);
29 static void virtio_crypto_dev_stop(struct rte_cryptodev *dev);
30 static int virtio_crypto_dev_close(struct rte_cryptodev *dev);
31 static void virtio_crypto_dev_info_get(struct rte_cryptodev *dev,
32                 struct rte_cryptodev_info *dev_info);
33 static void virtio_crypto_dev_stats_get(struct rte_cryptodev *dev,
34                 struct rte_cryptodev_stats *stats);
35 static void virtio_crypto_dev_stats_reset(struct rte_cryptodev *dev);
36 static int virtio_crypto_qp_setup(struct rte_cryptodev *dev,
37                 uint16_t queue_pair_id,
38                 const struct rte_cryptodev_qp_conf *qp_conf,
39                 int socket_id,
40                 struct rte_mempool *session_pool);
41 static int virtio_crypto_qp_release(struct rte_cryptodev *dev,
42                 uint16_t queue_pair_id);
43 static void virtio_crypto_dev_free_mbufs(struct rte_cryptodev *dev);
44 static unsigned int virtio_crypto_sym_get_session_private_size(
45                 struct rte_cryptodev *dev);
46 static void virtio_crypto_sym_clear_session(struct rte_cryptodev *dev,
47                 struct rte_cryptodev_sym_session *sess);
48 static int virtio_crypto_sym_configure_session(struct rte_cryptodev *dev,
49                 struct rte_crypto_sym_xform *xform,
50                 struct rte_cryptodev_sym_session *session,
51                 struct rte_mempool *mp);
52
53 /*
54  * The set of PCI devices this driver supports
55  */
56 static const struct rte_pci_id pci_id_virtio_crypto_map[] = {
57         { RTE_PCI_DEVICE(VIRTIO_CRYPTO_PCI_VENDORID,
58                                 VIRTIO_CRYPTO_PCI_DEVICEID) },
59         { .vendor_id = 0, /* sentinel */ },
60 };
61
62 static const struct rte_cryptodev_capabilities virtio_capabilities[] = {
63         VIRTIO_SYM_CAPABILITIES,
64         RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
65 };
66
67 uint8_t cryptodev_virtio_driver_id;
68
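/*
 * A session-create control request is described to the device with up to
 * four indirect descriptor entries: the ctrl request, an optional cipher
 * key, an optional auth key, and the write-back session_input.
 */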
69 #define NUM_ENTRY_SYM_CREATE_SESSION 4
70
71 static int
72 virtio_crypto_send_command(struct virtqueue *vq,
73                 struct virtio_crypto_op_ctrl_req *ctrl, uint8_t *cipher_key,
74                 uint8_t *auth_key, struct virtio_crypto_session *session)
75 {
76         uint8_t idx = 0;
77         uint8_t needed = 1;
78         uint32_t head = 0;
79         uint32_t len_cipher_key = 0;
80         uint32_t len_auth_key = 0;
81         uint32_t len_ctrl_req = sizeof(struct virtio_crypto_op_ctrl_req);
82         uint32_t len_session_input = sizeof(struct virtio_crypto_session_input);
83         uint32_t len_total = 0;
84         uint32_t input_offset = 0;
85         void *virt_addr_started = NULL;
86         phys_addr_t phys_addr_started;
87         struct vring_desc *desc;
88         uint32_t desc_offset;
89         struct virtio_crypto_session_input *input;
90         int ret;
91
92         PMD_INIT_FUNC_TRACE();
93
94         if (session == NULL) {
95                 VIRTIO_CRYPTO_SESSION_LOG_ERR("session is NULL.");
96                 return -EINVAL;
97         }
98         /* cipher-only sessions are supported: auth_key may be NULL, but a cipher key is mandatory */
99         if (!cipher_key) {
100                 VIRTIO_CRYPTO_SESSION_LOG_ERR("cipher key is NULL.");
101                 return -EINVAL;
102         }
103
104         head = vq->vq_desc_head_idx;
105         VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_desc_head_idx = %d, vq = %p",
106                                         head, vq);
107
108         if (vq->vq_free_cnt < needed) {
109                 VIRTIO_CRYPTO_SESSION_LOG_ERR("Not enough free entries");
110                 return -ENOSPC;
111         }
112
113         /* calculate the length of cipher key */
114         if (cipher_key) {
115                 switch (ctrl->u.sym_create_session.op_type) {
116                 case VIRTIO_CRYPTO_SYM_OP_CIPHER:
117                         len_cipher_key
118                                 = ctrl->u.sym_create_session.u.cipher
119                                                         .para.keylen;
120                         break;
121                 case VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING:
122                         len_cipher_key
123                                 = ctrl->u.sym_create_session.u.chain
124                                         .para.cipher_param.keylen;
125                         break;
126                 default:
127                         VIRTIO_CRYPTO_SESSION_LOG_ERR("invalid op type");
128                         return -EINVAL;
129                 }
130         }
131
132         /* calculate the length of auth key */
133         if (auth_key) {
134                 len_auth_key =
135                         ctrl->u.sym_create_session.u.chain.para.u.mac_param
136                                 .auth_key_len;
137         }
138
139         /*
140          * malloc memory to store indirect vring_desc entries, including
141          * ctrl request, cipher key, auth key, session input and desc vring
142          */
143         desc_offset = len_ctrl_req + len_cipher_key + len_auth_key
144                 + len_session_input;
145         virt_addr_started = rte_malloc(NULL,
146                 desc_offset + NUM_ENTRY_SYM_CREATE_SESSION
147                         * sizeof(struct vring_desc), RTE_CACHE_LINE_SIZE);
148         if (virt_addr_started == NULL) {
149                 VIRTIO_CRYPTO_SESSION_LOG_ERR("not enough heap memory");
150                 return -ENOSPC;
151         }
152         phys_addr_started = rte_malloc_virt2iova(virt_addr_started);
153
154         /* address to store indirect vring desc entries */
155         desc = (struct vring_desc *)
156                 ((uint8_t *)virt_addr_started + desc_offset);
157
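        /*
         * Layout of the single allocation:
         * | ctrl req | cipher key | auth key | session_input | indirect desc table |
         * The desc[] entries built below all point back into this buffer, and
         * the main ring references the table through one INDIRECT descriptor.
         */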
158         /*  ctrl req part */
159         memcpy(virt_addr_started, ctrl, len_ctrl_req);
160         desc[idx].addr = phys_addr_started;
161         desc[idx].len = len_ctrl_req;
162         desc[idx].flags = VRING_DESC_F_NEXT;
163         desc[idx].next = idx + 1;
164         idx++;
165         len_total += len_ctrl_req;
166         input_offset += len_ctrl_req;
167
168         /* cipher key part */
169         if (len_cipher_key > 0) {
170                 memcpy((uint8_t *)virt_addr_started + len_total,
171                         cipher_key, len_cipher_key);
172
173                 desc[idx].addr = phys_addr_started + len_total;
174                 desc[idx].len = len_cipher_key;
175                 desc[idx].flags = VRING_DESC_F_NEXT;
176                 desc[idx].next = idx + 1;
177                 idx++;
178                 len_total += len_cipher_key;
179                 input_offset += len_cipher_key;
180         }
181
182         /* auth key part */
183         if (len_auth_key > 0) {
184                 memcpy((uint8_t *)virt_addr_started + len_total,
185                         auth_key, len_auth_key);
186
187                 desc[idx].addr = phys_addr_started + len_total;
188                 desc[idx].len = len_auth_key;
189                 desc[idx].flags = VRING_DESC_F_NEXT;
190                 desc[idx].next = idx + 1;
191                 idx++;
192                 len_total += len_auth_key;
193                 input_offset += len_auth_key;
194         }
195
196         /* session input part: the device writes status and session_id back here */
197         input = (struct virtio_crypto_session_input *)
198                 ((uint8_t *)virt_addr_started + input_offset);
199         input->status = VIRTIO_CRYPTO_ERR;
200         input->session_id = ~0ULL;
201         desc[idx].addr = phys_addr_started + len_total;
202         desc[idx].len = len_session_input;
203         desc[idx].flags = VRING_DESC_F_WRITE;
204         idx++;
205
206         /* use a single desc entry */
207         vq->vq_ring.desc[head].addr = phys_addr_started + desc_offset;
208         vq->vq_ring.desc[head].len = idx * sizeof(struct vring_desc);
209         vq->vq_ring.desc[head].flags = VRING_DESC_F_INDIRECT;
210         vq->vq_free_cnt--;
211
212         vq->vq_desc_head_idx = vq->vq_ring.desc[head].next;
213
214         vq_update_avail_ring(vq, head);
215         vq_update_avail_idx(vq);
216
217         VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_queue_index = %d",
218                                         vq->vq_queue_index);
219
220         virtqueue_notify(vq);
221
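        /*
         * The control queue is handled synchronously: poll the used ring
         * until the device has completed the request.
         */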
222         rte_rmb();
223         while (vq->vq_used_cons_idx == vq->vq_ring.used->idx) {
224                 rte_rmb();
225                 usleep(100);
226         }
227
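        /* Reclaim the consumed descriptors back onto the free list */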
228         while (vq->vq_used_cons_idx != vq->vq_ring.used->idx) {
229                 uint32_t idx, desc_idx, used_idx;
230                 struct vring_used_elem *uep;
231
232                 used_idx = (uint32_t)(vq->vq_used_cons_idx
233                                 & (vq->vq_nentries - 1));
234                 uep = &vq->vq_ring.used->ring[used_idx];
235                 idx = (uint32_t) uep->id;
236                 desc_idx = idx;
237
238                 while (vq->vq_ring.desc[desc_idx].flags & VRING_DESC_F_NEXT) {
239                         desc_idx = vq->vq_ring.desc[desc_idx].next;
240                         vq->vq_free_cnt++;
241                 }
242
243                 vq->vq_ring.desc[desc_idx].next = vq->vq_desc_head_idx;
244                 vq->vq_desc_head_idx = idx;
245
246                 vq->vq_used_cons_idx++;
247                 vq->vq_free_cnt++;
248         }
249
250         VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_free_cnt=%d\n"
251                         "vq->vq_desc_head_idx=%d",
252                         vq->vq_free_cnt, vq->vq_desc_head_idx);
253
254         /* get the result */
255         if (input->status != VIRTIO_CRYPTO_OK) {
256                 VIRTIO_CRYPTO_SESSION_LOG_ERR("Something went wrong on the backend! "
257                                 "status=%u, session_id=%" PRIu64 "",
258                                 input->status, input->session_id);
259                 rte_free(virt_addr_started);
260                 ret = -1;
261         } else {
262                 session->session_id = input->session_id;
263
264                 VIRTIO_CRYPTO_SESSION_LOG_INFO("Create session successfully, "
265                                 "session_id=%" PRIu64 "", input->session_id);
266                 rte_free(virt_addr_started);
267                 ret = 0;
268         }
269
270         return ret;
271 }
272
273 void
274 virtio_crypto_queue_release(struct virtqueue *vq)
275 {
276         struct virtio_crypto_hw *hw;
277
278         PMD_INIT_FUNC_TRACE();
279
280         if (vq) {
281                 hw = vq->hw;
282                 /* Select and deactivate the queue */
283                 VTPCI_OPS(hw)->del_queue(hw, vq);
284
285                 rte_memzone_free(vq->mz);
286                 rte_mempool_free(vq->mpool);
287                 rte_free(vq);
288         }
289 }
290
291 #define MPOOL_MAX_NAME_SZ 32
292
293 int
294 virtio_crypto_queue_setup(struct rte_cryptodev *dev,
295                 int queue_type,
296                 uint16_t vtpci_queue_idx,
297                 uint16_t nb_desc,
298                 int socket_id,
299                 struct virtqueue **pvq)
300 {
301         char vq_name[VIRTQUEUE_MAX_NAME_SZ];
302         char mpool_name[MPOOL_MAX_NAME_SZ];
303         const struct rte_memzone *mz;
304         unsigned int vq_size, size;
305         struct virtio_crypto_hw *hw = dev->data->dev_private;
306         struct virtqueue *vq = NULL;
307         uint32_t i = 0;
308         uint32_t j;
309
310         PMD_INIT_FUNC_TRACE();
311
312         VIRTIO_CRYPTO_INIT_LOG_DBG("setting up queue: %u", vtpci_queue_idx);
313
314         /*
315          * Read the virtqueue size from the Queue Size field
316          * It is always a power of 2; a value of 0 means the virtqueue does not exist
317          */
318         vq_size = VTPCI_OPS(hw)->get_queue_num(hw, vtpci_queue_idx);
319         if (vq_size == 0) {
320                 VIRTIO_CRYPTO_INIT_LOG_ERR("virtqueue does not exist");
321                 return -EINVAL;
322         }
323         VIRTIO_CRYPTO_INIT_LOG_DBG("vq_size: %u", vq_size);
324
325         if (!rte_is_power_of_2(vq_size)) {
326                 VIRTIO_CRYPTO_INIT_LOG_ERR("virtqueue size is not a power of 2");
327                 return -EINVAL;
328         }
329
330         if (queue_type == VTCRYPTO_DATAQ) {
331                 snprintf(vq_name, sizeof(vq_name), "dev%d_dataqueue%d",
332                                 dev->data->dev_id, vtpci_queue_idx);
333                 snprintf(mpool_name, sizeof(mpool_name),
334                                 "dev%d_dataqueue%d_mpool",
335                                 dev->data->dev_id, vtpci_queue_idx);
336         } else if (queue_type == VTCRYPTO_CTRLQ) {
337                 snprintf(vq_name, sizeof(vq_name), "dev%d_controlqueue",
338                                 dev->data->dev_id);
339                 snprintf(mpool_name, sizeof(mpool_name),
340                                 "dev%d_controlqueue_mpool",
341                                 dev->data->dev_id);
342         }
343         size = RTE_ALIGN_CEIL(sizeof(*vq) +
344                                 vq_size * sizeof(struct vq_desc_extra),
345                                 RTE_CACHE_LINE_SIZE);
346         vq = rte_zmalloc_socket(vq_name, size, RTE_CACHE_LINE_SIZE,
347                                 socket_id);
348         if (vq == NULL) {
349                 VIRTIO_CRYPTO_INIT_LOG_ERR("Can not allocate virtqueue");
350                 return -ENOMEM;
351         }
352
353         if (queue_type == VTCRYPTO_DATAQ) {
354                 /* pre-allocate a mempool and use it in the data plane to
355                  * improve performance
356                  */
357                 vq->mpool = rte_mempool_lookup(mpool_name);
358                 if (vq->mpool == NULL)
359                         vq->mpool = rte_mempool_create(mpool_name,
360                                         vq_size,
361                                         sizeof(struct virtio_crypto_op_cookie),
362                                         RTE_CACHE_LINE_SIZE, 0,
363                                         NULL, NULL, NULL, NULL, socket_id,
364                                         0);
365                 if (!vq->mpool) {
366                         VIRTIO_CRYPTO_DRV_LOG_ERR("Virtio Crypto PMD "
367                                         "Cannot create mempool");
368                         goto mpool_create_err;
369                 }
370                 for (i = 0; i < vq_size; i++) {
371                         vq->vq_descx[i].cookie =
372                                 rte_zmalloc("crypto PMD op cookie pointer",
373                                         sizeof(struct virtio_crypto_op_cookie),
374                                         RTE_CACHE_LINE_SIZE);
375                         if (vq->vq_descx[i].cookie == NULL) {
376                                 VIRTIO_CRYPTO_DRV_LOG_ERR("Failed to "
377                                                 "alloc mem for cookie");
378                                 goto cookie_alloc_err;
379                         }
380                 }
381         }
382
383         vq->hw = hw;
384         vq->dev_id = dev->data->dev_id;
385         vq->vq_queue_index = vtpci_queue_idx;
386         vq->vq_nentries = vq_size;
387
388         /*
389          * Using part of the vring entries is permitted, but the maximum
390          * is vq_size
391          */
392         if (nb_desc == 0 || nb_desc > vq_size)
393                 nb_desc = vq_size;
394         vq->vq_free_cnt = nb_desc;
395
396         /*
397          * Reserve a memzone for vring elements
398          */
399         size = vring_size(vq_size, VIRTIO_PCI_VRING_ALIGN);
400         vq->vq_ring_size = RTE_ALIGN_CEIL(size, VIRTIO_PCI_VRING_ALIGN);
401         VIRTIO_CRYPTO_INIT_LOG_DBG("%s vring_size: %d, rounded_vring_size: %d",
402                         (queue_type == VTCRYPTO_DATAQ) ? "dataq" : "ctrlq",
403                         size, vq->vq_ring_size);
404
405         mz = rte_memzone_reserve_aligned(vq_name, vq->vq_ring_size,
406                         socket_id, 0, VIRTIO_PCI_VRING_ALIGN);
407         if (mz == NULL) {
408                 if (rte_errno == EEXIST)
409                         mz = rte_memzone_lookup(vq_name);
410                 if (mz == NULL) {
411                         VIRTIO_CRYPTO_INIT_LOG_ERR("not enough memory");
412                         goto mz_reserve_err;
413                 }
414         }
415
416         /*
417          * The virtio PCI VIRTIO_PCI_QUEUE_PFN register is 32 bit and
418          * only accepts a 32 bit page frame number.
419          * Check if the allocated physical memory exceeds 16TB.
420          */
421         if ((mz->phys_addr + vq->vq_ring_size - 1)
422                                 >> (VIRTIO_PCI_QUEUE_ADDR_SHIFT + 32)) {
423                 VIRTIO_CRYPTO_INIT_LOG_ERR("vring address shouldn't be "
424                                         "above 16TB!");
425                 goto vring_addr_err;
426         }
427
428         memset(mz->addr, 0, mz->len);
429         vq->mz = mz;
430         vq->vq_ring_mem = mz->phys_addr;
431         vq->vq_ring_virt_mem = mz->addr;
432         VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_ring_mem(physical): 0x%"PRIx64,
433                                         (uint64_t)mz->phys_addr);
434         VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_ring_virt_mem: 0x%"PRIx64,
435                                         (uint64_t)(uintptr_t)mz->addr);
436
437         *pvq = vq;
438
439         return 0;
440
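/*
 * Error labels below unwind in reverse order of allocation: the memzone,
 * then the op-cookie allocations and the mempool, and finally the
 * virtqueue structure itself.
 */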
441 vring_addr_err:
442         rte_memzone_free(mz);
443 mz_reserve_err:
444 cookie_alloc_err:
445         rte_mempool_free(vq->mpool);
446         if (i != 0) {
447                 for (j = 0; j < i; j++)
448                         rte_free(vq->vq_descx[j].cookie);
449         }
450 mpool_create_err:
451         rte_free(vq);
452         return -ENOMEM;
453 }
454
455 static int
456 virtio_crypto_ctrlq_setup(struct rte_cryptodev *dev, uint16_t queue_idx)
457 {
458         int ret;
459         struct virtqueue *vq;
460         struct virtio_crypto_hw *hw = dev->data->dev_private;
461
462         /* if virtio device has started, do not touch the virtqueues */
463         if (dev->data->dev_started)
464                 return 0;
465
466         PMD_INIT_FUNC_TRACE();
467
468         ret = virtio_crypto_queue_setup(dev, VTCRYPTO_CTRLQ, queue_idx,
469                         0, SOCKET_ID_ANY, &vq);
470         if (ret < 0) {
471                 VIRTIO_CRYPTO_INIT_LOG_ERR("control vq initialization failed");
472                 return ret;
473         }
474
475         hw->cvq = vq;
476
477         return 0;
478 }
479
480 static void
481 virtio_crypto_free_queues(struct rte_cryptodev *dev)
482 {
483         unsigned int i;
484         struct virtio_crypto_hw *hw = dev->data->dev_private;
485
486         PMD_INIT_FUNC_TRACE();
487
488         /* control queue release */
489         virtio_crypto_queue_release(hw->cvq);
490
491         /* data queue release */
492         for (i = 0; i < hw->max_dataqueues; i++)
493                 virtio_crypto_queue_release(dev->data->queue_pairs[i]);
494 }
495
496 static int
497 virtio_crypto_dev_close(struct rte_cryptodev *dev __rte_unused)
498 {
499         return 0;
500 }
501
502 /*
503  * dev_ops for virtio, bare necessities for basic operation
504  */
505 static struct rte_cryptodev_ops virtio_crypto_dev_ops = {
506         /* Device related operations */
507         .dev_configure                   = virtio_crypto_dev_configure,
508         .dev_start                       = virtio_crypto_dev_start,
509         .dev_stop                        = virtio_crypto_dev_stop,
510         .dev_close                       = virtio_crypto_dev_close,
511         .dev_infos_get                   = virtio_crypto_dev_info_get,
512
513         .stats_get                       = virtio_crypto_dev_stats_get,
514         .stats_reset                     = virtio_crypto_dev_stats_reset,
515
516         .queue_pair_setup                = virtio_crypto_qp_setup,
517         .queue_pair_release              = virtio_crypto_qp_release,
518         .queue_pair_count                = NULL,
519
520         /* Crypto related operations */
521         .session_get_size       = virtio_crypto_sym_get_session_private_size,
522         .session_configure      = virtio_crypto_sym_configure_session,
523         .session_clear          = virtio_crypto_sym_clear_session,
524         .qp_attach_session = NULL,
525         .qp_detach_session = NULL
526 };
527
528 static void
529 virtio_crypto_update_stats(struct rte_cryptodev *dev,
530                 struct rte_cryptodev_stats *stats)
531 {
532         unsigned int i;
533         struct virtio_crypto_hw *hw = dev->data->dev_private;
534
535         PMD_INIT_FUNC_TRACE();
536
537         if (stats == NULL) {
538                 VIRTIO_CRYPTO_DRV_LOG_ERR("invalid pointer");
539                 return;
540         }
541
542         for (i = 0; i < hw->max_dataqueues; i++) {
543                 const struct virtqueue *data_queue
544                         = dev->data->queue_pairs[i];
545                 if (data_queue == NULL)
546                         continue;
547
548                 stats->enqueued_count += data_queue->packets_sent_total;
549                 stats->enqueue_err_count += data_queue->packets_sent_failed;
550
551                 stats->dequeued_count += data_queue->packets_received_total;
552                 stats->dequeue_err_count
553                         += data_queue->packets_received_failed;
554         }
555 }
556
557 static void
558 virtio_crypto_dev_stats_get(struct rte_cryptodev *dev,
559                 struct rte_cryptodev_stats *stats)
560 {
561         PMD_INIT_FUNC_TRACE();
562
563         virtio_crypto_update_stats(dev, stats);
564 }
565
566 static void
567 virtio_crypto_dev_stats_reset(struct rte_cryptodev *dev)
568 {
569         unsigned int i;
570         struct virtio_crypto_hw *hw = dev->data->dev_private;
571
572         PMD_INIT_FUNC_TRACE();
573
574         for (i = 0; i < hw->max_dataqueues; i++) {
575                 struct virtqueue *data_queue = dev->data->queue_pairs[i];
576                 if (data_queue == NULL)
577                         continue;
578
579                 data_queue->packets_sent_total = 0;
580                 data_queue->packets_sent_failed = 0;
581
582                 data_queue->packets_received_total = 0;
583                 data_queue->packets_received_failed = 0;
584         }
585 }
586
587 static int
588 virtio_crypto_qp_setup(struct rte_cryptodev *dev, uint16_t queue_pair_id,
589                 const struct rte_cryptodev_qp_conf *qp_conf,
590                 int socket_id,
591                 struct rte_mempool *session_pool __rte_unused)
592 {
593         int ret;
594         struct virtqueue *vq;
595
596         PMD_INIT_FUNC_TRACE();
597
598         /* if virtio dev is started, do not touch the virtqueues */
599         if (dev->data->dev_started)
600                 return 0;
601
602         ret = virtio_crypto_queue_setup(dev, VTCRYPTO_DATAQ, queue_pair_id,
603                         qp_conf->nb_descriptors, socket_id, &vq);
604         if (ret < 0) {
605                 VIRTIO_CRYPTO_INIT_LOG_ERR(
606                         "virtio crypto data queue initialization failed");
607                 return ret;
608         }
609
610         dev->data->queue_pairs[queue_pair_id] = vq;
611
612         return 0;
613 }
614
615 static int
616 virtio_crypto_qp_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
617 {
618         struct virtqueue *vq
619                 = (struct virtqueue *)dev->data->queue_pairs[queue_pair_id];
620
621         PMD_INIT_FUNC_TRACE();
622
623         if (vq == NULL) {
624                 VIRTIO_CRYPTO_DRV_LOG_DBG("vq already freed");
625                 return 0;
626         }
627
628         virtio_crypto_queue_release(vq);
629         return 0;
630 }
631
632 static int
633 virtio_negotiate_features(struct virtio_crypto_hw *hw, uint64_t req_features)
634 {
635         uint64_t host_features;
636
637         PMD_INIT_FUNC_TRACE();
638
639         /* Prepare guest_features: feature that driver wants to support */
640         VIRTIO_CRYPTO_INIT_LOG_DBG("guest_features before negotiate = %" PRIx64,
641                 req_features);
642
643         /* Read device(host) feature bits */
644         host_features = VTPCI_OPS(hw)->get_features(hw);
645         VIRTIO_CRYPTO_INIT_LOG_DBG("host_features before negotiate = %" PRIx64,
646                 host_features);
647
648         /*
649          * Negotiate features: Subset of device feature bits are written back
650          * guest feature bits.
651          */
652         hw->guest_features = req_features;
653         hw->guest_features = vtpci_cryptodev_negotiate_features(hw,
654                                                         host_features);
655         VIRTIO_CRYPTO_INIT_LOG_DBG("features after negotiate = %" PRIx64,
656                 hw->guest_features);
657
658         if (hw->modern) {
659                 if (!vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) {
660                         VIRTIO_CRYPTO_INIT_LOG_ERR(
661                                 "VIRTIO_F_VERSION_1 features is not enabled.");
662                         return -1;
663                 }
664                 vtpci_cryptodev_set_status(hw,
665                         VIRTIO_CONFIG_STATUS_FEATURES_OK);
666                 if (!(vtpci_cryptodev_get_status(hw) &
667                         VIRTIO_CONFIG_STATUS_FEATURES_OK)) {
668                         VIRTIO_CRYPTO_INIT_LOG_ERR("failed to set FEATURES_OK "
669                                                 "status!");
670                         return -1;
671                 }
672         }
673
674         hw->req_guest_features = req_features;
675
676         return 0;
677 }
678
679 /* reset device and renegotiate features if needed */
680 static int
681 virtio_crypto_init_device(struct rte_cryptodev *cryptodev,
682         uint64_t req_features)
683 {
684         struct virtio_crypto_hw *hw = cryptodev->data->dev_private;
685         struct virtio_crypto_config local_config;
686         struct virtio_crypto_config *config = &local_config;
687
688         PMD_INIT_FUNC_TRACE();
689
690         /* Reset the device although not necessary at startup */
691         vtpci_cryptodev_reset(hw);
692
693         /* Tell the host we've noticed this device. */
694         vtpci_cryptodev_set_status(hw, VIRTIO_CONFIG_STATUS_ACK);
695
696         /* Tell the host we've known how to drive the device. */
697         vtpci_cryptodev_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER);
698         if (virtio_negotiate_features(hw, req_features) < 0)
699                 return -1;
700
701         /* Get status of the device */
702         vtpci_read_cryptodev_config(hw,
703                 offsetof(struct virtio_crypto_config, status),
704                 &config->status, sizeof(config->status));
705         if (config->status != VIRTIO_CRYPTO_S_HW_READY) {
706                 VIRTIO_CRYPTO_DRV_LOG_ERR("accelerator hardware is "
707                                 "not ready");
708                 return -1;
709         }
710
711         /* Get number of data queues */
712         vtpci_read_cryptodev_config(hw,
713                 offsetof(struct virtio_crypto_config, max_dataqueues),
714                 &config->max_dataqueues,
715                 sizeof(config->max_dataqueues));
716         hw->max_dataqueues = config->max_dataqueues;
717
718         VIRTIO_CRYPTO_INIT_LOG_DBG("hw->max_dataqueues=%d",
719                 hw->max_dataqueues);
720
721         return 0;
722 }
723
724 /*
725  * This function is based on probe() function
726  * It returns 0 on success.
727  */
728 static int
729 crypto_virtio_create(const char *name, struct rte_pci_device *pci_dev,
730                 struct rte_cryptodev_pmd_init_params *init_params)
731 {
732         struct rte_cryptodev *cryptodev;
733         struct virtio_crypto_hw *hw;
734
735         PMD_INIT_FUNC_TRACE();
736
737         cryptodev = rte_cryptodev_pmd_create(name, &pci_dev->device,
738                                         init_params);
739         if (cryptodev == NULL)
740                 return -ENODEV;
741
742         cryptodev->driver_id = cryptodev_virtio_driver_id;
743         cryptodev->dev_ops = &virtio_crypto_dev_ops;
744
745         cryptodev->enqueue_burst = virtio_crypto_pkt_tx_burst;
746         cryptodev->dequeue_burst = virtio_crypto_pkt_rx_burst;
747
748         cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
749                 RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING;
750
751         hw = cryptodev->data->dev_private;
752         hw->dev_id = cryptodev->data->dev_id;
753         hw->virtio_dev_capabilities = virtio_capabilities;
754
755         VIRTIO_CRYPTO_INIT_LOG_DBG("dev %d vendorID=0x%x deviceID=0x%x",
756                 cryptodev->data->dev_id, pci_dev->id.vendor_id,
757                 pci_dev->id.device_id);
758
759         /* pci device init */
760         if (vtpci_cryptodev_init(pci_dev, hw))
761                 return -1;
762
763         if (virtio_crypto_init_device(cryptodev,
764                         VIRTIO_CRYPTO_PMD_GUEST_FEATURES) < 0)
765                 return -1;
766
767         return 0;
768 }
769
770 static int
771 virtio_crypto_dev_uninit(struct rte_cryptodev *cryptodev)
772 {
773         struct virtio_crypto_hw *hw = cryptodev->data->dev_private;
774
775         PMD_INIT_FUNC_TRACE();
776
777         if (rte_eal_process_type() == RTE_PROC_SECONDARY)
778                 return -EPERM;
779
780         if (cryptodev->data->dev_started) {
781                 virtio_crypto_dev_stop(cryptodev);
782                 virtio_crypto_dev_close(cryptodev);
783         }
784
785         cryptodev->dev_ops = NULL;
786         cryptodev->enqueue_burst = NULL;
787         cryptodev->dequeue_burst = NULL;
788
789         /* release control queue */
790         virtio_crypto_queue_release(hw->cvq);
791
792         rte_free(cryptodev->data);
793         cryptodev->data = NULL;
794
795         VIRTIO_CRYPTO_DRV_LOG_INFO("dev_uninit completed");
796
797         return 0;
798 }
799
800 static int
801 virtio_crypto_dev_configure(struct rte_cryptodev *cryptodev,
802         struct rte_cryptodev_config *config __rte_unused)
803 {
804         struct virtio_crypto_hw *hw = cryptodev->data->dev_private;
805
806         PMD_INIT_FUNC_TRACE();
807
808         if (virtio_crypto_init_device(cryptodev,
809                         VIRTIO_CRYPTO_PMD_GUEST_FEATURES) < 0)
810                 return -1;
811
812         /* setup control queue
813          * [0, 1, ... ,(config->max_dataqueues - 1)] are data queues
814          * the queue at index config->max_dataqueues is the control queue
815          */
816         if (virtio_crypto_ctrlq_setup(cryptodev, hw->max_dataqueues) < 0) {
817                 VIRTIO_CRYPTO_INIT_LOG_ERR("control queue setup error");
818                 return -1;
819         }
820         virtio_crypto_ctrlq_start(cryptodev);
821
822         return 0;
823 }
824
825 static void
826 virtio_crypto_dev_stop(struct rte_cryptodev *dev)
827 {
828         struct virtio_crypto_hw *hw = dev->data->dev_private;
829
830         PMD_INIT_FUNC_TRACE();
831         VIRTIO_CRYPTO_DRV_LOG_DBG("virtio_dev_stop");
832
833         vtpci_cryptodev_reset(hw);
834
835         virtio_crypto_dev_free_mbufs(dev);
836         virtio_crypto_free_queues(dev);
837
838         dev->data->dev_started = 0;
839 }
840
841 static int
842 virtio_crypto_dev_start(struct rte_cryptodev *dev)
843 {
844         struct virtio_crypto_hw *hw = dev->data->dev_private;
845
846         if (dev->data->dev_started)
847                 return 0;
848
849         /* Do final configuration before queue engine starts */
850         virtio_crypto_dataq_start(dev);
851         vtpci_cryptodev_reinit_complete(hw);
852
853         dev->data->dev_started = 1;
854
855         return 0;
856 }
857
858 static void
859 virtio_crypto_dev_free_mbufs(struct rte_cryptodev *dev)
860 {
861         uint32_t i;
862         struct virtio_crypto_hw *hw = dev->data->dev_private;
863
864         for (i = 0; i < hw->max_dataqueues; i++) {
865                 VIRTIO_CRYPTO_INIT_LOG_DBG("Before freeing dataq[%d] used "
866                         "and unused buf", i);
867                 VIRTQUEUE_DUMP((struct virtqueue *)
868                         dev->data->queue_pairs[i]);
869
870                 VIRTIO_CRYPTO_INIT_LOG_DBG("queue_pairs[%d]=%p",
871                                 i, dev->data->queue_pairs[i]);
872
873                 virtqueue_detatch_unused(dev->data->queue_pairs[i]);
874
875                 VIRTIO_CRYPTO_INIT_LOG_DBG("After freeing dataq[%d] used and "
876                                         "unused buf", i);
877                 VIRTQUEUE_DUMP(
878                         (struct virtqueue *)dev->data->queue_pairs[i]);
879         }
880 }
881
882 static unsigned int
883 virtio_crypto_sym_get_session_private_size(
884                 struct rte_cryptodev *dev __rte_unused)
885 {
886         PMD_INIT_FUNC_TRACE();
887
888         return RTE_ALIGN_CEIL(sizeof(struct virtio_crypto_session), 16);
889 }
890
891 static int
892 virtio_crypto_check_sym_session_paras(
893                 struct rte_cryptodev *dev)
894 {
895         struct virtio_crypto_hw *hw;
896
897         PMD_INIT_FUNC_TRACE();
898
899         if (unlikely(dev == NULL)) {
900                 VIRTIO_CRYPTO_SESSION_LOG_ERR("dev is NULL");
901                 return -1;
902         }
903         if (unlikely(dev->data == NULL)) {
904                 VIRTIO_CRYPTO_SESSION_LOG_ERR("dev->data is NULL");
905                 return -1;
906         }
907         hw = dev->data->dev_private;
908         if (unlikely(hw == NULL)) {
909                 VIRTIO_CRYPTO_SESSION_LOG_ERR("hw is NULL");
910                 return -1;
911         }
912         if (unlikely(hw->cvq == NULL)) {
913                 VIRTIO_CRYPTO_SESSION_LOG_ERR("vq is NULL");
914                 return -1;
915         }
916
917         return 0;
918 }
919
920 static int
921 virtio_crypto_check_sym_clear_session_paras(
922                 struct rte_cryptodev *dev,
923                 struct rte_cryptodev_sym_session *sess)
924 {
925         PMD_INIT_FUNC_TRACE();
926
927         if (sess == NULL) {
928                 VIRTIO_CRYPTO_SESSION_LOG_ERR("sym_session is NULL");
929                 return -1;
930         }
931
932         return virtio_crypto_check_sym_session_paras(dev);
933 }
934
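/*
 * A destroy-session control request needs only two indirect descriptor
 * entries: the ctrl request and the write-back inhdr status byte.
 */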
935 #define NUM_ENTRY_SYM_CLEAR_SESSION 2
936
937 static void
938 virtio_crypto_sym_clear_session(
939                 struct rte_cryptodev *dev,
940                 struct rte_cryptodev_sym_session *sess)
941 {
942         struct virtio_crypto_hw *hw;
943         struct virtqueue *vq;
944         struct virtio_crypto_session *session;
945         struct virtio_crypto_op_ctrl_req *ctrl;
946         struct vring_desc *desc;
947         uint8_t *status;
948         uint8_t needed = 1;
949         uint32_t head;
950         uint8_t *malloc_virt_addr;
951         uint64_t malloc_phys_addr;
952         uint8_t len_inhdr = sizeof(struct virtio_crypto_inhdr);
953         uint32_t len_op_ctrl_req = sizeof(struct virtio_crypto_op_ctrl_req);
954         uint32_t desc_offset = len_op_ctrl_req + len_inhdr;
955
956         PMD_INIT_FUNC_TRACE();
957
958         if (virtio_crypto_check_sym_clear_session_paras(dev, sess) < 0)
959                 return;
960
961         hw = dev->data->dev_private;
962         vq = hw->cvq;
963         session = (struct virtio_crypto_session *)get_session_private_data(
964                 sess, cryptodev_virtio_driver_id);
965         if (session == NULL) {
966                 VIRTIO_CRYPTO_SESSION_LOG_ERR("Invalid session parameter");
967                 return;
968         }
969
970         VIRTIO_CRYPTO_SESSION_LOG_INFO("vq->vq_desc_head_idx = %d, "
971                         "vq = %p", vq->vq_desc_head_idx, vq);
972
973         if (vq->vq_free_cnt < needed) {
974                 VIRTIO_CRYPTO_SESSION_LOG_ERR(
975                                 "vq->vq_free_cnt = %d is less than %d, "
976                                 "not enough", vq->vq_free_cnt, needed);
977                 return;
978         }
979
980         /*
981          * malloc memory to store information of ctrl request op,
982          * returned status and desc vring
983          */
984         malloc_virt_addr = rte_malloc(NULL, len_op_ctrl_req + len_inhdr
985                 + NUM_ENTRY_SYM_CLEAR_SESSION
986                 * sizeof(struct vring_desc), RTE_CACHE_LINE_SIZE);
987         if (malloc_virt_addr == NULL) {
988                 VIRTIO_CRYPTO_SESSION_LOG_ERR("not enough heap room");
989                 return;
990         }
991         malloc_phys_addr = rte_malloc_virt2iova(malloc_virt_addr);
992
993         /* assign ctrl request op part */
994         ctrl = (struct virtio_crypto_op_ctrl_req *)malloc_virt_addr;
995         ctrl->header.opcode = VIRTIO_CRYPTO_CIPHER_DESTROY_SESSION;
996         /* default data virtqueue is 0 */
997         ctrl->header.queue_id = 0;
998         ctrl->u.destroy_session.session_id = session->session_id;
999
1000         /* status part */
1001         status = &(((struct virtio_crypto_inhdr *)
1002                 ((uint8_t *)malloc_virt_addr + len_op_ctrl_req))->status);
1003         *status = VIRTIO_CRYPTO_ERR;
1004
1005         /* indirect desc vring part */
1006         desc = (struct vring_desc *)((uint8_t *)malloc_virt_addr
1007                 + desc_offset);
1008
1009         /* ctrl request part */
1010         desc[0].addr = malloc_phys_addr;
1011         desc[0].len = len_op_ctrl_req;
1012         desc[0].flags = VRING_DESC_F_NEXT;
1013         desc[0].next = 1;
1014
1015         /* status part */
1016         desc[1].addr = malloc_phys_addr + len_op_ctrl_req;
1017         desc[1].len = len_inhdr;
1018         desc[1].flags = VRING_DESC_F_WRITE;
1019
1020         /* use only a single desc entry */
1021         head = vq->vq_desc_head_idx;
1022         vq->vq_ring.desc[head].flags = VRING_DESC_F_INDIRECT;
1023         vq->vq_ring.desc[head].addr = malloc_phys_addr + desc_offset;
1024         vq->vq_ring.desc[head].len
1025                 = NUM_ENTRY_SYM_CLEAR_SESSION
1026                 * sizeof(struct vring_desc);
1027         vq->vq_free_cnt -= needed;
1028
1029         vq->vq_desc_head_idx = vq->vq_ring.desc[head].next;
1030
1031         vq_update_avail_ring(vq, head);
1032         vq_update_avail_idx(vq);
1033
1034         VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_queue_index = %d",
1035                                         vq->vq_queue_index);
1036
1037         virtqueue_notify(vq);
1038
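        /*
         * As in session creation, poll the used ring until the device has
         * processed the destroy request, then reclaim the descriptors.
         */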
1039         rte_rmb();
1040         while (vq->vq_used_cons_idx == vq->vq_ring.used->idx) {
1041                 rte_rmb();
1042                 usleep(100);
1043         }
1044
1045         while (vq->vq_used_cons_idx != vq->vq_ring.used->idx) {
1046                 uint32_t idx, desc_idx, used_idx;
1047                 struct vring_used_elem *uep;
1048
1049                 used_idx = (uint32_t)(vq->vq_used_cons_idx
1050                                 & (vq->vq_nentries - 1));
1051                 uep = &vq->vq_ring.used->ring[used_idx];
1052                 idx = (uint32_t) uep->id;
1053                 desc_idx = idx;
1054                 while (vq->vq_ring.desc[desc_idx].flags
1055                                 & VRING_DESC_F_NEXT) {
1056                         desc_idx = vq->vq_ring.desc[desc_idx].next;
1057                         vq->vq_free_cnt++;
1058                 }
1059
1060                 vq->vq_ring.desc[desc_idx].next = vq->vq_desc_head_idx;
1061                 vq->vq_desc_head_idx = idx;
1062                 vq->vq_used_cons_idx++;
1063                 vq->vq_free_cnt++;
1064         }
1065
1066         if (*status != VIRTIO_CRYPTO_OK) {
1067                 VIRTIO_CRYPTO_SESSION_LOG_ERR("Close session failed "
1068                                 "status=%"PRIu32", session_id=%"PRIu64"",
1069                                 *status, session->session_id);
1070                 rte_free(malloc_virt_addr);
1071                 return;
1072         }
1073
1074         VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_free_cnt=%d\n"
1075                         "vq->vq_desc_head_idx=%d",
1076                         vq->vq_free_cnt, vq->vq_desc_head_idx);
1077
1078         VIRTIO_CRYPTO_SESSION_LOG_INFO("Close session %"PRIu64" successfully ",
1079                         session->session_id);
1080
1081         memset(sess, 0, sizeof(struct virtio_crypto_session));
1082         rte_free(malloc_virt_addr);
1083 }
1084
1085 static struct rte_crypto_cipher_xform *
1086 virtio_crypto_get_cipher_xform(struct rte_crypto_sym_xform *xform)
1087 {
1088         do {
1089                 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
1090                         return &xform->cipher;
1091
1092                 xform = xform->next;
1093         } while (xform);
1094
1095         return NULL;
1096 }
1097
1098 static struct rte_crypto_auth_xform *
1099 virtio_crypto_get_auth_xform(struct rte_crypto_sym_xform *xform)
1100 {
1101         do {
1102                 if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH)
1103                         return &xform->auth;
1104
1105                 xform = xform->next;
1106         } while (xform);
1107
1108         return NULL;
1109 }
1110
1111 /** Get xform chain order */
1112 static int
1113 virtio_crypto_get_chain_order(struct rte_crypto_sym_xform *xform)
1114 {
1115         if (xform == NULL)
1116                 return -1;
1117
1118         /* Cipher Only */
1119         if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
1120                         xform->next == NULL)
1121                 return VIRTIO_CRYPTO_CMD_CIPHER;
1122
1123         /* Authentication Only */
1124         if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
1125                         xform->next == NULL)
1126                 return VIRTIO_CRYPTO_CMD_AUTH;
1127
1128         /* Authenticate then Cipher */
1129         if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
1130                         xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
1131                 return VIRTIO_CRYPTO_CMD_HASH_CIPHER;
1132
1133         /* Cipher then Authenticate */
1134         if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
1135                         xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
1136                 return VIRTIO_CRYPTO_CMD_CIPHER_HASH;
1137
1138         return -1;
1139 }
1140
1141 static int
1142 virtio_crypto_sym_pad_cipher_param(
1143                 struct virtio_crypto_cipher_session_para *para,
1144                 struct rte_crypto_cipher_xform *cipher_xform)
1145 {
1146         switch (cipher_xform->algo) {
1147         case RTE_CRYPTO_CIPHER_AES_CBC:
1148                 para->algo = VIRTIO_CRYPTO_CIPHER_AES_CBC;
1149                 break;
1150         default:
1151                 VIRTIO_CRYPTO_SESSION_LOG_ERR("Crypto: Unsupported "
1152                                 "Cipher alg %u", cipher_xform->algo);
1153                 return -1;
1154         }
1155
1156         para->keylen = cipher_xform->key.length;
1157         switch (cipher_xform->op) {
1158         case RTE_CRYPTO_CIPHER_OP_ENCRYPT:
1159                 para->op = VIRTIO_CRYPTO_OP_ENCRYPT;
1160                 break;
1161         case RTE_CRYPTO_CIPHER_OP_DECRYPT:
1162                 para->op = VIRTIO_CRYPTO_OP_DECRYPT;
1163                 break;
1164         default:
1165                 VIRTIO_CRYPTO_SESSION_LOG_ERR("Unsupported cipher operation "
1166                                         "parameter");
1167                 return -1;
1168         }
1169
1170         return 0;
1171 }
1172
1173 static int
1174 virtio_crypto_sym_pad_auth_param(
1175                 struct virtio_crypto_op_ctrl_req *ctrl,
1176                 struct rte_crypto_auth_xform *auth_xform)
1177 {
1178         uint32_t *algo;
1179         struct virtio_crypto_alg_chain_session_para *para =
1180                 &(ctrl->u.sym_create_session.u.chain.para);
1181
1182         switch (ctrl->u.sym_create_session.u.chain.para.hash_mode) {
1183         case VIRTIO_CRYPTO_SYM_HASH_MODE_PLAIN:
1184                 algo = &(para->u.hash_param.algo);
1185                 break;
1186         case VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH:
1187                 algo = &(para->u.mac_param.algo);
1188                 break;
1189         default:
1190                 VIRTIO_CRYPTO_SESSION_LOG_ERR("Unsupported hash mode %u "
1191                         "specified",
1192                         ctrl->u.sym_create_session.u.chain.para.hash_mode);
1193                 return -1;
1194         }
1195
1196         switch (auth_xform->algo) {
1197         case RTE_CRYPTO_AUTH_SHA1_HMAC:
1198                 *algo = VIRTIO_CRYPTO_MAC_HMAC_SHA1;
1199                 break;
1200         default:
1201                 VIRTIO_CRYPTO_SESSION_LOG_ERR(
1202                         "Crypto: Undefined Hash algo %u specified",
1203                         auth_xform->algo);
1204                 return -1;
1205         }
1206
1207         return 0;
1208 }
1209
1210 static int
1211 virtio_crypto_sym_pad_op_ctrl_req(
1212                 struct virtio_crypto_op_ctrl_req *ctrl,
1213                 struct rte_crypto_sym_xform *xform, bool is_chainned,
1214                 uint8_t **cipher_key_data, uint8_t **auth_key_data,
1215                 struct virtio_crypto_session *session)
1216 {
1217         int ret;
1218         struct rte_crypto_auth_xform *auth_xform = NULL;
1219         struct rte_crypto_cipher_xform *cipher_xform = NULL;
1220
1221         /* Get cipher xform from crypto xform chain */
1222         cipher_xform = virtio_crypto_get_cipher_xform(xform);
1223         if (cipher_xform) {
1224                 if (cipher_xform->iv.length > VIRTIO_CRYPTO_MAX_IV_SIZE) {
1225                         VIRTIO_CRYPTO_SESSION_LOG_ERR(
1226                                 "cipher IV size cannot be longer than %u",
1227                                 VIRTIO_CRYPTO_MAX_IV_SIZE);
1228                         return -1;
1229                 }
1230                 if (is_chainned)
1231                         ret = virtio_crypto_sym_pad_cipher_param(
1232                                 &ctrl->u.sym_create_session.u.chain.para
1233                                                 .cipher_param, cipher_xform);
1234                 else
1235                         ret = virtio_crypto_sym_pad_cipher_param(
1236                                 &ctrl->u.sym_create_session.u.cipher.para,
1237                                 cipher_xform);
1238
1239                 if (ret < 0) {
1240                         VIRTIO_CRYPTO_SESSION_LOG_ERR(
1241                                 "pad cipher parameter failed");
1242                         return -1;
1243                 }
1244
1245                 *cipher_key_data = cipher_xform->key.data;
1246
1247                 session->iv.offset = cipher_xform->iv.offset;
1248                 session->iv.length = cipher_xform->iv.length;
1249         }
1250
1251         /* Get auth xform from crypto xform chain */
1252         auth_xform = virtio_crypto_get_auth_xform(xform);
1253         if (auth_xform) {
1254                 /* FIXME: support VIRTIO_CRYPTO_SYM_HASH_MODE_NESTED */
1255                 struct virtio_crypto_alg_chain_session_para *para =
1256                         &(ctrl->u.sym_create_session.u.chain.para);
1257                 if (auth_xform->key.length) {
1258                         para->hash_mode = VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH;
1259                         para->u.mac_param.auth_key_len =
1260                                 (uint32_t)auth_xform->key.length;
1261                         para->u.mac_param.hash_result_len =
1262                                 auth_xform->digest_length;
1263
1264                         *auth_key_data = auth_xform->key.data;
1265                 } else {
1266                         para->hash_mode = VIRTIO_CRYPTO_SYM_HASH_MODE_PLAIN;
1267                         para->u.hash_param.hash_result_len =
1268                                 auth_xform->digest_length;
1269                 }
1270
1271                 ret = virtio_crypto_sym_pad_auth_param(ctrl, auth_xform);
1272                 if (ret < 0) {
1273                         VIRTIO_CRYPTO_SESSION_LOG_ERR("pad auth parameter "
1274                                                 "failed");
1275                         return -1;
1276                 }
1277         }
1278
1279         return 0;
1280 }
1281
1282 static int
1283 virtio_crypto_check_sym_configure_session_paras(
1284                 struct rte_cryptodev *dev,
1285                 struct rte_crypto_sym_xform *xform,
1286                 struct rte_cryptodev_sym_session *sym_sess,
1287                 struct rte_mempool *mempool)
1288 {
1289         if (unlikely(xform == NULL) || unlikely(sym_sess == NULL) ||
1290                 unlikely(mempool == NULL)) {
1291                 VIRTIO_CRYPTO_SESSION_LOG_ERR("NULL pointer");
1292                 return -1;
1293         }
1294
1295         if (virtio_crypto_check_sym_session_paras(dev) < 0)
1296                 return -1;
1297
1298         return 0;
1299 }
1300
1301 static int
1302 virtio_crypto_sym_configure_session(
1303                 struct rte_cryptodev *dev,
1304                 struct rte_crypto_sym_xform *xform,
1305                 struct rte_cryptodev_sym_session *sess,
1306                 struct rte_mempool *mempool)
1307 {
1308         int ret;
1309         struct virtio_crypto_session crypto_sess;
1310         void *session_private = &crypto_sess;
1311         struct virtio_crypto_session *session;
1312         struct virtio_crypto_op_ctrl_req *ctrl_req;
1313         enum virtio_crypto_cmd_id cmd_id;
1314         uint8_t *cipher_key_data = NULL;
1315         uint8_t *auth_key_data = NULL;
1316         struct virtio_crypto_hw *hw;
1317         struct virtqueue *control_vq;
1318
1319         PMD_INIT_FUNC_TRACE();
1320
1321         ret = virtio_crypto_check_sym_configure_session_paras(dev, xform,
1322                         sess, mempool);
1323         if (ret < 0) {
1324                 VIRTIO_CRYPTO_SESSION_LOG_ERR("Invalid parameters");
1325                 return ret;
1326         }
1327
1328         if (rte_mempool_get(mempool, &session_private)) {
1329                 VIRTIO_CRYPTO_SESSION_LOG_ERR(
1330                         "Couldn't get object from session mempool");
1331                 return -ENOMEM;
1332         }
1333
1334         session = (struct virtio_crypto_session *)session_private;
1335         memset(session, 0, sizeof(struct virtio_crypto_session));
1336         ctrl_req = &session->ctrl;
1337         ctrl_req->header.opcode = VIRTIO_CRYPTO_CIPHER_CREATE_SESSION;
1338         /* FIXME: support multiqueue */
1339         ctrl_req->header.queue_id = 0;
1340
1341         hw = dev->data->dev_private;
1342         control_vq = hw->cvq;
1343
1344         cmd_id = virtio_crypto_get_chain_order(xform);
1345         if (cmd_id == VIRTIO_CRYPTO_CMD_CIPHER_HASH)
1346                 ctrl_req->u.sym_create_session.u.chain.para.alg_chain_order
1347                         = VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_CIPHER_THEN_HASH;
1348         if (cmd_id == VIRTIO_CRYPTO_CMD_HASH_CIPHER)
1349                 ctrl_req->u.sym_create_session.u.chain.para.alg_chain_order
1350                         = VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_HASH_THEN_CIPHER;
1351
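        /*
         * Fill the ctrl request from the xform chain, then send it on the
         * control queue; on success the device returns the session_id that
         * later data-path requests will reference.
         */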
1352         switch (cmd_id) {
1353         case VIRTIO_CRYPTO_CMD_CIPHER_HASH:
1354         case VIRTIO_CRYPTO_CMD_HASH_CIPHER:
1355                 ctrl_req->u.sym_create_session.op_type
1356                         = VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING;
1357
1358                 ret = virtio_crypto_sym_pad_op_ctrl_req(ctrl_req,
1359                         xform, true, &cipher_key_data, &auth_key_data, session);
1360                 if (ret < 0) {
1361                         VIRTIO_CRYPTO_SESSION_LOG_ERR(
1362                                 "padding sym op ctrl req failed");
1363                         goto error_out;
1364                 }
1365                 ret = virtio_crypto_send_command(control_vq, ctrl_req,
1366                         cipher_key_data, auth_key_data, session);
1367                 if (ret < 0) {
1368                         VIRTIO_CRYPTO_SESSION_LOG_ERR(
1369                                 "create session failed: %d", ret);
1370                         goto error_out;
1371                 }
1372                 break;
1373         case VIRTIO_CRYPTO_CMD_CIPHER:
1374                 ctrl_req->u.sym_create_session.op_type
1375                         = VIRTIO_CRYPTO_SYM_OP_CIPHER;
1376                 ret = virtio_crypto_sym_pad_op_ctrl_req(ctrl_req, xform,
1377                         false, &cipher_key_data, &auth_key_data, session);
1378                 if (ret < 0) {
1379                         VIRTIO_CRYPTO_SESSION_LOG_ERR(
1380                                 "padding sym op ctrl req failed");
1381                         goto error_out;
1382                 }
1383                 ret = virtio_crypto_send_command(control_vq, ctrl_req,
1384                         cipher_key_data, NULL, session);
1385                 if (ret < 0) {
1386                         VIRTIO_CRYPTO_SESSION_LOG_ERR(
1387                                 "create session failed: %d", ret);
1388                         goto error_out;
1389                 }
1390                 break;
1391         default:
1392                 VIRTIO_CRYPTO_SESSION_LOG_ERR(
1393                         "Unsupported operation chain order parameter");
1394                 goto error_out;
1395         }
1396
1397         set_session_private_data(sess, dev->driver_id,
1398                 session_private);
1399
1400         return 0;
1401
1402 error_out:
1403         return -1;
1404 }
1405
1406 static void
1407 virtio_crypto_dev_info_get(struct rte_cryptodev *dev,
1408                 struct rte_cryptodev_info *info)
1409 {
1410         struct virtio_crypto_hw *hw = dev->data->dev_private;
1411
1412         PMD_INIT_FUNC_TRACE();
1413
1414         if (info != NULL) {
1415                 info->driver_id = cryptodev_virtio_driver_id;
1416                 info->feature_flags = dev->feature_flags;
1417                 info->max_nb_queue_pairs = hw->max_dataqueues;
1418                 /* No limit on the number of sessions */
1419                 info->sym.max_nb_sessions = 0;
1420                 info->capabilities = hw->virtio_dev_capabilities;
1421         }
1422 }
1423
1424 static int
1425 crypto_virtio_pci_probe(
1426         struct rte_pci_driver *pci_drv __rte_unused,
1427         struct rte_pci_device *pci_dev)
1428 {
1429         struct rte_cryptodev_pmd_init_params init_params = {
1430                 .name = "",
1431                 .socket_id = rte_socket_id(),
1432                 .private_data_size = sizeof(struct virtio_crypto_hw)
1433         };
1434         char name[RTE_CRYPTODEV_NAME_MAX_LEN];
1435
1436         VIRTIO_CRYPTO_DRV_LOG_DBG("Found Crypto device at %02x:%02x.%x",
1437                         pci_dev->addr.bus,
1438                         pci_dev->addr.devid,
1439                         pci_dev->addr.function);
1440
1441         rte_pci_device_name(&pci_dev->addr, name, sizeof(name));
1442
1443         return crypto_virtio_create(name, pci_dev, &init_params);
1444 }
1445
1446 static int
1447 crypto_virtio_pci_remove(
1448         struct rte_pci_device *pci_dev __rte_unused)
1449 {
1450         struct rte_cryptodev *cryptodev;
1451         char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
1452
1453         if (pci_dev == NULL)
1454                 return -EINVAL;
1455
1456         rte_pci_device_name(&pci_dev->addr, cryptodev_name,
1457                         sizeof(cryptodev_name));
1458
1459         cryptodev = rte_cryptodev_pmd_get_named_dev(cryptodev_name);
1460         if (cryptodev == NULL)
1461                 return -ENODEV;
1462
1463         return virtio_crypto_dev_uninit(cryptodev);
1464 }
1465
1466 static struct rte_pci_driver rte_virtio_crypto_driver = {
1467         .id_table = pci_id_virtio_crypto_map,
1468         .drv_flags = 0,
1469         .probe = crypto_virtio_pci_probe,
1470         .remove = crypto_virtio_pci_remove
1471 };
1472
1473 static struct cryptodev_driver virtio_crypto_drv;
1474
1475 RTE_PMD_REGISTER_PCI(CRYPTODEV_NAME_VIRTIO_PMD, rte_virtio_crypto_driver);
1476 RTE_PMD_REGISTER_CRYPTO_DRIVER(virtio_crypto_drv,
1477         rte_virtio_crypto_driver.driver,
1478         cryptodev_virtio_driver_id);
1479
1480 RTE_INIT(virtio_crypto_init_log);
1481 static void
1482 virtio_crypto_init_log(void)
1483 {
1484         virtio_crypto_logtype_init = rte_log_register("pmd.crypto.virtio.init");
1485         if (virtio_crypto_logtype_init >= 0)
1486                 rte_log_set_level(virtio_crypto_logtype_init, RTE_LOG_NOTICE);
1487
1488         virtio_crypto_logtype_session =
1489                 rte_log_register("pmd.crypto.virtio.session");
1490         if (virtio_crypto_logtype_session >= 0)
1491                 rte_log_set_level(virtio_crypto_logtype_session,
1492                                 RTE_LOG_NOTICE);
1493
1494         virtio_crypto_logtype_rx = rte_log_register("pmd.crypto.virtio.rx");
1495         if (virtio_crypto_logtype_rx >= 0)
1496                 rte_log_set_level(virtio_crypto_logtype_rx, RTE_LOG_NOTICE);
1497
1498         virtio_crypto_logtype_tx = rte_log_register("pmd.crypto.virtio.tx");
1499         if (virtio_crypto_logtype_tx >= 0)
1500                 rte_log_set_level(virtio_crypto_logtype_tx, RTE_LOG_NOTICE);
1501
1502         virtio_crypto_logtype_driver =
1503                 rte_log_register("pmd.crypto.virtio.driver");
1504         if (virtio_crypto_logtype_driver >= 0)
1505                 rte_log_set_level(virtio_crypto_logtype_driver, RTE_LOG_NOTICE);
1506 }