/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.
 */
#include <stdbool.h>
#include <unistd.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_cryptodev.h>
#include <cryptodev_pmd.h>
#include <rte_eal.h>

#include "virtio_cryptodev.h"
#include "virtqueue.h"
#include "virtio_crypto_algs.h"
#include "virtio_crypto_capabilities.h"

static int virtio_crypto_dev_configure(struct rte_cryptodev *dev,
		struct rte_cryptodev_config *config);
static int virtio_crypto_dev_start(struct rte_cryptodev *dev);
static void virtio_crypto_dev_stop(struct rte_cryptodev *dev);
static int virtio_crypto_dev_close(struct rte_cryptodev *dev);
static void virtio_crypto_dev_info_get(struct rte_cryptodev *dev,
		struct rte_cryptodev_info *dev_info);
static void virtio_crypto_dev_stats_get(struct rte_cryptodev *dev,
		struct rte_cryptodev_stats *stats);
static void virtio_crypto_dev_stats_reset(struct rte_cryptodev *dev);
static int virtio_crypto_qp_setup(struct rte_cryptodev *dev,
		uint16_t queue_pair_id,
		const struct rte_cryptodev_qp_conf *qp_conf,
		int socket_id);
static int virtio_crypto_qp_release(struct rte_cryptodev *dev,
		uint16_t queue_pair_id);
static void virtio_crypto_dev_free_mbufs(struct rte_cryptodev *dev);
static unsigned int virtio_crypto_sym_get_session_private_size(
		struct rte_cryptodev *dev);
static void virtio_crypto_sym_clear_session(struct rte_cryptodev *dev,
		struct rte_cryptodev_sym_session *sess);
static int virtio_crypto_sym_configure_session(struct rte_cryptodev *dev,
		struct rte_crypto_sym_xform *xform,
		struct rte_cryptodev_sym_session *session,
		struct rte_mempool *mp);

/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_virtio_crypto_map[] = {
	{ RTE_PCI_DEVICE(VIRTIO_CRYPTO_PCI_VENDORID,
				VIRTIO_CRYPTO_PCI_DEVICEID) },
	{ .vendor_id = 0, /* sentinel */ },
};

static const struct rte_cryptodev_capabilities virtio_capabilities[] = {
	VIRTIO_SYM_CAPABILITIES,
	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};

uint8_t cryptodev_virtio_driver_id;

#define NUM_ENTRY_SYM_CREATE_SESSION 4

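/*
 * Send a VIRTIO_CRYPTO_CIPHER_CREATE_SESSION control request to the device.
 *
 * The ctrl request, the optional cipher/auth keys, a device-writable
 * virtio_crypto_session_input and the indirect vring_desc table are all
 * placed in one rte_malloc() buffer. The indirect chain is linked into the
 * control queue, the device is notified, and the used ring is polled until
 * the request completes. On success the session_id returned by the device
 * is stored in the session object.
 */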
static int
virtio_crypto_send_command(struct virtqueue *vq,
		struct virtio_crypto_op_ctrl_req *ctrl, uint8_t *cipher_key,
		uint8_t *auth_key, struct virtio_crypto_session *session)
{
	uint8_t idx = 0;
	uint8_t needed = 1;
	uint32_t head = 0;
	uint32_t len_cipher_key = 0;
	uint32_t len_auth_key = 0;
	uint32_t len_ctrl_req = sizeof(struct virtio_crypto_op_ctrl_req);
	uint32_t len_session_input = sizeof(struct virtio_crypto_session_input);
	uint32_t len_total = 0;
	uint32_t input_offset = 0;
	void *virt_addr_started = NULL;
	phys_addr_t phys_addr_started;
	struct vring_desc *desc;
	uint32_t desc_offset;
	struct virtio_crypto_session_input *input;
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (session == NULL) {
		VIRTIO_CRYPTO_SESSION_LOG_ERR("session is NULL.");
		return -EINVAL;
	}
	/* A cipher key is always required; auth_key may be NULL for
	 * cipher-only sessions.
	 */
	if (!cipher_key) {
		VIRTIO_CRYPTO_SESSION_LOG_ERR("cipher key is NULL.");
		return -EINVAL;
	}

	head = vq->vq_desc_head_idx;
	VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_desc_head_idx = %d, vq = %p",
					head, vq);

	if (vq->vq_free_cnt < needed) {
		VIRTIO_CRYPTO_SESSION_LOG_ERR("Not enough entry");
		return -ENOSPC;
	}

	/* calculate the length of the cipher key */
	if (cipher_key) {
		switch (ctrl->u.sym_create_session.op_type) {
		case VIRTIO_CRYPTO_SYM_OP_CIPHER:
			len_cipher_key
				= ctrl->u.sym_create_session.u.cipher
							.para.keylen;
			break;
		case VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING:
			len_cipher_key
				= ctrl->u.sym_create_session.u.chain
					.para.cipher_param.keylen;
			break;
		default:
			VIRTIO_CRYPTO_SESSION_LOG_ERR("invalid op type");
			return -EINVAL;
		}
	}

	/* calculate the length of the auth key */
	if (auth_key) {
		len_auth_key =
			ctrl->u.sym_create_session.u.chain.para.u.mac_param
				.auth_key_len;
	}

	/*
	 * Allocate memory to hold the ctrl request, cipher key, auth key,
	 * session input and the indirect vring_desc entries.
	 */
	desc_offset = len_ctrl_req + len_cipher_key + len_auth_key
		+ len_session_input;
	virt_addr_started = rte_malloc(NULL,
		desc_offset + NUM_ENTRY_SYM_CREATE_SESSION
			* sizeof(struct vring_desc), RTE_CACHE_LINE_SIZE);
	if (virt_addr_started == NULL) {
		VIRTIO_CRYPTO_SESSION_LOG_ERR("not enough heap memory");
		return -ENOSPC;
	}
	phys_addr_started = rte_malloc_virt2iova(virt_addr_started);

	/* address to store indirect vring desc entries */
	desc = (struct vring_desc *)
		((uint8_t *)virt_addr_started + desc_offset);

	/* ctrl req part */
	memcpy(virt_addr_started, ctrl, len_ctrl_req);
	desc[idx].addr = phys_addr_started;
	desc[idx].len = len_ctrl_req;
	desc[idx].flags = VRING_DESC_F_NEXT;
	desc[idx].next = idx + 1;
	idx++;
	len_total += len_ctrl_req;
	input_offset += len_ctrl_req;

	/* cipher key part */
	if (len_cipher_key > 0) {
		memcpy((uint8_t *)virt_addr_started + len_total,
			cipher_key, len_cipher_key);

		desc[idx].addr = phys_addr_started + len_total;
		desc[idx].len = len_cipher_key;
		desc[idx].flags = VRING_DESC_F_NEXT;
		desc[idx].next = idx + 1;
		idx++;
		len_total += len_cipher_key;
		input_offset += len_cipher_key;
	}

	/* auth key part */
	if (len_auth_key > 0) {
		memcpy((uint8_t *)virt_addr_started + len_total,
			auth_key, len_auth_key);

		desc[idx].addr = phys_addr_started + len_total;
		desc[idx].len = len_auth_key;
		desc[idx].flags = VRING_DESC_F_NEXT;
		desc[idx].next = idx + 1;
		idx++;
		len_total += len_auth_key;
		input_offset += len_auth_key;
	}

	/* input part */
	input = (struct virtio_crypto_session_input *)
		((uint8_t *)virt_addr_started + input_offset);
	input->status = VIRTIO_CRYPTO_ERR;
	input->session_id = ~0ULL;
	desc[idx].addr = phys_addr_started + len_total;
	desc[idx].len = len_session_input;
	desc[idx].flags = VRING_DESC_F_WRITE;
	idx++;

	/* use a single desc entry */
	vq->vq_ring.desc[head].addr = phys_addr_started + desc_offset;
	vq->vq_ring.desc[head].len = idx * sizeof(struct vring_desc);
	vq->vq_ring.desc[head].flags = VRING_DESC_F_INDIRECT;
	vq->vq_free_cnt--;

	vq->vq_desc_head_idx = vq->vq_ring.desc[head].next;

	vq_update_avail_ring(vq, head);
	vq_update_avail_idx(vq);

	VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_queue_index = %d",
					vq->vq_queue_index);

	virtqueue_notify(vq);

	rte_rmb();
	while (vq->vq_used_cons_idx == vq->vq_ring.used->idx) {
		rte_rmb();
		usleep(100);
	}

	while (vq->vq_used_cons_idx != vq->vq_ring.used->idx) {
		uint32_t idx, desc_idx, used_idx;
		struct vring_used_elem *uep;

		used_idx = (uint32_t)(vq->vq_used_cons_idx
				& (vq->vq_nentries - 1));
		uep = &vq->vq_ring.used->ring[used_idx];
		idx = (uint32_t) uep->id;
		desc_idx = idx;

		while (vq->vq_ring.desc[desc_idx].flags & VRING_DESC_F_NEXT) {
			desc_idx = vq->vq_ring.desc[desc_idx].next;
			vq->vq_free_cnt++;
		}

		vq->vq_ring.desc[desc_idx].next = vq->vq_desc_head_idx;
		vq->vq_desc_head_idx = idx;

		vq->vq_used_cons_idx++;
		vq->vq_free_cnt++;
	}

	VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_free_cnt=%d\n"
			"vq->vq_desc_head_idx=%d",
			vq->vq_free_cnt, vq->vq_desc_head_idx);

	/* get the result */
	if (input->status != VIRTIO_CRYPTO_OK) {
		VIRTIO_CRYPTO_SESSION_LOG_ERR("Something wrong on backend! "
				"status=%u, session_id=%" PRIu64 "",
				input->status, input->session_id);
		rte_free(virt_addr_started);
		ret = -1;
	} else {
		session->session_id = input->session_id;

		VIRTIO_CRYPTO_SESSION_LOG_INFO("Create session successfully, "
				"session_id=%" PRIu64 "", input->session_id);
		rte_free(virt_addr_started);
		ret = 0;
	}

	return ret;
}

void
virtio_crypto_queue_release(struct virtqueue *vq)
{
	struct virtio_crypto_hw *hw;

	PMD_INIT_FUNC_TRACE();

	if (vq) {
		hw = vq->hw;
		/* Select and deactivate the queue */
		VTPCI_OPS(hw)->del_queue(hw, vq);

		rte_memzone_free(vq->mz);
		rte_mempool_free(vq->mpool);
		rte_free(vq);
	}
}

#define MPOOL_MAX_NAME_SZ 32

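/*
 * Allocate and initialize one virtqueue (data or control).
 *
 * The queue size is read from the device; a virtqueue structure with
 * per-descriptor extra state is allocated, a cookie mempool is created
 * for data queues, and a memzone is reserved for the vring itself. The
 * vring physical address must fit in the 32-bit page frame number the
 * legacy VIRTIO_PCI_QUEUE_PFN register expects.
 */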
int
virtio_crypto_queue_setup(struct rte_cryptodev *dev,
		int queue_type,
		uint16_t vtpci_queue_idx,
		uint16_t nb_desc,
		int socket_id,
		struct virtqueue **pvq)
{
	char vq_name[VIRTQUEUE_MAX_NAME_SZ];
	char mpool_name[MPOOL_MAX_NAME_SZ];
	const struct rte_memzone *mz;
	unsigned int vq_size, size;
	struct virtio_crypto_hw *hw = dev->data->dev_private;
	struct virtqueue *vq = NULL;
	uint32_t i = 0;
	uint32_t j;

	PMD_INIT_FUNC_TRACE();

	VIRTIO_CRYPTO_INIT_LOG_DBG("setting up queue: %u", vtpci_queue_idx);

	/*
	 * Read the virtqueue size from the Queue Size field.
	 * It is always a power of 2; if it is 0 the virtqueue does not exist.
	 */
	vq_size = VTPCI_OPS(hw)->get_queue_num(hw, vtpci_queue_idx);
	if (vq_size == 0) {
		VIRTIO_CRYPTO_INIT_LOG_ERR("virtqueue does not exist");
		return -EINVAL;
	}
	VIRTIO_CRYPTO_INIT_LOG_DBG("vq_size: %u", vq_size);

	if (!rte_is_power_of_2(vq_size)) {
		VIRTIO_CRYPTO_INIT_LOG_ERR("virtqueue size is not power of 2");
		return -EINVAL;
	}

	if (queue_type == VTCRYPTO_DATAQ) {
		snprintf(vq_name, sizeof(vq_name), "dev%d_dataqueue%d",
				dev->data->dev_id, vtpci_queue_idx);
		snprintf(mpool_name, sizeof(mpool_name),
				"dev%d_dataqueue%d_mpool",
				dev->data->dev_id, vtpci_queue_idx);
	} else if (queue_type == VTCRYPTO_CTRLQ) {
		snprintf(vq_name, sizeof(vq_name), "dev%d_controlqueue",
				dev->data->dev_id);
		snprintf(mpool_name, sizeof(mpool_name),
				"dev%d_controlqueue_mpool",
				dev->data->dev_id);
	}
	size = RTE_ALIGN_CEIL(sizeof(*vq) +
				vq_size * sizeof(struct vq_desc_extra),
				RTE_CACHE_LINE_SIZE);
	vq = rte_zmalloc_socket(vq_name, size, RTE_CACHE_LINE_SIZE,
				socket_id);
	if (vq == NULL) {
		VIRTIO_CRYPTO_INIT_LOG_ERR("Can not allocate virtqueue");
		return -ENOMEM;
	}

	if (queue_type == VTCRYPTO_DATAQ) {
		/* pre-allocate a mempool and use it in the data plane to
		 * improve performance
		 */
		vq->mpool = rte_mempool_lookup(mpool_name);
		if (vq->mpool == NULL)
			vq->mpool = rte_mempool_create(mpool_name,
					vq_size,
					sizeof(struct virtio_crypto_op_cookie),
					RTE_CACHE_LINE_SIZE, 0,
					NULL, NULL, NULL, NULL, socket_id,
					0);
		if (!vq->mpool) {
			VIRTIO_CRYPTO_DRV_LOG_ERR("Virtio Crypto PMD "
					"Cannot create mempool");
			goto mpool_create_err;
		}
		for (i = 0; i < vq_size; i++) {
			vq->vq_descx[i].cookie =
				rte_zmalloc("crypto PMD op cookie pointer",
					sizeof(struct virtio_crypto_op_cookie),
					RTE_CACHE_LINE_SIZE);
			if (vq->vq_descx[i].cookie == NULL) {
				VIRTIO_CRYPTO_DRV_LOG_ERR("Failed to "
						"alloc mem for cookie");
				goto cookie_alloc_err;
			}
		}
	}

	vq->hw = hw;
	vq->dev_id = dev->data->dev_id;
	vq->vq_queue_index = vtpci_queue_idx;
	vq->vq_nentries = vq_size;

	/*
	 * Using part of the vring entries is permitted, but the maximum
	 * is vq_size
	 */
	if (nb_desc == 0 || nb_desc > vq_size)
		nb_desc = vq_size;
	vq->vq_free_cnt = nb_desc;

	/*
	 * Reserve a memzone for vring elements
	 */
	size = vring_size(vq_size, VIRTIO_PCI_VRING_ALIGN);
	vq->vq_ring_size = RTE_ALIGN_CEIL(size, VIRTIO_PCI_VRING_ALIGN);
	VIRTIO_CRYPTO_INIT_LOG_DBG("%s vring_size: %d, rounded_vring_size: %d",
			(queue_type == VTCRYPTO_DATAQ) ? "dataq" : "ctrlq",
			size, vq->vq_ring_size);

	mz = rte_memzone_reserve_aligned(vq_name, vq->vq_ring_size,
			socket_id, 0, VIRTIO_PCI_VRING_ALIGN);
	if (mz == NULL) {
		if (rte_errno == EEXIST)
			mz = rte_memzone_lookup(vq_name);
		if (mz == NULL) {
			VIRTIO_CRYPTO_INIT_LOG_ERR("not enough memory");
			goto mz_reserve_err;
		}
	}

	/*
	 * The virtio PCI device VIRTIO_PCI_QUEUE_PFN register is 32 bit,
	 * and only accepts a 32-bit page frame number.
	 * Check if the allocated physical memory exceeds 16TB.
	 */
	if ((mz->iova + vq->vq_ring_size - 1)
				>> (VIRTIO_PCI_QUEUE_ADDR_SHIFT + 32)) {
		VIRTIO_CRYPTO_INIT_LOG_ERR("vring address shouldn't be "
					"above 16TB!");
		goto vring_addr_err;
	}

	memset(mz->addr, 0, mz->len);
	vq->mz = mz;
	vq->vq_ring_mem = mz->iova;
	vq->vq_ring_virt_mem = mz->addr;
	VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_ring_mem(physical): 0x%"PRIx64,
					(uint64_t)mz->iova);
	VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_ring_virt_mem: 0x%"PRIx64,
					(uint64_t)(uintptr_t)mz->addr);

	*pvq = vq;

	return 0;

vring_addr_err:
	rte_memzone_free(mz);
mz_reserve_err:
cookie_alloc_err:
	rte_mempool_free(vq->mpool);
	if (i != 0) {
		for (j = 0; j < i; j++)
			rte_free(vq->vq_descx[j].cookie);
	}
mpool_create_err:
	rte_free(vq);
	return -ENOMEM;
}

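/*
 * Set up the control virtqueue. It is created with the virtqueue index
 * that follows the data queues and is stored in hw->cvq.
 */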
static int
virtio_crypto_ctrlq_setup(struct rte_cryptodev *dev, uint16_t queue_idx)
{
	int ret;
	struct virtqueue *vq;
	struct virtio_crypto_hw *hw = dev->data->dev_private;

	/* if virtio device has started, do not touch the virtqueues */
	if (dev->data->dev_started)
		return 0;

	PMD_INIT_FUNC_TRACE();

	ret = virtio_crypto_queue_setup(dev, VTCRYPTO_CTRLQ, queue_idx,
			0, SOCKET_ID_ANY, &vq);
	if (ret < 0) {
		VIRTIO_CRYPTO_INIT_LOG_ERR("control vq initialization failed");
		return ret;
	}

	hw->cvq = vq;

	return 0;
}

static void
virtio_crypto_free_queues(struct rte_cryptodev *dev)
{
	unsigned int i;
	struct virtio_crypto_hw *hw = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	/* control queue release */
	virtio_crypto_queue_release(hw->cvq);

	/* data queue release */
	for (i = 0; i < hw->max_dataqueues; i++)
		virtio_crypto_queue_release(dev->data->queue_pairs[i]);
}

static int
virtio_crypto_dev_close(struct rte_cryptodev *dev __rte_unused)
{
	return 0;
}

/*
 * dev_ops for virtio, bare necessities for basic operation
 */
static struct rte_cryptodev_ops virtio_crypto_dev_ops = {
	/* Device related operations */
	.dev_configure			= virtio_crypto_dev_configure,
	.dev_start			= virtio_crypto_dev_start,
	.dev_stop			= virtio_crypto_dev_stop,
	.dev_close			= virtio_crypto_dev_close,
	.dev_infos_get			= virtio_crypto_dev_info_get,

	.stats_get			= virtio_crypto_dev_stats_get,
	.stats_reset			= virtio_crypto_dev_stats_reset,

	.queue_pair_setup		= virtio_crypto_qp_setup,
	.queue_pair_release		= virtio_crypto_qp_release,

	/* Crypto related operations */
	.sym_session_get_size		= virtio_crypto_sym_get_session_private_size,
	.sym_session_configure		= virtio_crypto_sym_configure_session,
	.sym_session_clear		= virtio_crypto_sym_clear_session
};

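/* Sum the per-data-queue enqueue/dequeue counters into the cryptodev stats. */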
static void
virtio_crypto_update_stats(struct rte_cryptodev *dev,
		struct rte_cryptodev_stats *stats)
{
	unsigned int i;
	struct virtio_crypto_hw *hw = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	if (stats == NULL) {
		VIRTIO_CRYPTO_DRV_LOG_ERR("invalid pointer");
		return;
	}

	for (i = 0; i < hw->max_dataqueues; i++) {
		const struct virtqueue *data_queue
			= dev->data->queue_pairs[i];
		if (data_queue == NULL)
			continue;

		stats->enqueued_count += data_queue->packets_sent_total;
		stats->enqueue_err_count += data_queue->packets_sent_failed;

		stats->dequeued_count += data_queue->packets_received_total;
		stats->dequeue_err_count
			+= data_queue->packets_received_failed;
	}
}

static void
virtio_crypto_dev_stats_get(struct rte_cryptodev *dev,
		struct rte_cryptodev_stats *stats)
{
	PMD_INIT_FUNC_TRACE();

	virtio_crypto_update_stats(dev, stats);
}

static void
virtio_crypto_dev_stats_reset(struct rte_cryptodev *dev)
{
	unsigned int i;
	struct virtio_crypto_hw *hw = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	for (i = 0; i < hw->max_dataqueues; i++) {
		struct virtqueue *data_queue = dev->data->queue_pairs[i];
		if (data_queue == NULL)
			continue;

		data_queue->packets_sent_total = 0;
		data_queue->packets_sent_failed = 0;

		data_queue->packets_received_total = 0;
		data_queue->packets_received_failed = 0;
	}
}

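/*
 * Queue pair setup: each cryptodev queue pair maps directly onto one
 * virtio data virtqueue, created with the requested descriptor count.
 */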
static int
virtio_crypto_qp_setup(struct rte_cryptodev *dev, uint16_t queue_pair_id,
		const struct rte_cryptodev_qp_conf *qp_conf,
		int socket_id)
{
	int ret;
	struct virtqueue *vq;

	PMD_INIT_FUNC_TRACE();

	/* if virtio dev is started, do not touch the virtqueues */
	if (dev->data->dev_started)
		return 0;

	ret = virtio_crypto_queue_setup(dev, VTCRYPTO_DATAQ, queue_pair_id,
			qp_conf->nb_descriptors, socket_id, &vq);
	if (ret < 0) {
		VIRTIO_CRYPTO_INIT_LOG_ERR(
			"virtio crypto data queue initialization failed\n");
		return ret;
	}

	dev->data->queue_pairs[queue_pair_id] = vq;

	return 0;
}

static int
virtio_crypto_qp_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
{
	struct virtqueue *vq
		= (struct virtqueue *)dev->data->queue_pairs[queue_pair_id];

	PMD_INIT_FUNC_TRACE();

	if (vq == NULL) {
		VIRTIO_CRYPTO_DRV_LOG_DBG("vq already freed");
		return 0;
	}

	virtio_crypto_queue_release(vq);
	return 0;
}

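/*
 * Feature negotiation: offer the driver's requested feature bits, read the
 * device features, keep the intersection, and (for modern devices) confirm
 * VIRTIO_F_VERSION_1 and complete the FEATURES_OK status handshake.
 */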
static int
virtio_negotiate_features(struct virtio_crypto_hw *hw, uint64_t req_features)
{
	uint64_t host_features;

	PMD_INIT_FUNC_TRACE();

	/* Prepare guest_features: features that the driver wants to support */
	VIRTIO_CRYPTO_INIT_LOG_DBG("guest_features before negotiate = %" PRIx64,
		req_features);

	/* Read device(host) feature bits */
	host_features = VTPCI_OPS(hw)->get_features(hw);
	VIRTIO_CRYPTO_INIT_LOG_DBG("host_features before negotiate = %" PRIx64,
		host_features);

	/*
	 * Negotiate features: the subset of device feature bits we also
	 * requested is written back as the guest feature bits.
	 */
	hw->guest_features = req_features;
	hw->guest_features = vtpci_cryptodev_negotiate_features(hw,
							host_features);
	VIRTIO_CRYPTO_INIT_LOG_DBG("features after negotiate = %" PRIx64,
		hw->guest_features);

	if (hw->modern) {
		if (!vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) {
			VIRTIO_CRYPTO_INIT_LOG_ERR(
				"VIRTIO_F_VERSION_1 feature is not enabled.");
			return -1;
		}
		vtpci_cryptodev_set_status(hw,
			VIRTIO_CONFIG_STATUS_FEATURES_OK);
		if (!(vtpci_cryptodev_get_status(hw) &
			VIRTIO_CONFIG_STATUS_FEATURES_OK)) {
			VIRTIO_CRYPTO_INIT_LOG_ERR("failed to set FEATURES_OK "
						"status!");
			return -1;
		}
	}

	hw->req_guest_features = req_features;

	return 0;
}

/* reset device and renegotiate features if needed */
static int
virtio_crypto_init_device(struct rte_cryptodev *cryptodev,
	uint64_t req_features)
{
	struct virtio_crypto_hw *hw = cryptodev->data->dev_private;
	struct virtio_crypto_config local_config;
	struct virtio_crypto_config *config = &local_config;

	PMD_INIT_FUNC_TRACE();

	/* Reset the device, although this is not necessary at startup */
	vtpci_cryptodev_reset(hw);

	/* Tell the host we've noticed this device. */
	vtpci_cryptodev_set_status(hw, VIRTIO_CONFIG_STATUS_ACK);

	/* Tell the host we know how to drive the device. */
	vtpci_cryptodev_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER);
	if (virtio_negotiate_features(hw, req_features) < 0)
		return -1;

	/* Get status of the device */
	vtpci_read_cryptodev_config(hw,
		offsetof(struct virtio_crypto_config, status),
		&config->status, sizeof(config->status));
	if (config->status != VIRTIO_CRYPTO_S_HW_READY) {
		VIRTIO_CRYPTO_DRV_LOG_ERR("accelerator hardware is "
				"not ready");
		return -1;
	}

	/* Get number of data queues */
	vtpci_read_cryptodev_config(hw,
		offsetof(struct virtio_crypto_config, max_dataqueues),
		&config->max_dataqueues,
		sizeof(config->max_dataqueues));
	hw->max_dataqueues = config->max_dataqueues;

	VIRTIO_CRYPTO_INIT_LOG_DBG("hw->max_dataqueues=%d",
		hw->max_dataqueues);

	return 0;
}

/*
 * This function is based on the probe() function.
 * It returns 0 on success.
 */
static int
crypto_virtio_create(const char *name, struct rte_pci_device *pci_dev,
		struct rte_cryptodev_pmd_init_params *init_params)
{
	struct rte_cryptodev *cryptodev;
	struct virtio_crypto_hw *hw;

	PMD_INIT_FUNC_TRACE();

	cryptodev = rte_cryptodev_pmd_create(name, &pci_dev->device,
					init_params);
	if (cryptodev == NULL)
		return -ENODEV;

	cryptodev->driver_id = cryptodev_virtio_driver_id;
	cryptodev->dev_ops = &virtio_crypto_dev_ops;

	cryptodev->enqueue_burst = virtio_crypto_pkt_tx_burst;
	cryptodev->dequeue_burst = virtio_crypto_pkt_rx_burst;

	cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
		RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
		RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;

	hw = cryptodev->data->dev_private;
	hw->dev_id = cryptodev->data->dev_id;
	hw->virtio_dev_capabilities = virtio_capabilities;

	VIRTIO_CRYPTO_INIT_LOG_DBG("dev %d vendorID=0x%x deviceID=0x%x",
		cryptodev->data->dev_id, pci_dev->id.vendor_id,
		pci_dev->id.device_id);

	/* pci device init */
	if (vtpci_cryptodev_init(pci_dev, hw))
		return -1;

	if (virtio_crypto_init_device(cryptodev,
			VIRTIO_CRYPTO_PMD_GUEST_FEATURES) < 0)
		return -1;

	return 0;
}

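/* Tear down a device: stop it if running, release the control queue and
 * free the cryptodev data. Only the primary process may do this.
 */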
static int
virtio_crypto_dev_uninit(struct rte_cryptodev *cryptodev)
{
	struct virtio_crypto_hw *hw = cryptodev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
		return -EPERM;

	if (cryptodev->data->dev_started) {
		virtio_crypto_dev_stop(cryptodev);
		virtio_crypto_dev_close(cryptodev);
	}

	cryptodev->dev_ops = NULL;
	cryptodev->enqueue_burst = NULL;
	cryptodev->dequeue_burst = NULL;

	/* release control queue */
	virtio_crypto_queue_release(hw->cvq);

	rte_free(cryptodev->data);
	cryptodev->data = NULL;

	VIRTIO_CRYPTO_DRV_LOG_INFO("dev_uninit completed");

	return 0;
}

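/*
 * dev_configure: re-initialize the device (reset plus feature negotiation),
 * then create and start the control queue.
 */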
static int
virtio_crypto_dev_configure(struct rte_cryptodev *cryptodev,
	struct rte_cryptodev_config *config __rte_unused)
{
	struct virtio_crypto_hw *hw = cryptodev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	if (virtio_crypto_init_device(cryptodev,
			VIRTIO_CRYPTO_PMD_GUEST_FEATURES) < 0)
		return -1;

	/* Set up the control queue: virtqueues
	 * [0, 1, ..., (config->max_dataqueues - 1)] are data queues,
	 * virtqueue config->max_dataqueues is the control queue.
	 */
	if (virtio_crypto_ctrlq_setup(cryptodev, hw->max_dataqueues) < 0) {
		VIRTIO_CRYPTO_INIT_LOG_ERR("control queue setup error");
		return -1;
	}
	virtio_crypto_ctrlq_start(cryptodev);

	return 0;
}

static void
virtio_crypto_dev_stop(struct rte_cryptodev *dev)
{
	struct virtio_crypto_hw *hw = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();
	VIRTIO_CRYPTO_DRV_LOG_DBG("virtio_dev_stop");

	vtpci_cryptodev_reset(hw);

	virtio_crypto_dev_free_mbufs(dev);
	virtio_crypto_free_queues(dev);

	dev->data->dev_started = 0;
}

static int
virtio_crypto_dev_start(struct rte_cryptodev *dev)
{
	struct virtio_crypto_hw *hw = dev->data->dev_private;

	if (dev->data->dev_started)
		return 0;

	/* Do final configuration before queue engine starts */
	virtio_crypto_dataq_start(dev);
	vtpci_cryptodev_reinit_complete(hw);

	dev->data->dev_started = 1;

	return 0;
}

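/* Detach and free any buffers still held by the data virtqueues. */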
static void
virtio_crypto_dev_free_mbufs(struct rte_cryptodev *dev)
{
	uint32_t i;
	struct virtio_crypto_hw *hw = dev->data->dev_private;

	for (i = 0; i < hw->max_dataqueues; i++) {
		VIRTIO_CRYPTO_INIT_LOG_DBG("Before freeing dataq[%d] used "
			"and unused buf", i);
		VIRTQUEUE_DUMP((struct virtqueue *)
			dev->data->queue_pairs[i]);

		VIRTIO_CRYPTO_INIT_LOG_DBG("queue_pairs[%d]=%p",
				i, dev->data->queue_pairs[i]);

		virtqueue_detatch_unused(dev->data->queue_pairs[i]);

		VIRTIO_CRYPTO_INIT_LOG_DBG("After freeing dataq[%d] used and "
					"unused buf", i);
		VIRTQUEUE_DUMP(
			(struct virtqueue *)dev->data->queue_pairs[i]);
	}
}

static unsigned int
virtio_crypto_sym_get_session_private_size(
		struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	return RTE_ALIGN_CEIL(sizeof(struct virtio_crypto_session), 16);
}

static int
virtio_crypto_check_sym_session_paras(
		struct rte_cryptodev *dev)
{
	struct virtio_crypto_hw *hw;

	PMD_INIT_FUNC_TRACE();

	if (unlikely(dev == NULL)) {
		VIRTIO_CRYPTO_SESSION_LOG_ERR("dev is NULL");
		return -1;
	}
	if (unlikely(dev->data == NULL)) {
		VIRTIO_CRYPTO_SESSION_LOG_ERR("dev->data is NULL");
		return -1;
	}
	hw = dev->data->dev_private;
	if (unlikely(hw == NULL)) {
		VIRTIO_CRYPTO_SESSION_LOG_ERR("hw is NULL");
		return -1;
	}
	if (unlikely(hw->cvq == NULL)) {
		VIRTIO_CRYPTO_SESSION_LOG_ERR("vq is NULL");
		return -1;
	}

	return 0;
}

static int
virtio_crypto_check_sym_clear_session_paras(
		struct rte_cryptodev *dev,
		struct rte_cryptodev_sym_session *sess)
{
	PMD_INIT_FUNC_TRACE();

	if (sess == NULL) {
		VIRTIO_CRYPTO_SESSION_LOG_ERR("sym_session is NULL");
		return -1;
	}

	return virtio_crypto_check_sym_session_paras(dev);
}

#define NUM_ENTRY_SYM_CLEAR_SESSION 2

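/*
 * Destroy a symmetric session on the device.
 *
 * A DESTROY_SESSION ctrl request plus a device-writable status byte are
 * placed in one buffer and linked through a two-entry indirect descriptor
 * chain on the control queue. The function polls the used ring for
 * completion and, on success, clears the session object and returns it to
 * its mempool.
 */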
static void
virtio_crypto_sym_clear_session(
		struct rte_cryptodev *dev,
		struct rte_cryptodev_sym_session *sess)
{
	struct virtio_crypto_hw *hw;
	struct virtqueue *vq;
	struct virtio_crypto_session *session;
	struct virtio_crypto_op_ctrl_req *ctrl;
	struct vring_desc *desc;
	uint8_t *status;
	uint8_t needed = 1;
	uint32_t head;
	uint8_t *malloc_virt_addr;
	uint64_t malloc_phys_addr;
	uint8_t len_inhdr = sizeof(struct virtio_crypto_inhdr);
	uint32_t len_op_ctrl_req = sizeof(struct virtio_crypto_op_ctrl_req);
	uint32_t desc_offset = len_op_ctrl_req + len_inhdr;

	PMD_INIT_FUNC_TRACE();

	if (virtio_crypto_check_sym_clear_session_paras(dev, sess) < 0)
		return;

	hw = dev->data->dev_private;
	vq = hw->cvq;
	session = (struct virtio_crypto_session *)get_sym_session_private_data(
		sess, cryptodev_virtio_driver_id);
	if (session == NULL) {
		VIRTIO_CRYPTO_SESSION_LOG_ERR("Invalid session parameter");
		return;
	}

	VIRTIO_CRYPTO_SESSION_LOG_INFO("vq->vq_desc_head_idx = %d, "
			"vq = %p", vq->vq_desc_head_idx, vq);

	if (vq->vq_free_cnt < needed) {
		VIRTIO_CRYPTO_SESSION_LOG_ERR(
				"vq->vq_free_cnt = %d is less than %d, "
				"not enough", vq->vq_free_cnt, needed);
		return;
	}

	/*
	 * Allocate memory to hold the ctrl request, the returned status and
	 * the indirect desc vring.
	 */
	malloc_virt_addr = rte_malloc(NULL, len_op_ctrl_req + len_inhdr
		+ NUM_ENTRY_SYM_CLEAR_SESSION
		* sizeof(struct vring_desc), RTE_CACHE_LINE_SIZE);
	if (malloc_virt_addr == NULL) {
		VIRTIO_CRYPTO_SESSION_LOG_ERR("not enough heap room");
		return;
	}
	malloc_phys_addr = rte_malloc_virt2iova(malloc_virt_addr);

	/* assign ctrl request op part */
	ctrl = (struct virtio_crypto_op_ctrl_req *)malloc_virt_addr;
	ctrl->header.opcode = VIRTIO_CRYPTO_CIPHER_DESTROY_SESSION;
	/* default data virtqueue is 0 */
	ctrl->header.queue_id = 0;
	ctrl->u.destroy_session.session_id = session->session_id;

	/* status part */
	status = &(((struct virtio_crypto_inhdr *)
		((uint8_t *)malloc_virt_addr + len_op_ctrl_req))->status);
	*status = VIRTIO_CRYPTO_ERR;

	/* indirect desc vring part */
	desc = (struct vring_desc *)((uint8_t *)malloc_virt_addr
		+ desc_offset);

	/* ctrl request part */
	desc[0].addr = malloc_phys_addr;
	desc[0].len = len_op_ctrl_req;
	desc[0].flags = VRING_DESC_F_NEXT;
	desc[0].next = 1;

	/* status part */
	desc[1].addr = malloc_phys_addr + len_op_ctrl_req;
	desc[1].len = len_inhdr;
	desc[1].flags = VRING_DESC_F_WRITE;

	/* use only a single desc entry */
	head = vq->vq_desc_head_idx;
	vq->vq_ring.desc[head].flags = VRING_DESC_F_INDIRECT;
	vq->vq_ring.desc[head].addr = malloc_phys_addr + desc_offset;
	vq->vq_ring.desc[head].len
		= NUM_ENTRY_SYM_CLEAR_SESSION
		* sizeof(struct vring_desc);
	vq->vq_free_cnt -= needed;

	vq->vq_desc_head_idx = vq->vq_ring.desc[head].next;

	vq_update_avail_ring(vq, head);
	vq_update_avail_idx(vq);

	VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_queue_index = %d",
					vq->vq_queue_index);

	virtqueue_notify(vq);

	rte_rmb();
	while (vq->vq_used_cons_idx == vq->vq_ring.used->idx) {
		rte_rmb();
		usleep(100);
	}

	while (vq->vq_used_cons_idx != vq->vq_ring.used->idx) {
		uint32_t idx, desc_idx, used_idx;
		struct vring_used_elem *uep;

		used_idx = (uint32_t)(vq->vq_used_cons_idx
				& (vq->vq_nentries - 1));
		uep = &vq->vq_ring.used->ring[used_idx];
		idx = (uint32_t) uep->id;
		desc_idx = idx;
		while (vq->vq_ring.desc[desc_idx].flags
				& VRING_DESC_F_NEXT) {
			desc_idx = vq->vq_ring.desc[desc_idx].next;
			vq->vq_free_cnt++;
		}

		vq->vq_ring.desc[desc_idx].next = vq->vq_desc_head_idx;
		vq->vq_desc_head_idx = idx;
		vq->vq_used_cons_idx++;
		vq->vq_free_cnt++;
	}

	if (*status != VIRTIO_CRYPTO_OK) {
		VIRTIO_CRYPTO_SESSION_LOG_ERR("Close session failed "
				"status=%"PRIu32", session_id=%"PRIu64"",
				*status, session->session_id);
		rte_free(malloc_virt_addr);
		return;
	}

	VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_free_cnt=%d\n"
			"vq->vq_desc_head_idx=%d",
			vq->vq_free_cnt, vq->vq_desc_head_idx);

	VIRTIO_CRYPTO_SESSION_LOG_INFO("Close session %"PRIu64" successfully ",
			session->session_id);

	memset(session, 0, sizeof(struct virtio_crypto_session));
	struct rte_mempool *sess_mp = rte_mempool_from_obj(session);
	set_sym_session_private_data(sess, cryptodev_virtio_driver_id, NULL);
	rte_mempool_put(sess_mp, session);
	rte_free(malloc_virt_addr);
}

static struct rte_crypto_cipher_xform *
virtio_crypto_get_cipher_xform(struct rte_crypto_sym_xform *xform)
{
	do {
		if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
			return &xform->cipher;

		xform = xform->next;
	} while (xform);

	return NULL;
}

static struct rte_crypto_auth_xform *
virtio_crypto_get_auth_xform(struct rte_crypto_sym_xform *xform)
{
	do {
		if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH)
			return &xform->auth;

		xform = xform->next;
	} while (xform);

	return NULL;
}

/** Get xform chain order */
static int
virtio_crypto_get_chain_order(struct rte_crypto_sym_xform *xform)
{
	if (xform == NULL)
		return -1;

	/* Cipher Only */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
			xform->next == NULL)
		return VIRTIO_CRYPTO_CMD_CIPHER;

	/* Authentication Only */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
			xform->next == NULL)
		return VIRTIO_CRYPTO_CMD_AUTH;

	/* Authenticate then Cipher */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
			xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
		return VIRTIO_CRYPTO_CMD_HASH_CIPHER;

	/* Cipher then Authenticate */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
			xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
		return VIRTIO_CRYPTO_CMD_CIPHER_HASH;

	return -1;
}

static int
virtio_crypto_sym_pad_cipher_param(
		struct virtio_crypto_cipher_session_para *para,
		struct rte_crypto_cipher_xform *cipher_xform)
{
	switch (cipher_xform->algo) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		para->algo = VIRTIO_CRYPTO_CIPHER_AES_CBC;
		break;
	default:
		VIRTIO_CRYPTO_SESSION_LOG_ERR("Crypto: Unsupported "
				"Cipher alg %u", cipher_xform->algo);
		return -1;
	}

	para->keylen = cipher_xform->key.length;
	switch (cipher_xform->op) {
	case RTE_CRYPTO_CIPHER_OP_ENCRYPT:
		para->op = VIRTIO_CRYPTO_OP_ENCRYPT;
		break;
	case RTE_CRYPTO_CIPHER_OP_DECRYPT:
		para->op = VIRTIO_CRYPTO_OP_DECRYPT;
		break;
	default:
		VIRTIO_CRYPTO_SESSION_LOG_ERR("Unsupported cipher operation "
					"parameter");
		return -1;
	}

	return 0;
}

static int
virtio_crypto_sym_pad_auth_param(
		struct virtio_crypto_op_ctrl_req *ctrl,
		struct rte_crypto_auth_xform *auth_xform)
{
	uint32_t *algo;
	struct virtio_crypto_alg_chain_session_para *para =
		&(ctrl->u.sym_create_session.u.chain.para);

	switch (ctrl->u.sym_create_session.u.chain.para.hash_mode) {
	case VIRTIO_CRYPTO_SYM_HASH_MODE_PLAIN:
		algo = &(para->u.hash_param.algo);
		break;
	case VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH:
		algo = &(para->u.mac_param.algo);
		break;
	default:
		VIRTIO_CRYPTO_SESSION_LOG_ERR("Unsupported hash mode %u "
			"specified",
			ctrl->u.sym_create_session.u.chain.para.hash_mode);
		return -1;
	}

	switch (auth_xform->algo) {
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		*algo = VIRTIO_CRYPTO_MAC_HMAC_SHA1;
		break;
	default:
		VIRTIO_CRYPTO_SESSION_LOG_ERR(
			"Crypto: Undefined Hash algo %u specified",
			auth_xform->algo);
		return -1;
	}

	return 0;
}

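/*
 * Fill the CREATE_SESSION ctrl request from the crypto xform chain: cipher
 * algorithm/op/key length, IV offset and length, and (for chained sessions)
 * the hash/MAC parameters. The raw key bytes are copied into the
 * cipher_key_data/auth_key_data buffers supplied by the caller.
 */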
static int
virtio_crypto_sym_pad_op_ctrl_req(
		struct virtio_crypto_op_ctrl_req *ctrl,
		struct rte_crypto_sym_xform *xform, bool is_chained,
		uint8_t *cipher_key_data, uint8_t *auth_key_data,
		struct virtio_crypto_session *session)
{
	int ret;
	struct rte_crypto_auth_xform *auth_xform = NULL;
	struct rte_crypto_cipher_xform *cipher_xform = NULL;

	/* Get cipher xform from crypto xform chain */
	cipher_xform = virtio_crypto_get_cipher_xform(xform);
	if (cipher_xform) {
		if (cipher_xform->key.length > VIRTIO_CRYPTO_MAX_KEY_SIZE) {
			VIRTIO_CRYPTO_SESSION_LOG_ERR(
				"cipher key size cannot be longer than %u",
				VIRTIO_CRYPTO_MAX_KEY_SIZE);
			return -1;
		}
		if (cipher_xform->iv.length > VIRTIO_CRYPTO_MAX_IV_SIZE) {
			VIRTIO_CRYPTO_SESSION_LOG_ERR(
				"cipher IV size cannot be longer than %u",
				VIRTIO_CRYPTO_MAX_IV_SIZE);
			return -1;
		}
		if (is_chained)
			ret = virtio_crypto_sym_pad_cipher_param(
				&ctrl->u.sym_create_session.u.chain.para
						.cipher_param, cipher_xform);
		else
			ret = virtio_crypto_sym_pad_cipher_param(
				&ctrl->u.sym_create_session.u.cipher.para,
				cipher_xform);

		if (ret < 0) {
			VIRTIO_CRYPTO_SESSION_LOG_ERR(
				"pad cipher parameter failed");
			return -1;
		}

		memcpy(cipher_key_data, cipher_xform->key.data,
				cipher_xform->key.length);

		session->iv.offset = cipher_xform->iv.offset;
		session->iv.length = cipher_xform->iv.length;
	}

	/* Get auth xform from crypto xform chain */
	auth_xform = virtio_crypto_get_auth_xform(xform);
	if (auth_xform) {
		/* FIXME: support VIRTIO_CRYPTO_SYM_HASH_MODE_NESTED */
		struct virtio_crypto_alg_chain_session_para *para =
			&(ctrl->u.sym_create_session.u.chain.para);
		if (auth_xform->key.length) {
			if (auth_xform->key.length >
					VIRTIO_CRYPTO_MAX_KEY_SIZE) {
				VIRTIO_CRYPTO_SESSION_LOG_ERR(
				"auth key size cannot be longer than %u",
					VIRTIO_CRYPTO_MAX_KEY_SIZE);
				return -1;
			}
			para->hash_mode = VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH;
			para->u.mac_param.auth_key_len =
				(uint32_t)auth_xform->key.length;
			para->u.mac_param.hash_result_len =
				auth_xform->digest_length;
			memcpy(auth_key_data, auth_xform->key.data,
					auth_xform->key.length);
		} else {
			para->hash_mode = VIRTIO_CRYPTO_SYM_HASH_MODE_PLAIN;
			para->u.hash_param.hash_result_len =
				auth_xform->digest_length;
		}

		ret = virtio_crypto_sym_pad_auth_param(ctrl, auth_xform);
		if (ret < 0) {
			VIRTIO_CRYPTO_SESSION_LOG_ERR("pad auth parameter "
						"failed");
			return -1;
		}
	}

	return 0;
}

static int
virtio_crypto_check_sym_configure_session_paras(
		struct rte_cryptodev *dev,
		struct rte_crypto_sym_xform *xform,
		struct rte_cryptodev_sym_session *sym_sess,
		struct rte_mempool *mempool)
{
	if (unlikely(xform == NULL) || unlikely(sym_sess == NULL) ||
		unlikely(mempool == NULL)) {
		VIRTIO_CRYPTO_SESSION_LOG_ERR("NULL pointer");
		return -1;
	}

	if (virtio_crypto_check_sym_session_paras(dev) < 0)
		return -1;

	return 0;
}

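/*
 * Configure a symmetric session: allocate the private session object from
 * the mempool, translate the xform chain into a CREATE_SESSION ctrl request
 * (cipher-only or cipher+auth chaining), send it over the control queue and
 * attach the resulting private data to the rte_cryptodev_sym_session.
 *
 * Roughly, an application reaches this path through the generic cryptodev
 * API; a minimal sketch, assuming the two-step session API of this DPDK
 * generation:
 *
 *	struct rte_cryptodev_sym_session *sess =
 *		rte_cryptodev_sym_session_create(sess_mp);
 *	rte_cryptodev_sym_session_init(dev_id, sess, &xform, sess_priv_mp);
 *
 * which ends up invoking virtio_crypto_sym_configure_session() below.
 */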
static int
virtio_crypto_sym_configure_session(
		struct rte_cryptodev *dev,
		struct rte_crypto_sym_xform *xform,
		struct rte_cryptodev_sym_session *sess,
		struct rte_mempool *mempool)
{
	int ret;
	struct virtio_crypto_session crypto_sess;
	void *session_private = &crypto_sess;
	struct virtio_crypto_session *session;
	struct virtio_crypto_op_ctrl_req *ctrl_req;
	enum virtio_crypto_cmd_id cmd_id;
	uint8_t cipher_key_data[VIRTIO_CRYPTO_MAX_KEY_SIZE] = {0};
	uint8_t auth_key_data[VIRTIO_CRYPTO_MAX_KEY_SIZE] = {0};
	struct virtio_crypto_hw *hw;
	struct virtqueue *control_vq;

	PMD_INIT_FUNC_TRACE();

	ret = virtio_crypto_check_sym_configure_session_paras(dev, xform,
			sess, mempool);
	if (ret < 0) {
		VIRTIO_CRYPTO_SESSION_LOG_ERR("Invalid parameters");
		return ret;
	}

	if (rte_mempool_get(mempool, &session_private)) {
		VIRTIO_CRYPTO_SESSION_LOG_ERR(
			"Couldn't get object from session mempool");
		return -ENOMEM;
	}

	session = (struct virtio_crypto_session *)session_private;
	memset(session, 0, sizeof(struct virtio_crypto_session));
	ctrl_req = &session->ctrl;
	ctrl_req->header.opcode = VIRTIO_CRYPTO_CIPHER_CREATE_SESSION;
	/* FIXME: support multiqueue */
	ctrl_req->header.queue_id = 0;

	hw = dev->data->dev_private;
	control_vq = hw->cvq;

	cmd_id = virtio_crypto_get_chain_order(xform);
	if (cmd_id == VIRTIO_CRYPTO_CMD_CIPHER_HASH)
		ctrl_req->u.sym_create_session.u.chain.para.alg_chain_order
			= VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_CIPHER_THEN_HASH;
	if (cmd_id == VIRTIO_CRYPTO_CMD_HASH_CIPHER)
		ctrl_req->u.sym_create_session.u.chain.para.alg_chain_order
			= VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_HASH_THEN_CIPHER;

	switch (cmd_id) {
	case VIRTIO_CRYPTO_CMD_CIPHER_HASH:
	case VIRTIO_CRYPTO_CMD_HASH_CIPHER:
		ctrl_req->u.sym_create_session.op_type
			= VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING;

		ret = virtio_crypto_sym_pad_op_ctrl_req(ctrl_req,
			xform, true, cipher_key_data, auth_key_data, session);
		if (ret < 0) {
			VIRTIO_CRYPTO_SESSION_LOG_ERR(
				"padding sym op ctrl req failed");
			goto error_out;
		}
		ret = virtio_crypto_send_command(control_vq, ctrl_req,
			cipher_key_data, auth_key_data, session);
		if (ret < 0) {
			VIRTIO_CRYPTO_SESSION_LOG_ERR(
				"create session failed: %d", ret);
			goto error_out;
		}
		break;
	case VIRTIO_CRYPTO_CMD_CIPHER:
		ctrl_req->u.sym_create_session.op_type
			= VIRTIO_CRYPTO_SYM_OP_CIPHER;
		ret = virtio_crypto_sym_pad_op_ctrl_req(ctrl_req, xform,
			false, cipher_key_data, auth_key_data, session);
		if (ret < 0) {
			VIRTIO_CRYPTO_SESSION_LOG_ERR(
				"padding sym op ctrl req failed");
			goto error_out;
		}
		ret = virtio_crypto_send_command(control_vq, ctrl_req,
			cipher_key_data, NULL, session);
		if (ret < 0) {
			VIRTIO_CRYPTO_SESSION_LOG_ERR(
				"create session failed: %d", ret);
			goto error_out;
		}
		break;
	default:
		VIRTIO_CRYPTO_SESSION_LOG_ERR(
			"Unsupported operation chain order parameter");
		goto error_out;
	}

	set_sym_session_private_data(sess, dev->driver_id,
		session_private);

	return 0;

error_out:
	/* return the unused session object to its mempool */
	rte_mempool_put(mempool, session_private);
	return -1;
}

static void
virtio_crypto_dev_info_get(struct rte_cryptodev *dev,
		struct rte_cryptodev_info *info)
{
	struct virtio_crypto_hw *hw = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	if (info != NULL) {
		info->driver_id = cryptodev_virtio_driver_id;
		info->feature_flags = dev->feature_flags;
		info->max_nb_queue_pairs = hw->max_dataqueues;
		/* No limit on the number of sessions */
		info->sym.max_nb_sessions = 0;
		info->capabilities = hw->virtio_dev_capabilities;
	}
}

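/*
 * PCI probe/remove entry points: probe creates a cryptodev named after the
 * PCI address and initializes the virtio device; remove looks the device
 * up by the same name and uninitializes it.
 */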
static int
crypto_virtio_pci_probe(
	struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	struct rte_cryptodev_pmd_init_params init_params = {
		.name = "",
		.socket_id = pci_dev->device.numa_node,
		.private_data_size = sizeof(struct virtio_crypto_hw)
	};
	char name[RTE_CRYPTODEV_NAME_MAX_LEN];

	VIRTIO_CRYPTO_DRV_LOG_DBG("Found Crypto device at %02x:%02x.%x",
			pci_dev->addr.bus,
			pci_dev->addr.devid,
			pci_dev->addr.function);

	rte_pci_device_name(&pci_dev->addr, name, sizeof(name));

	return crypto_virtio_create(name, pci_dev, &init_params);
}

static int
crypto_virtio_pci_remove(
	struct rte_pci_device *pci_dev __rte_unused)
{
	struct rte_cryptodev *cryptodev;
	char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];

	if (pci_dev == NULL)
		return -EINVAL;

	rte_pci_device_name(&pci_dev->addr, cryptodev_name,
			sizeof(cryptodev_name));

	cryptodev = rte_cryptodev_pmd_get_named_dev(cryptodev_name);
	if (cryptodev == NULL)
		return -ENODEV;

	return virtio_crypto_dev_uninit(cryptodev);
}

static struct rte_pci_driver rte_virtio_crypto_driver = {
	.id_table = pci_id_virtio_crypto_map,
	.drv_flags = 0,
	.probe = crypto_virtio_pci_probe,
	.remove = crypto_virtio_pci_remove
};

static struct cryptodev_driver virtio_crypto_drv;

RTE_PMD_REGISTER_PCI(CRYPTODEV_NAME_VIRTIO_PMD, rte_virtio_crypto_driver);
RTE_PMD_REGISTER_CRYPTO_DRIVER(virtio_crypto_drv,
	rte_virtio_crypto_driver.driver,
	cryptodev_virtio_driver_id);
RTE_LOG_REGISTER_SUFFIX(virtio_crypto_logtype_init, init, NOTICE);
RTE_LOG_REGISTER_SUFFIX(virtio_crypto_logtype_session, session, NOTICE);
RTE_LOG_REGISTER_SUFFIX(virtio_crypto_logtype_rx, rx, NOTICE);
RTE_LOG_REGISTER_SUFFIX(virtio_crypto_logtype_tx, tx, NOTICE);
RTE_LOG_REGISTER_SUFFIX(virtio_crypto_logtype_driver, driver, NOTICE);