drivers/crypto/virtio/virtio_cryptodev.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.
3  */
4 #include <stdbool.h>
5 #include <unistd.h>
6
7 #include <rte_common.h>
8 #include <rte_errno.h>
9 #include <rte_pci.h>
10 #include <rte_bus_pci.h>
11 #include <rte_cryptodev.h>
12 #include <rte_cryptodev_pmd.h>
13 #include <rte_eal.h>
14
15 #include "virtio_cryptodev.h"
16 #include "virtqueue.h"
17 #include "virtio_crypto_algs.h"
18 #include "virtio_crypto_capabilities.h"
19
20 int virtio_crypto_logtype_init;
21 int virtio_crypto_logtype_session;
22 int virtio_crypto_logtype_rx;
23 int virtio_crypto_logtype_tx;
24 int virtio_crypto_logtype_driver;
25
26 static int virtio_crypto_dev_configure(struct rte_cryptodev *dev,
27                 struct rte_cryptodev_config *config);
28 static int virtio_crypto_dev_start(struct rte_cryptodev *dev);
29 static void virtio_crypto_dev_stop(struct rte_cryptodev *dev);
30 static int virtio_crypto_dev_close(struct rte_cryptodev *dev);
31 static void virtio_crypto_dev_info_get(struct rte_cryptodev *dev,
32                 struct rte_cryptodev_info *dev_info);
33 static void virtio_crypto_dev_stats_get(struct rte_cryptodev *dev,
34                 struct rte_cryptodev_stats *stats);
35 static void virtio_crypto_dev_stats_reset(struct rte_cryptodev *dev);
36 static int virtio_crypto_qp_setup(struct rte_cryptodev *dev,
37                 uint16_t queue_pair_id,
38                 const struct rte_cryptodev_qp_conf *qp_conf,
39                 int socket_id,
40                 struct rte_mempool *session_pool);
41 static int virtio_crypto_qp_release(struct rte_cryptodev *dev,
42                 uint16_t queue_pair_id);
43 static void virtio_crypto_dev_free_mbufs(struct rte_cryptodev *dev);
44 static unsigned int virtio_crypto_sym_get_session_private_size(
45                 struct rte_cryptodev *dev);
46 static void virtio_crypto_sym_clear_session(struct rte_cryptodev *dev,
47                 struct rte_cryptodev_sym_session *sess);
48 static int virtio_crypto_sym_configure_session(struct rte_cryptodev *dev,
49                 struct rte_crypto_sym_xform *xform,
50                 struct rte_cryptodev_sym_session *session,
51                 struct rte_mempool *mp);
52
53 /*
54  * The set of PCI devices this driver supports
55  */
56 static const struct rte_pci_id pci_id_virtio_crypto_map[] = {
57         { RTE_PCI_DEVICE(VIRTIO_CRYPTO_PCI_VENDORID,
58                                 VIRTIO_CRYPTO_PCI_DEVICEID) },
59         { .vendor_id = 0, /* sentinel */ },
60 };
61
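/*
 * Capabilities advertised to applications. VIRTIO_SYM_CAPABILITIES comes from
 * virtio_crypto_capabilities.h; note that the session-creation code below only
 * accepts AES-CBC cipher and SHA1-HMAC auth.
 */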
62 static const struct rte_cryptodev_capabilities virtio_capabilities[] = {
63         VIRTIO_SYM_CAPABILITIES,
64         RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
65 };
66
67 uint8_t cryptodev_virtio_driver_id;
68
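/*
 * A session-create control request is built from up to four indirect
 * descriptors: the ctrl request, an optional cipher key, an optional auth key
 * and the device-writable session input.
 */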
69 #define NUM_ENTRY_SYM_CREATE_SESSION 4
70
71 static int
72 virtio_crypto_send_command(struct virtqueue *vq,
73                 struct virtio_crypto_op_ctrl_req *ctrl, uint8_t *cipher_key,
74                 uint8_t *auth_key, struct virtio_crypto_session *session)
75 {
76         uint8_t idx = 0;
77         uint8_t needed = 1;
78         uint32_t head = 0;
79         uint32_t len_cipher_key = 0;
80         uint32_t len_auth_key = 0;
81         uint32_t len_ctrl_req = sizeof(struct virtio_crypto_op_ctrl_req);
82         uint32_t len_session_input = sizeof(struct virtio_crypto_session_input);
83         uint32_t len_total = 0;
84         uint32_t input_offset = 0;
85         void *virt_addr_started = NULL;
86         phys_addr_t phys_addr_started;
87         struct vring_desc *desc;
88         uint32_t desc_offset;
89         struct virtio_crypto_session_input *input;
90         int ret;
91
92         PMD_INIT_FUNC_TRACE();
93
94         if (session == NULL) {
95                 VIRTIO_CRYPTO_SESSION_LOG_ERR("session is NULL.");
96                 return -EINVAL;
97         }
98         /* cipher-only is supported: auth_key may be NULL, but cipher_key must be set */
99         if (!cipher_key) {
100                 VIRTIO_CRYPTO_SESSION_LOG_ERR("cipher key is NULL.");
101                 return -EINVAL;
102         }
103
104         head = vq->vq_desc_head_idx;
105         VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_desc_head_idx = %d, vq = %p",
106                                         head, vq);
107
108         if (vq->vq_free_cnt < needed) {
109                 VIRTIO_CRYPTO_SESSION_LOG_ERR("Not enough free descriptors");
110                 return -ENOSPC;
111         }
112
113         /* calculate the length of cipher key */
114         if (cipher_key) {
115                 switch (ctrl->u.sym_create_session.op_type) {
116                 case VIRTIO_CRYPTO_SYM_OP_CIPHER:
117                         len_cipher_key
118                                 = ctrl->u.sym_create_session.u.cipher
119                                                         .para.keylen;
120                         break;
121                 case VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING:
122                         len_cipher_key
123                                 = ctrl->u.sym_create_session.u.chain
124                                         .para.cipher_param.keylen;
125                         break;
126                 default:
127                         VIRTIO_CRYPTO_SESSION_LOG_ERR("invalid op type");
128                         return -EINVAL;
129                 }
130         }
131
132         /* calculate the length of auth key */
133         if (auth_key) {
134                 len_auth_key =
135                         ctrl->u.sym_create_session.u.chain.para.u.mac_param
136                                 .auth_key_len;
137         }
138
139         /*
140          * Allocate memory to hold the ctrl request, cipher key, auth key,
141          * session input and the indirect vring descriptors.
142          */
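        /*
         * Layout of the single allocation (offsets accumulate in len_total
         * and input_offset as the parts are copied in):
         *   [0, len_ctrl_req)           ctrl request
         *   [.., +len_cipher_key)       cipher key, if any
         *   [.., +len_auth_key)         auth key, if any
         *   [.., +len_session_input)    session input (written by the device)
         *   [desc_offset, ..)           indirect vring_desc table
         */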
143         desc_offset = len_ctrl_req + len_cipher_key + len_auth_key
144                 + len_session_input;
145         virt_addr_started = rte_malloc(NULL,
146                 desc_offset + NUM_ENTRY_SYM_CREATE_SESSION
147                         * sizeof(struct vring_desc), RTE_CACHE_LINE_SIZE);
148         if (virt_addr_started == NULL) {
149                 VIRTIO_CRYPTO_SESSION_LOG_ERR("not enough heap memory");
150                 return -ENOSPC;
151         }
152         phys_addr_started = rte_malloc_virt2iova(virt_addr_started);
153
154         /* address to store indirect vring desc entries */
155         desc = (struct vring_desc *)
156                 ((uint8_t *)virt_addr_started + desc_offset);
157
158         /*  ctrl req part */
159         memcpy(virt_addr_started, ctrl, len_ctrl_req);
160         desc[idx].addr = phys_addr_started;
161         desc[idx].len = len_ctrl_req;
162         desc[idx].flags = VRING_DESC_F_NEXT;
163         desc[idx].next = idx + 1;
164         idx++;
165         len_total += len_ctrl_req;
166         input_offset += len_ctrl_req;
167
168         /* cipher key part */
169         if (len_cipher_key > 0) {
170                 memcpy((uint8_t *)virt_addr_started + len_total,
171                         cipher_key, len_cipher_key);
172
173                 desc[idx].addr = phys_addr_started + len_total;
174                 desc[idx].len = len_cipher_key;
175                 desc[idx].flags = VRING_DESC_F_NEXT;
176                 desc[idx].next = idx + 1;
177                 idx++;
178                 len_total += len_cipher_key;
179                 input_offset += len_cipher_key;
180         }
181
182         /* auth key part */
183         if (len_auth_key > 0) {
184                 memcpy((uint8_t *)virt_addr_started + len_total,
185                         auth_key, len_auth_key);
186
187                 desc[idx].addr = phys_addr_started + len_total;
188                 desc[idx].len = len_auth_key;
189                 desc[idx].flags = VRING_DESC_F_NEXT;
190                 desc[idx].next = idx + 1;
191                 idx++;
192                 len_total += len_auth_key;
193                 input_offset += len_auth_key;
194         }
195
196         /* input part */
197         input = (struct virtio_crypto_session_input *)
198                 ((uint8_t *)virt_addr_started + input_offset);
199         input->status = VIRTIO_CRYPTO_ERR;
200         input->session_id = ~0ULL;
201         desc[idx].addr = phys_addr_started + len_total;
202         desc[idx].len = len_session_input;
203         desc[idx].flags = VRING_DESC_F_WRITE;
204         idx++;
205
206         /* the real ring uses a single descriptor pointing at the indirect table */
207         vq->vq_ring.desc[head].addr = phys_addr_started + desc_offset;
208         vq->vq_ring.desc[head].len = idx * sizeof(struct vring_desc);
209         vq->vq_ring.desc[head].flags = VRING_DESC_F_INDIRECT;
210         vq->vq_free_cnt--;
211
212         vq->vq_desc_head_idx = vq->vq_ring.desc[head].next;
213
214         vq_update_avail_ring(vq, head);
215         vq_update_avail_idx(vq);
216
217         VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_queue_index = %d",
218                                         vq->vq_queue_index);
219
220         virtqueue_notify(vq);
221
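        /*
         * The control queue is used synchronously: after notifying the device,
         * poll the used ring (no interrupts) until the request has been
         * consumed.
         */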
222         rte_rmb();
223         while (vq->vq_used_cons_idx == vq->vq_ring.used->idx) {
224                 rte_rmb();
225                 usleep(100);
226         }
227
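        /*
         * Drain the used ring: each used element returns one descriptor chain,
         * which is walked and linked back onto the free list headed by
         * vq_desc_head_idx, replenishing vq_free_cnt.
         */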
228         while (vq->vq_used_cons_idx != vq->vq_ring.used->idx) {
229                 uint32_t idx, desc_idx, used_idx;
230                 struct vring_used_elem *uep;
231
232                 used_idx = (uint32_t)(vq->vq_used_cons_idx
233                                 & (vq->vq_nentries - 1));
234                 uep = &vq->vq_ring.used->ring[used_idx];
235                 idx = (uint32_t) uep->id;
236                 desc_idx = idx;
237
238                 while (vq->vq_ring.desc[desc_idx].flags & VRING_DESC_F_NEXT) {
239                         desc_idx = vq->vq_ring.desc[desc_idx].next;
240                         vq->vq_free_cnt++;
241                 }
242
243                 vq->vq_ring.desc[desc_idx].next = vq->vq_desc_head_idx;
244                 vq->vq_desc_head_idx = idx;
245
246                 vq->vq_used_cons_idx++;
247                 vq->vq_free_cnt++;
248         }
249
250         VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_free_cnt=%d\n"
251                         "vq->vq_desc_head_idx=%d",
252                         vq->vq_free_cnt, vq->vq_desc_head_idx);
253
254         /* get the result */
255         if (input->status != VIRTIO_CRYPTO_OK) {
256                 VIRTIO_CRYPTO_SESSION_LOG_ERR("Backend failed to create session: "
257                                 "status=%u, session_id=%" PRIu64,
258                                 input->status, input->session_id);
259                 rte_free(virt_addr_started);
260                 ret = -1;
261         } else {
262                 session->session_id = input->session_id;
263
264                 VIRTIO_CRYPTO_SESSION_LOG_INFO("Session created successfully, "
265                                 "session_id=%" PRIu64, input->session_id);
266                 rte_free(virt_addr_started);
267                 ret = 0;
268         }
269
270         return ret;
271 }
272
273 void
274 virtio_crypto_queue_release(struct virtqueue *vq)
275 {
276         struct virtio_crypto_hw *hw;
277
278         PMD_INIT_FUNC_TRACE();
279
280         if (vq) {
281                 hw = vq->hw;
282                 /* Select and deactivate the queue */
283                 VTPCI_OPS(hw)->del_queue(hw, vq);
284
285                 rte_memzone_free(vq->mz);
286                 rte_mempool_free(vq->mpool);
287                 rte_free(vq);
288         }
289 }
290
291 #define MPOOL_MAX_NAME_SZ 32
292
293 int
294 virtio_crypto_queue_setup(struct rte_cryptodev *dev,
295                 int queue_type,
296                 uint16_t vtpci_queue_idx,
297                 uint16_t nb_desc,
298                 int socket_id,
299                 struct virtqueue **pvq)
300 {
301         char vq_name[VIRTQUEUE_MAX_NAME_SZ];
302         char mpool_name[MPOOL_MAX_NAME_SZ];
303         const struct rte_memzone *mz;
304         unsigned int vq_size, size;
305         struct virtio_crypto_hw *hw = dev->data->dev_private;
306         struct virtqueue *vq = NULL;
307         uint32_t i = 0;
308         uint32_t j;
309
310         PMD_INIT_FUNC_TRACE();
311
312         VIRTIO_CRYPTO_INIT_LOG_DBG("setting up queue: %u", vtpci_queue_idx);
313
314         /*
315          * Read the virtqueue size from the Queue Size field.
316          * It is always a power of 2; if it is 0 the virtqueue does not exist.
317          */
318         vq_size = VTPCI_OPS(hw)->get_queue_num(hw, vtpci_queue_idx);
319         if (vq_size == 0) {
320                 VIRTIO_CRYPTO_INIT_LOG_ERR("virtqueue does not exist");
321                 return -EINVAL;
322         }
323         VIRTIO_CRYPTO_INIT_LOG_DBG("vq_size: %u", vq_size);
324
325         if (!rte_is_power_of_2(vq_size)) {
326                 VIRTIO_CRYPTO_INIT_LOG_ERR("virtqueue size is not a power of 2");
327                 return -EINVAL;
328         }
329
330         if (queue_type == VTCRYPTO_DATAQ) {
331                 snprintf(vq_name, sizeof(vq_name), "dev%d_dataqueue%d",
332                                 dev->data->dev_id, vtpci_queue_idx);
333                 snprintf(mpool_name, sizeof(mpool_name),
334                                 "dev%d_dataqueue%d_mpool",
335                                 dev->data->dev_id, vtpci_queue_idx);
336         } else if (queue_type == VTCRYPTO_CTRLQ) {
337                 snprintf(vq_name, sizeof(vq_name), "dev%d_controlqueue",
338                                 dev->data->dev_id);
339                 snprintf(mpool_name, sizeof(mpool_name),
340                                 "dev%d_controlqueue_mpool",
341                                 dev->data->dev_id);
342         }
343         size = RTE_ALIGN_CEIL(sizeof(*vq) +
344                                 vq_size * sizeof(struct vq_desc_extra),
345                                 RTE_CACHE_LINE_SIZE);
346         vq = rte_zmalloc_socket(vq_name, size, RTE_CACHE_LINE_SIZE,
347                                 socket_id);
348         if (vq == NULL) {
349                 VIRTIO_CRYPTO_INIT_LOG_ERR("Can not allocate virtqueue");
350                 return -ENOMEM;
351         }
352
353         if (queue_type == VTCRYPTO_DATAQ) {
354                 /* pre-allocate a mempool and use it in the data plane to
355                  * improve performance
356                  */
357                 vq->mpool = rte_mempool_lookup(mpool_name);
358                 if (vq->mpool == NULL)
359                         vq->mpool = rte_mempool_create(mpool_name,
360                                         vq_size,
361                                         sizeof(struct virtio_crypto_op_cookie),
362                                         RTE_CACHE_LINE_SIZE, 0,
363                                         NULL, NULL, NULL, NULL, socket_id,
364                                         0);
365                 if (!vq->mpool) {
366                         VIRTIO_CRYPTO_DRV_LOG_ERR("Virtio Crypto PMD "
367                                         "Cannot create mempool");
368                         goto mpool_create_err;
369                 }
370                 for (i = 0; i < vq_size; i++) {
371                         vq->vq_descx[i].cookie =
372                                 rte_zmalloc("crypto PMD op cookie pointer",
373                                         sizeof(struct virtio_crypto_op_cookie),
374                                         RTE_CACHE_LINE_SIZE);
375                         if (vq->vq_descx[i].cookie == NULL) {
376                                 VIRTIO_CRYPTO_DRV_LOG_ERR("Failed to "
377                                                 "alloc mem for cookie");
378                                 goto cookie_alloc_err;
379                         }
380                 }
381         }
382
383         vq->hw = hw;
384         vq->dev_id = dev->data->dev_id;
385         vq->vq_queue_index = vtpci_queue_idx;
386         vq->vq_nentries = vq_size;
387
388         /*
389          * Using part of the vring entries is permitted, but the maximum
390          * is vq_size
391          */
392         if (nb_desc == 0 || nb_desc > vq_size)
393                 nb_desc = vq_size;
394         vq->vq_free_cnt = nb_desc;
395
396         /*
397          * Reserve a memzone for vring elements
398          */
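        /*
         * vring_size() accounts for the descriptor table, the avail ring and
         * the used ring of a split virtqueue; the result is then rounded up
         * to VIRTIO_PCI_VRING_ALIGN before the memzone is reserved.
         */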
399         size = vring_size(vq_size, VIRTIO_PCI_VRING_ALIGN);
400         vq->vq_ring_size = RTE_ALIGN_CEIL(size, VIRTIO_PCI_VRING_ALIGN);
401         VIRTIO_CRYPTO_INIT_LOG_DBG("%s vring_size: %d, rounded_vring_size: %d",
402                         (queue_type == VTCRYPTO_DATAQ) ? "dataq" : "ctrlq",
403                         size, vq->vq_ring_size);
404
405         mz = rte_memzone_reserve_aligned(vq_name, vq->vq_ring_size,
406                         socket_id, 0, VIRTIO_PCI_VRING_ALIGN);
407         if (mz == NULL) {
408                 if (rte_errno == EEXIST)
409                         mz = rte_memzone_lookup(vq_name);
410                 if (mz == NULL) {
411                         VIRTIO_CRYPTO_INIT_LOG_ERR("not enough memory");
412                         goto mz_reserve_err;
413                 }
414         }
415
416         /*
417          * The virtio PCI VIRTIO_PCI_QUEUE_PFN register is 32 bits wide and
418          * only accepts a 32-bit page frame number, so the vring must lie
419          * below 2^(32 + VIRTIO_PCI_QUEUE_ADDR_SHIFT) = 16TB; check that here.
420          */
421         if ((mz->phys_addr + vq->vq_ring_size - 1)
422                                 >> (VIRTIO_PCI_QUEUE_ADDR_SHIFT + 32)) {
423                 VIRTIO_CRYPTO_INIT_LOG_ERR("vring address shouldn't be "
424                                         "above 16TB!");
425                 goto vring_addr_err;
426         }
427
428         memset(mz->addr, 0, mz->len);
429         vq->mz = mz;
430         vq->vq_ring_mem = mz->phys_addr;
431         vq->vq_ring_virt_mem = mz->addr;
432         VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_ring_mem(physical): 0x%"PRIx64,
433                                         (uint64_t)mz->phys_addr);
434         VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_ring_virt_mem: 0x%"PRIx64,
435                                         (uint64_t)(uintptr_t)mz->addr);
436
437         *pvq = vq;
438
439         return 0;
440
441 vring_addr_err:
442         rte_memzone_free(mz);
443 mz_reserve_err:
444 cookie_alloc_err:
445         rte_mempool_free(vq->mpool);
446         if (i != 0) {
447                 for (j = 0; j < i; j++)
448                         rte_free(vq->vq_descx[j].cookie);
449         }
450 mpool_create_err:
451         rte_free(vq);
452         return -ENOMEM;
453 }
454
455 static int
456 virtio_crypto_ctrlq_setup(struct rte_cryptodev *dev, uint16_t queue_idx)
457 {
458         int ret;
459         struct virtqueue *vq;
460         struct virtio_crypto_hw *hw = dev->data->dev_private;
461
462         /* if virtio device has started, do not touch the virtqueues */
463         if (dev->data->dev_started)
464                 return 0;
465
466         PMD_INIT_FUNC_TRACE();
467
468         ret = virtio_crypto_queue_setup(dev, VTCRYPTO_CTRLQ, queue_idx,
469                         0, SOCKET_ID_ANY, &vq);
470         if (ret < 0) {
471                 VIRTIO_CRYPTO_INIT_LOG_ERR("control vq initialization failed");
472                 return ret;
473         }
474
475         hw->cvq = vq;
476
477         return 0;
478 }
479
480 static void
481 virtio_crypto_free_queues(struct rte_cryptodev *dev)
482 {
483         unsigned int i;
484         struct virtio_crypto_hw *hw = dev->data->dev_private;
485
486         PMD_INIT_FUNC_TRACE();
487
488         /* control queue release */
489         virtio_crypto_queue_release(hw->cvq);
490
491         /* data queue release */
492         for (i = 0; i < hw->max_dataqueues; i++)
493                 virtio_crypto_queue_release(dev->data->queue_pairs[i]);
494 }
495
496 static int
497 virtio_crypto_dev_close(struct rte_cryptodev *dev __rte_unused)
498 {
499         return 0;
500 }
501
502 /*
503  * dev_ops for virtio, bare necessities for basic operation
504  */
505 static struct rte_cryptodev_ops virtio_crypto_dev_ops = {
506         /* Device related operations */
507         .dev_configure                   = virtio_crypto_dev_configure,
508         .dev_start                       = virtio_crypto_dev_start,
509         .dev_stop                        = virtio_crypto_dev_stop,
510         .dev_close                       = virtio_crypto_dev_close,
511         .dev_infos_get                   = virtio_crypto_dev_info_get,
512
513         .stats_get                       = virtio_crypto_dev_stats_get,
514         .stats_reset                     = virtio_crypto_dev_stats_reset,
515
516         .queue_pair_setup                = virtio_crypto_qp_setup,
517         .queue_pair_release              = virtio_crypto_qp_release,
518         .queue_pair_start                = NULL,
519         .queue_pair_stop                 = NULL,
520         .queue_pair_count                = NULL,
521
522         /* Crypto related operations */
523         .session_get_size       = virtio_crypto_sym_get_session_private_size,
524         .session_configure      = virtio_crypto_sym_configure_session,
525         .session_clear          = virtio_crypto_sym_clear_session,
526         .qp_attach_session = NULL,
527         .qp_detach_session = NULL
528 };
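/*
 * These callbacks are not invoked directly; applications reach them through
 * the public cryptodev API. A sketch of the usual call chain (not driver code):
 *   rte_cryptodev_configure()        -> .dev_configure
 *   rte_cryptodev_queue_pair_setup() -> .queue_pair_setup
 *   rte_cryptodev_start()            -> .dev_start
 *   rte_cryptodev_sym_session_init() -> .session_configure
 */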
529
530 static void
531 virtio_crypto_update_stats(struct rte_cryptodev *dev,
532                 struct rte_cryptodev_stats *stats)
533 {
534         unsigned int i;
535         struct virtio_crypto_hw *hw = dev->data->dev_private;
536
537         PMD_INIT_FUNC_TRACE();
538
539         if (stats == NULL) {
540                 VIRTIO_CRYPTO_DRV_LOG_ERR("invalid pointer");
541                 return;
542         }
543
544         for (i = 0; i < hw->max_dataqueues; i++) {
545                 const struct virtqueue *data_queue
546                         = dev->data->queue_pairs[i];
547                 if (data_queue == NULL)
548                         continue;
549
550                 stats->enqueued_count += data_queue->packets_sent_total;
551                 stats->enqueue_err_count += data_queue->packets_sent_failed;
552
553                 stats->dequeued_count += data_queue->packets_received_total;
554                 stats->dequeue_err_count
555                         += data_queue->packets_received_failed;
556         }
557 }
558
559 static void
560 virtio_crypto_dev_stats_get(struct rte_cryptodev *dev,
561                 struct rte_cryptodev_stats *stats)
562 {
563         PMD_INIT_FUNC_TRACE();
564
565         virtio_crypto_update_stats(dev, stats);
566 }
567
568 static void
569 virtio_crypto_dev_stats_reset(struct rte_cryptodev *dev)
570 {
571         unsigned int i;
572         struct virtio_crypto_hw *hw = dev->data->dev_private;
573
574         PMD_INIT_FUNC_TRACE();
575
576         for (i = 0; i < hw->max_dataqueues; i++) {
577                 struct virtqueue *data_queue = dev->data->queue_pairs[i];
578                 if (data_queue == NULL)
579                         continue;
580
581                 data_queue->packets_sent_total = 0;
582                 data_queue->packets_sent_failed = 0;
583
584                 data_queue->packets_received_total = 0;
585                 data_queue->packets_received_failed = 0;
586         }
587 }
588
589 static int
590 virtio_crypto_qp_setup(struct rte_cryptodev *dev, uint16_t queue_pair_id,
591                 const struct rte_cryptodev_qp_conf *qp_conf,
592                 int socket_id,
593                 struct rte_mempool *session_pool __rte_unused)
594 {
595         int ret;
596         struct virtqueue *vq;
597
598         PMD_INIT_FUNC_TRACE();
599
600         /* if virtio dev is started, do not touch the virtqueues */
601         if (dev->data->dev_started)
602                 return 0;
603
604         ret = virtio_crypto_queue_setup(dev, VTCRYPTO_DATAQ, queue_pair_id,
605                         qp_conf->nb_descriptors, socket_id, &vq);
606         if (ret < 0) {
607                 VIRTIO_CRYPTO_INIT_LOG_ERR(
608                         "virtio crypto data queue initialization failed\n");
609                 return ret;
610         }
611
612         dev->data->queue_pairs[queue_pair_id] = vq;
613
614         return 0;
615 }
616
617 static int
618 virtio_crypto_qp_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
619 {
620         struct virtqueue *vq
621                 = (struct virtqueue *)dev->data->queue_pairs[queue_pair_id];
622
623         PMD_INIT_FUNC_TRACE();
624
625         if (vq == NULL) {
626                 VIRTIO_CRYPTO_DRV_LOG_DBG("vq already freed");
627                 return 0;
628         }
629
630         virtio_crypto_queue_release(vq);
631         return 0;
632 }
633
634 static int
635 virtio_negotiate_features(struct virtio_crypto_hw *hw, uint64_t req_features)
636 {
637         uint64_t host_features;
638
639         PMD_INIT_FUNC_TRACE();
640
641         /* Prepare guest_features: feature that driver wants to support */
642         VIRTIO_CRYPTO_INIT_LOG_DBG("guest_features before negotiate = %" PRIx64,
643                 req_features);
644
645         /* Read device(host) feature bits */
646         host_features = VTPCI_OPS(hw)->get_features(hw);
647         VIRTIO_CRYPTO_INIT_LOG_DBG("host_features before negotiate = %" PRIx64,
648                 host_features);
649
650         /*
651          * Negotiate features: the subset of device feature bits that the
652          * driver also requested is written back as the guest feature bits.
653          */
654         hw->guest_features = req_features;
655         hw->guest_features = vtpci_cryptodev_negotiate_features(hw,
656                                                         host_features);
657         VIRTIO_CRYPTO_INIT_LOG_DBG("features after negotiate = %" PRIx64,
658                 hw->guest_features);
659
660         if (hw->modern) {
661                 if (!vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) {
662                         VIRTIO_CRYPTO_INIT_LOG_ERR(
663                                 "VIRTIO_F_VERSION_1 feature is not enabled.");
664                         return -1;
665                 }
666                 vtpci_cryptodev_set_status(hw,
667                         VIRTIO_CONFIG_STATUS_FEATURES_OK);
668                 if (!(vtpci_cryptodev_get_status(hw) &
669                         VIRTIO_CONFIG_STATUS_FEATURES_OK)) {
670                         VIRTIO_CRYPTO_INIT_LOG_ERR("failed to set FEATURES_OK "
671                                                 "status!");
672                         return -1;
673                 }
674         }
675
676         hw->req_guest_features = req_features;
677
678         return 0;
679 }
680
681 /* reset device and renegotiate features if needed */
682 static int
683 virtio_crypto_init_device(struct rte_cryptodev *cryptodev,
684         uint64_t req_features)
685 {
686         struct virtio_crypto_hw *hw = cryptodev->data->dev_private;
687         struct virtio_crypto_config local_config;
688         struct virtio_crypto_config *config = &local_config;
689
690         PMD_INIT_FUNC_TRACE();
691
692         /* Reset the device, although not strictly necessary at startup */
693         vtpci_cryptodev_reset(hw);
694
695         /* Tell the host we've noticed this device. */
696         vtpci_cryptodev_set_status(hw, VIRTIO_CONFIG_STATUS_ACK);
697
698         /* Tell the host we know how to drive the device. */
699         vtpci_cryptodev_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER);
700         if (virtio_negotiate_features(hw, req_features) < 0)
701                 return -1;
702
703         /* Get status of the device */
704         vtpci_read_cryptodev_config(hw,
705                 offsetof(struct virtio_crypto_config, status),
706                 &config->status, sizeof(config->status));
707         if (config->status != VIRTIO_CRYPTO_S_HW_READY) {
708                 VIRTIO_CRYPTO_DRV_LOG_ERR("accelerator hardware is "
709                                 "not ready");
710                 return -1;
711         }
712
713         /* Get number of data queues */
714         vtpci_read_cryptodev_config(hw,
715                 offsetof(struct virtio_crypto_config, max_dataqueues),
716                 &config->max_dataqueues,
717                 sizeof(config->max_dataqueues));
718         hw->max_dataqueues = config->max_dataqueues;
719
720         VIRTIO_CRYPTO_INIT_LOG_DBG("hw->max_dataqueues=%d",
721                 hw->max_dataqueues);
722
723         return 0;
724 }
725
726 /*
727  * This function does the real device creation work for the PCI probe callback.
728  * It returns 0 on success.
729  */
730 static int
731 crypto_virtio_create(const char *name, struct rte_pci_device *pci_dev,
732                 struct rte_cryptodev_pmd_init_params *init_params)
733 {
734         struct rte_cryptodev *cryptodev;
735         struct virtio_crypto_hw *hw;
736
737         PMD_INIT_FUNC_TRACE();
738
739         cryptodev = rte_cryptodev_pmd_create(name, &pci_dev->device,
740                                         init_params);
741         if (cryptodev == NULL)
742                 return -ENODEV;
743
744         cryptodev->driver_id = cryptodev_virtio_driver_id;
745         cryptodev->dev_ops = &virtio_crypto_dev_ops;
746
747         cryptodev->enqueue_burst = virtio_crypto_pkt_tx_burst;
748         cryptodev->dequeue_burst = virtio_crypto_pkt_rx_burst;
749
750         cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
751                 RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING;
752
753         hw = cryptodev->data->dev_private;
754         hw->dev_id = cryptodev->data->dev_id;
755         hw->virtio_dev_capabilities = virtio_capabilities;
756
757         VIRTIO_CRYPTO_INIT_LOG_DBG("dev %d vendorID=0x%x deviceID=0x%x",
758                 cryptodev->data->dev_id, pci_dev->id.vendor_id,
759                 pci_dev->id.device_id);
760
761         /* pci device init */
762         if (vtpci_cryptodev_init(pci_dev, hw))
763                 return -1;
764
765         if (virtio_crypto_init_device(cryptodev,
766                         VIRTIO_CRYPTO_PMD_GUEST_FEATURES) < 0)
767                 return -1;
768
769         return 0;
770 }
771
772 static int
773 virtio_crypto_dev_uninit(struct rte_cryptodev *cryptodev)
774 {
775         struct virtio_crypto_hw *hw = cryptodev->data->dev_private;
776
777         PMD_INIT_FUNC_TRACE();
778
779         if (rte_eal_process_type() == RTE_PROC_SECONDARY)
780                 return -EPERM;
781
782         if (cryptodev->data->dev_started) {
783                 virtio_crypto_dev_stop(cryptodev);
784                 virtio_crypto_dev_close(cryptodev);
785         }
786
787         cryptodev->dev_ops = NULL;
788         cryptodev->enqueue_burst = NULL;
789         cryptodev->dequeue_burst = NULL;
790
791         /* release control queue */
792         virtio_crypto_queue_release(hw->cvq);
793
794         rte_free(cryptodev->data);
795         cryptodev->data = NULL;
796
797         VIRTIO_CRYPTO_DRV_LOG_INFO("dev_uninit completed");
798
799         return 0;
800 }
801
802 static int
803 virtio_crypto_dev_configure(struct rte_cryptodev *cryptodev,
804         struct rte_cryptodev_config *config __rte_unused)
805 {
806         struct virtio_crypto_hw *hw = cryptodev->data->dev_private;
807
808         PMD_INIT_FUNC_TRACE();
809
810         if (virtio_crypto_init_device(cryptodev,
811                         VIRTIO_CRYPTO_PMD_GUEST_FEATURES) < 0)
812                 return -1;
813
814         /* setup the control queue:
815          * queue indexes [0, 1, ..., (config->max_dataqueues - 1)] are data
816          * queues, and index config->max_dataqueues is the control queue
817          */
818         if (virtio_crypto_ctrlq_setup(cryptodev, hw->max_dataqueues) < 0) {
819                 VIRTIO_CRYPTO_INIT_LOG_ERR("control queue setup error");
820                 return -1;
821         }
822         virtio_crypto_ctrlq_start(cryptodev);
823
824         return 0;
825 }
826
827 static void
828 virtio_crypto_dev_stop(struct rte_cryptodev *dev)
829 {
830         struct virtio_crypto_hw *hw = dev->data->dev_private;
831
832         PMD_INIT_FUNC_TRACE();
833         VIRTIO_CRYPTO_DRV_LOG_DBG("virtio_dev_stop");
834
835         vtpci_cryptodev_reset(hw);
836
837         virtio_crypto_dev_free_mbufs(dev);
838         virtio_crypto_free_queues(dev);
839
840         dev->data->dev_started = 0;
841 }
842
843 static int
844 virtio_crypto_dev_start(struct rte_cryptodev *dev)
845 {
846         struct virtio_crypto_hw *hw = dev->data->dev_private;
847
848         if (dev->data->dev_started)
849                 return 0;
850
851         /* Do final configuration before queue engine starts */
852         virtio_crypto_dataq_start(dev);
853         vtpci_cryptodev_reinit_complete(hw);
854
855         dev->data->dev_started = 1;
856
857         return 0;
858 }
859
860 static void
861 virtio_crypto_dev_free_mbufs(struct rte_cryptodev *dev)
862 {
863         uint32_t i;
864         struct virtio_crypto_hw *hw = dev->data->dev_private;
865
866         for (i = 0; i < hw->max_dataqueues; i++) {
867                 VIRTIO_CRYPTO_INIT_LOG_DBG("Before freeing dataq[%d] used "
868                         "and unused buf", i);
869                 VIRTQUEUE_DUMP((struct virtqueue *)
870                         dev->data->queue_pairs[i]);
871
872                 VIRTIO_CRYPTO_INIT_LOG_DBG("queue_pairs[%d]=%p",
873                                 i, dev->data->queue_pairs[i]);
874
875                 virtqueue_detatch_unused(dev->data->queue_pairs[i]);
876
877                 VIRTIO_CRYPTO_INIT_LOG_DBG("After freeing dataq[%d] used and "
878                                         "unused buf", i);
879                 VIRTQUEUE_DUMP(
880                         (struct virtqueue *)dev->data->queue_pairs[i]);
881         }
882 }
883
884 static unsigned int
885 virtio_crypto_sym_get_session_private_size(
886                 struct rte_cryptodev *dev __rte_unused)
887 {
888         PMD_INIT_FUNC_TRACE();
889
890         return RTE_ALIGN_CEIL(sizeof(struct virtio_crypto_session), 16);
891 }
892
893 static int
894 virtio_crypto_check_sym_session_paras(
895                 struct rte_cryptodev *dev)
896 {
897         struct virtio_crypto_hw *hw;
898
899         PMD_INIT_FUNC_TRACE();
900
901         if (unlikely(dev == NULL)) {
902                 VIRTIO_CRYPTO_SESSION_LOG_ERR("dev is NULL");
903                 return -1;
904         }
905         if (unlikely(dev->data == NULL)) {
906                 VIRTIO_CRYPTO_SESSION_LOG_ERR("dev->data is NULL");
907                 return -1;
908         }
909         hw = dev->data->dev_private;
910         if (unlikely(hw == NULL)) {
911                 VIRTIO_CRYPTO_SESSION_LOG_ERR("hw is NULL");
912                 return -1;
913         }
914         if (unlikely(hw->cvq == NULL)) {
915                 VIRTIO_CRYPTO_SESSION_LOG_ERR("vq is NULL");
916                 return -1;
917         }
918
919         return 0;
920 }
921
922 static int
923 virtio_crypto_check_sym_clear_session_paras(
924                 struct rte_cryptodev *dev,
925                 struct rte_cryptodev_sym_session *sess)
926 {
927         PMD_INIT_FUNC_TRACE();
928
929         if (sess == NULL) {
930                 VIRTIO_CRYPTO_SESSION_LOG_ERR("sym_session is NULL");
931                 return -1;
932         }
933
934         return virtio_crypto_check_sym_session_paras(dev);
935 }
936
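/*
 * A session-destroy control request uses two indirect descriptors: one for
 * the ctrl request and one device-writable descriptor for the status byte.
 */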
937 #define NUM_ENTRY_SYM_CLEAR_SESSION 2
938
939 static void
940 virtio_crypto_sym_clear_session(
941                 struct rte_cryptodev *dev,
942                 struct rte_cryptodev_sym_session *sess)
943 {
944         struct virtio_crypto_hw *hw;
945         struct virtqueue *vq;
946         struct virtio_crypto_session *session;
947         struct virtio_crypto_op_ctrl_req *ctrl;
948         struct vring_desc *desc;
949         uint8_t *status;
950         uint8_t needed = 1;
951         uint32_t head;
952         uint8_t *malloc_virt_addr;
953         uint64_t malloc_phys_addr;
954         uint8_t len_inhdr = sizeof(struct virtio_crypto_inhdr);
955         uint32_t len_op_ctrl_req = sizeof(struct virtio_crypto_op_ctrl_req);
956         uint32_t desc_offset = len_op_ctrl_req + len_inhdr;
957
958         PMD_INIT_FUNC_TRACE();
959
960         if (virtio_crypto_check_sym_clear_session_paras(dev, sess) < 0)
961                 return;
962
963         hw = dev->data->dev_private;
964         vq = hw->cvq;
965         session = (struct virtio_crypto_session *)get_session_private_data(
966                 sess, cryptodev_virtio_driver_id);
967         if (session == NULL) {
968                 VIRTIO_CRYPTO_SESSION_LOG_ERR("Invalid session parameter");
969                 return;
970         }
971
972         VIRTIO_CRYPTO_SESSION_LOG_INFO("vq->vq_desc_head_idx = %d, "
973                         "vq = %p", vq->vq_desc_head_idx, vq);
974
975         if (vq->vq_free_cnt < needed) {
976                 VIRTIO_CRYPTO_SESSION_LOG_ERR(
977                                 "not enough free descriptors: vq_free_cnt=%d, "
978                                 "needed=%d", vq->vq_free_cnt, needed);
979                 return;
980         }
981
982         /*
983          * Allocate memory to hold the ctrl request, the returned status
984          * and the indirect vring descriptors.
985          */
986         malloc_virt_addr = rte_malloc(NULL, len_op_ctrl_req + len_inhdr
987                 + NUM_ENTRY_SYM_CLEAR_SESSION
988                 * sizeof(struct vring_desc), RTE_CACHE_LINE_SIZE);
989         if (malloc_virt_addr == NULL) {
990                 VIRTIO_CRYPTO_SESSION_LOG_ERR("not enough heap room");
991                 return;
992         }
993         malloc_phys_addr = rte_malloc_virt2iova(malloc_virt_addr);
994
995         /* assign ctrl request op part */
996         ctrl = (struct virtio_crypto_op_ctrl_req *)malloc_virt_addr;
997         ctrl->header.opcode = VIRTIO_CRYPTO_CIPHER_DESTROY_SESSION;
998         /* default data virtqueue is 0 */
999         ctrl->header.queue_id = 0;
1000         ctrl->u.destroy_session.session_id = session->session_id;
1001
1002         /* status part */
1003         status = &(((struct virtio_crypto_inhdr *)
1004                 ((uint8_t *)malloc_virt_addr + len_op_ctrl_req))->status);
1005         *status = VIRTIO_CRYPTO_ERR;
1006
1007         /* indirect desc vring part */
1008         desc = (struct vring_desc *)((uint8_t *)malloc_virt_addr
1009                 + desc_offset);
1010
1011         /* ctrl request part */
1012         desc[0].addr = malloc_phys_addr;
1013         desc[0].len = len_op_ctrl_req;
1014         desc[0].flags = VRING_DESC_F_NEXT;
1015         desc[0].next = 1;
1016
1017         /* status part */
1018         desc[1].addr = malloc_phys_addr + len_op_ctrl_req;
1019         desc[1].len = len_inhdr;
1020         desc[1].flags = VRING_DESC_F_WRITE;
1021
1022         /* use only a single desc entry */
1023         head = vq->vq_desc_head_idx;
1024         vq->vq_ring.desc[head].flags = VRING_DESC_F_INDIRECT;
1025         vq->vq_ring.desc[head].addr = malloc_phys_addr + desc_offset;
1026         vq->vq_ring.desc[head].len
1027                 = NUM_ENTRY_SYM_CLEAR_SESSION
1028                 * sizeof(struct vring_desc);
1029         vq->vq_free_cnt -= needed;
1030
1031         vq->vq_desc_head_idx = vq->vq_ring.desc[head].next;
1032
1033         vq_update_avail_ring(vq, head);
1034         vq_update_avail_idx(vq);
1035
1036         VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_queue_index = %d",
1037                                         vq->vq_queue_index);
1038
1039         virtqueue_notify(vq);
1040
1041         rte_rmb();
1042         while (vq->vq_used_cons_idx == vq->vq_ring.used->idx) {
1043                 rte_rmb();
1044                 usleep(100);
1045         }
1046
1047         while (vq->vq_used_cons_idx != vq->vq_ring.used->idx) {
1048                 uint32_t idx, desc_idx, used_idx;
1049                 struct vring_used_elem *uep;
1050
1051                 used_idx = (uint32_t)(vq->vq_used_cons_idx
1052                                 & (vq->vq_nentries - 1));
1053                 uep = &vq->vq_ring.used->ring[used_idx];
1054                 idx = (uint32_t) uep->id;
1055                 desc_idx = idx;
1056                 while (vq->vq_ring.desc[desc_idx].flags
1057                                 & VRING_DESC_F_NEXT) {
1058                         desc_idx = vq->vq_ring.desc[desc_idx].next;
1059                         vq->vq_free_cnt++;
1060                 }
1061
1062                 vq->vq_ring.desc[desc_idx].next = vq->vq_desc_head_idx;
1063                 vq->vq_desc_head_idx = idx;
1064                 vq->vq_used_cons_idx++;
1065                 vq->vq_free_cnt++;
1066         }
1067
1068         if (*status != VIRTIO_CRYPTO_OK) {
1069                 VIRTIO_CRYPTO_SESSION_LOG_ERR("Close session failed "
1070                                 "status=%"PRIu32", session_id=%"PRIu64"",
1071                                 *status, session->session_id);
1072                 rte_free(malloc_virt_addr);
1073                 return;
1074         }
1075
1076         VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_free_cnt=%d\n"
1077                         "vq->vq_desc_head_idx=%d",
1078                         vq->vq_free_cnt, vq->vq_desc_head_idx);
1079
1080         VIRTIO_CRYPTO_SESSION_LOG_INFO("Session %"PRIu64" closed successfully",
1081                         session->session_id);
1082
1083         memset(sess, 0, sizeof(struct virtio_crypto_session));
1084         rte_free(malloc_virt_addr);
1085 }
1086
1087 static struct rte_crypto_cipher_xform *
1088 virtio_crypto_get_cipher_xform(struct rte_crypto_sym_xform *xform)
1089 {
1090         do {
1091                 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
1092                         return &xform->cipher;
1093
1094                 xform = xform->next;
1095         } while (xform);
1096
1097         return NULL;
1098 }
1099
1100 static struct rte_crypto_auth_xform *
1101 virtio_crypto_get_auth_xform(struct rte_crypto_sym_xform *xform)
1102 {
1103         do {
1104                 if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH)
1105                         return &xform->auth;
1106
1107                 xform = xform->next;
1108         } while (xform);
1109
1110         return NULL;
1111 }
1112
1113 /** Get xform chain order */
1114 static int
1115 virtio_crypto_get_chain_order(struct rte_crypto_sym_xform *xform)
1116 {
1117         if (xform == NULL)
1118                 return -1;
1119
1120         /* Cipher Only */
1121         if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
1122                         xform->next == NULL)
1123                 return VIRTIO_CRYPTO_CMD_CIPHER;
1124
1125         /* Authentication Only */
1126         if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
1127                         xform->next == NULL)
1128                 return VIRTIO_CRYPTO_CMD_AUTH;
1129
1130         /* Authenticate then Cipher */
1131         if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
1132                         xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
1133                 return VIRTIO_CRYPTO_CMD_HASH_CIPHER;
1134
1135         /* Cipher then Authenticate */
1136         if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
1137                         xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
1138                 return VIRTIO_CRYPTO_CMD_CIPHER_HASH;
1139
1140         return -1;
1141 }
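/*
 * Example (illustrative only): an xform list of
 *   RTE_CRYPTO_SYM_XFORM_CIPHER -> RTE_CRYPTO_SYM_XFORM_AUTH
 * maps to VIRTIO_CRYPTO_CMD_CIPHER_HASH, while
 *   RTE_CRYPTO_SYM_XFORM_AUTH -> RTE_CRYPTO_SYM_XFORM_CIPHER
 * maps to VIRTIO_CRYPTO_CMD_HASH_CIPHER.
 */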
1142
1143 static int
1144 virtio_crypto_sym_pad_cipher_param(
1145                 struct virtio_crypto_cipher_session_para *para,
1146                 struct rte_crypto_cipher_xform *cipher_xform)
1147 {
1148         switch (cipher_xform->algo) {
1149         case RTE_CRYPTO_CIPHER_AES_CBC:
1150                 para->algo = VIRTIO_CRYPTO_CIPHER_AES_CBC;
1151                 break;
1152         default:
1153                 VIRTIO_CRYPTO_SESSION_LOG_ERR("Crypto: Unsupported "
1154                                 "Cipher alg %u", cipher_xform->algo);
1155                 return -1;
1156         }
1157
1158         para->keylen = cipher_xform->key.length;
1159         switch (cipher_xform->op) {
1160         case RTE_CRYPTO_CIPHER_OP_ENCRYPT:
1161                 para->op = VIRTIO_CRYPTO_OP_ENCRYPT;
1162                 break;
1163         case RTE_CRYPTO_CIPHER_OP_DECRYPT:
1164                 para->op = VIRTIO_CRYPTO_OP_DECRYPT;
1165                 break;
1166         default:
1167                 VIRTIO_CRYPTO_SESSION_LOG_ERR("Unsupported cipher operation "
1168                                         "parameter");
1169                 return -1;
1170         }
1171
1172         return 0;
1173 }
1174
1175 static int
1176 virtio_crypto_sym_pad_auth_param(
1177                 struct virtio_crypto_op_ctrl_req *ctrl,
1178                 struct rte_crypto_auth_xform *auth_xform)
1179 {
1180         uint32_t *algo;
1181         struct virtio_crypto_alg_chain_session_para *para =
1182                 &(ctrl->u.sym_create_session.u.chain.para);
1183
1184         switch (ctrl->u.sym_create_session.u.chain.para.hash_mode) {
1185         case VIRTIO_CRYPTO_SYM_HASH_MODE_PLAIN:
1186                 algo = &(para->u.hash_param.algo);
1187                 break;
1188         case VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH:
1189                 algo = &(para->u.mac_param.algo);
1190                 break;
1191         default:
1192                 VIRTIO_CRYPTO_SESSION_LOG_ERR("Unsupported hash mode %u "
1193                         "specified",
1194                         ctrl->u.sym_create_session.u.chain.para.hash_mode);
1195                 return -1;
1196         }
1197
1198         switch (auth_xform->algo) {
1199         case RTE_CRYPTO_AUTH_SHA1_HMAC:
1200                 *algo = VIRTIO_CRYPTO_MAC_HMAC_SHA1;
1201                 break;
1202         default:
1203                 VIRTIO_CRYPTO_SESSION_LOG_ERR(
1204                         "Crypto: Undefined Hash algo %u specified",
1205                         auth_xform->algo);
1206                 return -1;
1207         }
1208
1209         return 0;
1210 }
1211
1212 static int
1213 virtio_crypto_sym_pad_op_ctrl_req(
1214                 struct virtio_crypto_op_ctrl_req *ctrl,
1215                 struct rte_crypto_sym_xform *xform, bool is_chainned,
1216                 uint8_t **cipher_key_data, uint8_t **auth_key_data,
1217                 struct virtio_crypto_session *session)
1218 {
1219         int ret;
1220         struct rte_crypto_auth_xform *auth_xform = NULL;
1221         struct rte_crypto_cipher_xform *cipher_xform = NULL;
1222
1223         /* Get cipher xform from crypto xform chain */
1224         cipher_xform = virtio_crypto_get_cipher_xform(xform);
1225         if (cipher_xform) {
1226                 if (cipher_xform->iv.length > VIRTIO_CRYPTO_MAX_IV_SIZE) {
1227                         VIRTIO_CRYPTO_SESSION_LOG_ERR(
1228                                 "cipher IV size cannot be longer than %u",
1229                                 VIRTIO_CRYPTO_MAX_IV_SIZE);
1230                         return -1;
1231                 }
1232                 if (is_chainned)
1233                         ret = virtio_crypto_sym_pad_cipher_param(
1234                                 &ctrl->u.sym_create_session.u.chain.para
1235                                                 .cipher_param, cipher_xform);
1236                 else
1237                         ret = virtio_crypto_sym_pad_cipher_param(
1238                                 &ctrl->u.sym_create_session.u.cipher.para,
1239                                 cipher_xform);
1240
1241                 if (ret < 0) {
1242                         VIRTIO_CRYPTO_SESSION_LOG_ERR(
1243                                 "pad cipher parameter failed");
1244                         return -1;
1245                 }
1246
1247                 *cipher_key_data = cipher_xform->key.data;
1248
1249                 session->iv.offset = cipher_xform->iv.offset;
1250                 session->iv.length = cipher_xform->iv.length;
1251         }
1252
1253         /* Get auth xform from crypto xform chain */
1254         auth_xform = virtio_crypto_get_auth_xform(xform);
1255         if (auth_xform) {
1256                 /* FIXME: support VIRTIO_CRYPTO_SYM_HASH_MODE_NESTED */
1257                 struct virtio_crypto_alg_chain_session_para *para =
1258                         &(ctrl->u.sym_create_session.u.chain.para);
1259                 if (auth_xform->key.length) {
1260                         para->hash_mode = VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH;
1261                         para->u.mac_param.auth_key_len =
1262                                 (uint32_t)auth_xform->key.length;
1263                         para->u.mac_param.hash_result_len =
1264                                 auth_xform->digest_length;
1265
1266                         *auth_key_data = auth_xform->key.data;
1267                 } else {
1268                         para->hash_mode = VIRTIO_CRYPTO_SYM_HASH_MODE_PLAIN;
1269                         para->u.hash_param.hash_result_len =
1270                                 auth_xform->digest_length;
1271                 }
1272
1273                 ret = virtio_crypto_sym_pad_auth_param(ctrl, auth_xform);
1274                 if (ret < 0) {
1275                         VIRTIO_CRYPTO_SESSION_LOG_ERR("pad auth parameter "
1276                                                 "failed");
1277                         return -1;
1278                 }
1279         }
1280
1281         return 0;
1282 }
1283
1284 static int
1285 virtio_crypto_check_sym_configure_session_paras(
1286                 struct rte_cryptodev *dev,
1287                 struct rte_crypto_sym_xform *xform,
1288                 struct rte_cryptodev_sym_session *sym_sess,
1289                 struct rte_mempool *mempool)
1290 {
1291         if (unlikely(xform == NULL) || unlikely(sym_sess == NULL) ||
1292                 unlikely(mempool == NULL)) {
1293                 VIRTIO_CRYPTO_SESSION_LOG_ERR("NULL pointer");
1294                 return -1;
1295         }
1296
1297         if (virtio_crypto_check_sym_session_paras(dev) < 0)
1298                 return -1;
1299
1300         return 0;
1301 }
1302
1303 static int
1304 virtio_crypto_sym_configure_session(
1305                 struct rte_cryptodev *dev,
1306                 struct rte_crypto_sym_xform *xform,
1307                 struct rte_cryptodev_sym_session *sess,
1308                 struct rte_mempool *mempool)
1309 {
1310         int ret;
1311         struct virtio_crypto_session crypto_sess;
1312         void *session_private = &crypto_sess;
1313         struct virtio_crypto_session *session;
1314         struct virtio_crypto_op_ctrl_req *ctrl_req;
1315         enum virtio_crypto_cmd_id cmd_id;
1316         uint8_t *cipher_key_data = NULL;
1317         uint8_t *auth_key_data = NULL;
1318         struct virtio_crypto_hw *hw;
1319         struct virtqueue *control_vq;
1320
1321         PMD_INIT_FUNC_TRACE();
1322
1323         ret = virtio_crypto_check_sym_configure_session_paras(dev, xform,
1324                         sess, mempool);
1325         if (ret < 0) {
1326                 VIRTIO_CRYPTO_SESSION_LOG_ERR("Invalid parameters");
1327                 return ret;
1328         }
1329
1330         if (rte_mempool_get(mempool, &session_private)) {
1331                 VIRTIO_CRYPTO_SESSION_LOG_ERR(
1332                         "Couldn't get object from session mempool");
1333                 return -ENOMEM;
1334         }
1335
1336         session = (struct virtio_crypto_session *)session_private;
1337         memset(session, 0, sizeof(struct virtio_crypto_session));
1338         ctrl_req = &session->ctrl;
1339         ctrl_req->header.opcode = VIRTIO_CRYPTO_CIPHER_CREATE_SESSION;
1340         /* FIXME: support multiqueue */
1341         ctrl_req->header.queue_id = 0;
1342
1343         hw = dev->data->dev_private;
1344         control_vq = hw->cvq;
1345
1346         cmd_id = virtio_crypto_get_chain_order(xform);
1347         if (cmd_id == VIRTIO_CRYPTO_CMD_CIPHER_HASH)
1348                 ctrl_req->u.sym_create_session.u.chain.para.alg_chain_order
1349                         = VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_CIPHER_THEN_HASH;
1350         if (cmd_id == VIRTIO_CRYPTO_CMD_HASH_CIPHER)
1351                 ctrl_req->u.sym_create_session.u.chain.para.alg_chain_order
1352                         = VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_HASH_THEN_CIPHER;
1353
1354         switch (cmd_id) {
1355         case VIRTIO_CRYPTO_CMD_CIPHER_HASH:
1356         case VIRTIO_CRYPTO_CMD_HASH_CIPHER:
1357                 ctrl_req->u.sym_create_session.op_type
1358                         = VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING;
1359
1360                 ret = virtio_crypto_sym_pad_op_ctrl_req(ctrl_req,
1361                         xform, true, &cipher_key_data, &auth_key_data, session);
1362                 if (ret < 0) {
1363                         VIRTIO_CRYPTO_SESSION_LOG_ERR(
1364                                 "padding sym op ctrl req failed");
1365                         goto error_out;
1366                 }
1367                 ret = virtio_crypto_send_command(control_vq, ctrl_req,
1368                         cipher_key_data, auth_key_data, session);
1369                 if (ret < 0) {
1370                         VIRTIO_CRYPTO_SESSION_LOG_ERR(
1371                                 "create session failed: %d", ret);
1372                         goto error_out;
1373                 }
1374                 break;
1375         case VIRTIO_CRYPTO_CMD_CIPHER:
1376                 ctrl_req->u.sym_create_session.op_type
1377                         = VIRTIO_CRYPTO_SYM_OP_CIPHER;
1378                 ret = virtio_crypto_sym_pad_op_ctrl_req(ctrl_req, xform,
1379                         false, &cipher_key_data, &auth_key_data, session);
1380                 if (ret < 0) {
1381                         VIRTIO_CRYPTO_SESSION_LOG_ERR(
1382                                 "padding sym op ctrl req failed");
1383                         goto error_out;
1384                 }
1385                 ret = virtio_crypto_send_command(control_vq, ctrl_req,
1386                         cipher_key_data, NULL, session);
1387                 if (ret < 0) {
1388                         VIRTIO_CRYPTO_SESSION_LOG_ERR(
1389                                 "create session failed: %d", ret);
1390                         goto error_out;
1391                 }
1392                 break;
1393         default:
1394                 VIRTIO_CRYPTO_SESSION_LOG_ERR(
1395                         "Unsupported operation chain order parameter");
1396                 goto error_out;
1397         }
1398
1399         set_session_private_data(sess, dev->driver_id,
1400                 session_private);
1401
1402         return 0;
1403
1404 error_out:
1405         return -1;
1406 }
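/*
 * Application-side sketch of how this callback is reached (dev_id, xform and
 * sess_mp are assumed to be set up elsewhere; this is not driver code):
 *
 *   struct rte_cryptodev_sym_session *sess =
 *           rte_cryptodev_sym_session_create(sess_mp);
 *   rte_cryptodev_sym_session_init(dev_id, sess, &xform, sess_mp);
 *
 * rte_cryptodev_sym_session_init() invokes .session_configure for this
 * device's driver id.
 */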
1407
1408 static void
1409 virtio_crypto_dev_info_get(struct rte_cryptodev *dev,
1410                 struct rte_cryptodev_info *info)
1411 {
1412         struct virtio_crypto_hw *hw = dev->data->dev_private;
1413
1414         PMD_INIT_FUNC_TRACE();
1415
1416         if (info != NULL) {
1417                 info->driver_id = cryptodev_virtio_driver_id;
1418                 info->pci_dev = RTE_DEV_TO_PCI(dev->device);
1419                 info->feature_flags = dev->feature_flags;
1420                 info->max_nb_queue_pairs = hw->max_dataqueues;
1421                 info->sym.max_nb_sessions =
1422                         RTE_VIRTIO_CRYPTO_PMD_MAX_NB_SESSIONS;
1423                 info->capabilities = hw->virtio_dev_capabilities;
1424         }
1425 }
1426
1427 static int
1428 crypto_virtio_pci_probe(
1429         struct rte_pci_driver *pci_drv __rte_unused,
1430         struct rte_pci_device *pci_dev)
1431 {
1432         struct rte_cryptodev_pmd_init_params init_params = {
1433                 .name = "",
1434                 .socket_id = rte_socket_id(),
1435                 .private_data_size = sizeof(struct virtio_crypto_hw),
1436                 .max_nb_sessions = RTE_VIRTIO_CRYPTO_PMD_MAX_NB_SESSIONS
1437         };
1438         char name[RTE_CRYPTODEV_NAME_MAX_LEN];
1439
1440         VIRTIO_CRYPTO_DRV_LOG_DBG("Found Crypto device at %02x:%02x.%x",
1441                         pci_dev->addr.bus,
1442                         pci_dev->addr.devid,
1443                         pci_dev->addr.function);
1444
1445         rte_pci_device_name(&pci_dev->addr, name, sizeof(name));
1446
1447         return crypto_virtio_create(name, pci_dev, &init_params);
1448 }
1449
1450 static int
1451 crypto_virtio_pci_remove(
1452         struct rte_pci_device *pci_dev __rte_unused)
1453 {
1454         struct rte_cryptodev *cryptodev;
1455         char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
1456
1457         if (pci_dev == NULL)
1458                 return -EINVAL;
1459
1460         rte_pci_device_name(&pci_dev->addr, cryptodev_name,
1461                         sizeof(cryptodev_name));
1462
1463         cryptodev = rte_cryptodev_pmd_get_named_dev(cryptodev_name);
1464         if (cryptodev == NULL)
1465                 return -ENODEV;
1466
1467         return virtio_crypto_dev_uninit(cryptodev);
1468 }
1469
1470 static struct rte_pci_driver rte_virtio_crypto_driver = {
1471         .id_table = pci_id_virtio_crypto_map,
1472         .drv_flags = 0,
1473         .probe = crypto_virtio_pci_probe,
1474         .remove = crypto_virtio_pci_remove
1475 };
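/*
 * The PCI bus matches devices against pci_id_virtio_crypto_map above and
 * invokes .probe/.remove; RTE_PMD_REGISTER_PCI below registers this driver
 * for that scan, which runs during rte_eal_init() or on device hotplug.
 */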
1476
1477 static struct cryptodev_driver virtio_crypto_drv;
1478
1479 RTE_PMD_REGISTER_PCI(CRYPTODEV_NAME_VIRTIO_PMD, rte_virtio_crypto_driver);
1480 RTE_PMD_REGISTER_CRYPTO_DRIVER(virtio_crypto_drv,
1481         rte_virtio_crypto_driver.driver,
1482         cryptodev_virtio_driver_id);
1483
1484 RTE_INIT(virtio_crypto_init_log);
1485 static void
1486 virtio_crypto_init_log(void)
1487 {
1488         virtio_crypto_logtype_init = rte_log_register("pmd.crypto.virtio.init");
1489         if (virtio_crypto_logtype_init >= 0)
1490                 rte_log_set_level(virtio_crypto_logtype_init, RTE_LOG_NOTICE);
1491
1492         virtio_crypto_logtype_session =
1493                 rte_log_register("pmd.crypto.virtio.session");
1494         if (virtio_crypto_logtype_session >= 0)
1495                 rte_log_set_level(virtio_crypto_logtype_session,
1496                                 RTE_LOG_NOTICE);
1497
1498         virtio_crypto_logtype_rx = rte_log_register("pmd.crypto.virtio.rx");
1499         if (virtio_crypto_logtype_rx >= 0)
1500                 rte_log_set_level(virtio_crypto_logtype_rx, RTE_LOG_NOTICE);
1501
1502         virtio_crypto_logtype_tx = rte_log_register("pmd.crypto.virtio.tx");
1503         if (virtio_crypto_logtype_tx >= 0)
1504                 rte_log_set_level(virtio_crypto_logtype_tx, RTE_LOG_NOTICE);
1505
1506         virtio_crypto_logtype_driver =
1507                 rte_log_register("pmd.crypto.virtio.driver");
1508         if (virtio_crypto_logtype_driver >= 0)
1509                 rte_log_set_level(virtio_crypto_logtype_driver, RTE_LOG_NOTICE);
1510 }