cryptodev: change queue pair configure structure
dpdk.git: drivers/crypto/virtio/virtio_cryptodev.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.
3  */
4 #include <stdbool.h>
5 #include <unistd.h>
6
7 #include <rte_common.h>
8 #include <rte_errno.h>
9 #include <rte_pci.h>
10 #include <rte_bus_pci.h>
11 #include <rte_cryptodev.h>
12 #include <rte_cryptodev_pmd.h>
13 #include <rte_eal.h>
14
15 #include "virtio_cryptodev.h"
16 #include "virtqueue.h"
17 #include "virtio_crypto_algs.h"
18 #include "virtio_crypto_capabilities.h"
19
20 int virtio_crypto_logtype_init;
21 int virtio_crypto_logtype_session;
22 int virtio_crypto_logtype_rx;
23 int virtio_crypto_logtype_tx;
24 int virtio_crypto_logtype_driver;
25
26 static int virtio_crypto_dev_configure(struct rte_cryptodev *dev,
27                 struct rte_cryptodev_config *config);
28 static int virtio_crypto_dev_start(struct rte_cryptodev *dev);
29 static void virtio_crypto_dev_stop(struct rte_cryptodev *dev);
30 static int virtio_crypto_dev_close(struct rte_cryptodev *dev);
31 static void virtio_crypto_dev_info_get(struct rte_cryptodev *dev,
32                 struct rte_cryptodev_info *dev_info);
33 static void virtio_crypto_dev_stats_get(struct rte_cryptodev *dev,
34                 struct rte_cryptodev_stats *stats);
35 static void virtio_crypto_dev_stats_reset(struct rte_cryptodev *dev);
36 static int virtio_crypto_qp_setup(struct rte_cryptodev *dev,
37                 uint16_t queue_pair_id,
38                 const struct rte_cryptodev_qp_conf *qp_conf,
39                 int socket_id);
40 static int virtio_crypto_qp_release(struct rte_cryptodev *dev,
41                 uint16_t queue_pair_id);
42 static void virtio_crypto_dev_free_mbufs(struct rte_cryptodev *dev);
43 static unsigned int virtio_crypto_sym_get_session_private_size(
44                 struct rte_cryptodev *dev);
45 static void virtio_crypto_sym_clear_session(struct rte_cryptodev *dev,
46                 struct rte_cryptodev_sym_session *sess);
47 static int virtio_crypto_sym_configure_session(struct rte_cryptodev *dev,
48                 struct rte_crypto_sym_xform *xform,
49                 struct rte_cryptodev_sym_session *session,
50                 struct rte_mempool *mp);
51
52 /*
53  * The set of PCI devices this driver supports
54  */
55 static const struct rte_pci_id pci_id_virtio_crypto_map[] = {
56         { RTE_PCI_DEVICE(VIRTIO_CRYPTO_PCI_VENDORID,
57                                 VIRTIO_CRYPTO_PCI_DEVICEID) },
58         { .vendor_id = 0, /* sentinel */ },
59 };
60
61 static const struct rte_cryptodev_capabilities virtio_capabilities[] = {
62         VIRTIO_SYM_CAPABILITIES,
63         RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
64 };
65
66 uint8_t cryptodev_virtio_driver_id;
67
68 #define NUM_ENTRY_SYM_CREATE_SESSION 4
69
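/*
 * Build a VIRTIO_CRYPTO_CIPHER_CREATE_SESSION control request in one
 * contiguous buffer, post it on the control virtqueue through a single
 * indirect descriptor, and poll until the device completes it. On
 * success the device-assigned session id is stored in *session.
 */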
70 static int
71 virtio_crypto_send_command(struct virtqueue *vq,
72                 struct virtio_crypto_op_ctrl_req *ctrl, uint8_t *cipher_key,
73                 uint8_t *auth_key, struct virtio_crypto_session *session)
74 {
75         uint8_t idx = 0;
76         uint8_t needed = 1;
77         uint32_t head = 0;
78         uint32_t len_cipher_key = 0;
79         uint32_t len_auth_key = 0;
80         uint32_t len_ctrl_req = sizeof(struct virtio_crypto_op_ctrl_req);
81         uint32_t len_session_input = sizeof(struct virtio_crypto_session_input);
82         uint32_t len_total = 0;
83         uint32_t input_offset = 0;
84         void *virt_addr_started = NULL;
85         phys_addr_t phys_addr_started;
86         struct vring_desc *desc;
87         uint32_t desc_offset;
88         struct virtio_crypto_session_input *input;
89         int ret;
90
91         PMD_INIT_FUNC_TRACE();
92
93         if (session == NULL) {
94                 VIRTIO_CRYPTO_SESSION_LOG_ERR("session is NULL.");
95                 return -EINVAL;
96         }
97         /* cipher_key is required; auth_key may be NULL (cipher-only sessions are supported) */
98         if (!cipher_key) {
99                 VIRTIO_CRYPTO_SESSION_LOG_ERR("cipher key is NULL.");
100                 return -EINVAL;
101         }
102
103         head = vq->vq_desc_head_idx;
104         VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_desc_head_idx = %d, vq = %p",
105                                         head, vq);
106
107         if (vq->vq_free_cnt < needed) {
108                 VIRTIO_CRYPTO_SESSION_LOG_ERR("Not enough entry");
109                 return -ENOSPC;
110         }
111
112         /* calculate the length of cipher key */
113         if (cipher_key) {
114                 switch (ctrl->u.sym_create_session.op_type) {
115                 case VIRTIO_CRYPTO_SYM_OP_CIPHER:
116                         len_cipher_key
117                                 = ctrl->u.sym_create_session.u.cipher
118                                                         .para.keylen;
119                         break;
120                 case VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING:
121                         len_cipher_key
122                                 = ctrl->u.sym_create_session.u.chain
123                                         .para.cipher_param.keylen;
124                         break;
125                 default:
126                         VIRTIO_CRYPTO_SESSION_LOG_ERR("invalid op type");
127                         return -EINVAL;
128                 }
129         }
130
131         /* calculate the length of auth key */
132         if (auth_key) {
133                 len_auth_key =
134                         ctrl->u.sym_create_session.u.chain.para.u.mac_param
135                                 .auth_key_len;
136         }
137
138         /*
139          * Allocate a single buffer to hold the ctrl request, cipher key,
140          * auth key, session input and the indirect vring_desc table
141          */
142         desc_offset = len_ctrl_req + len_cipher_key + len_auth_key
143                 + len_session_input;
144         virt_addr_started = rte_malloc(NULL,
145                 desc_offset + NUM_ENTRY_SYM_CREATE_SESSION
146                         * sizeof(struct vring_desc), RTE_CACHE_LINE_SIZE);
147         if (virt_addr_started == NULL) {
148                 VIRTIO_CRYPTO_SESSION_LOG_ERR("not enough heap memory");
149                 return -ENOSPC;
150         }
151         phys_addr_started = rte_malloc_virt2iova(virt_addr_started);
152
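        /*
         * Layout of the allocation, in order:
         *   [ctrl request][cipher key][auth key][session input][indirect descs]
         * The session input is written back by the device; the indirect
         * descriptor table starts at desc_offset.
         */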
153         /* address to store indirect vring desc entries */
154         desc = (struct vring_desc *)
155                 ((uint8_t *)virt_addr_started + desc_offset);
156
157         /*  ctrl req part */
158         memcpy(virt_addr_started, ctrl, len_ctrl_req);
159         desc[idx].addr = phys_addr_started;
160         desc[idx].len = len_ctrl_req;
161         desc[idx].flags = VRING_DESC_F_NEXT;
162         desc[idx].next = idx + 1;
163         idx++;
164         len_total += len_ctrl_req;
165         input_offset += len_ctrl_req;
166
167         /* cipher key part */
168         if (len_cipher_key > 0) {
169                 memcpy((uint8_t *)virt_addr_started + len_total,
170                         cipher_key, len_cipher_key);
171
172                 desc[idx].addr = phys_addr_started + len_total;
173                 desc[idx].len = len_cipher_key;
174                 desc[idx].flags = VRING_DESC_F_NEXT;
175                 desc[idx].next = idx + 1;
176                 idx++;
177                 len_total += len_cipher_key;
178                 input_offset += len_cipher_key;
179         }
180
181         /* auth key part */
182         if (len_auth_key > 0) {
183                 memcpy((uint8_t *)virt_addr_started + len_total,
184                         auth_key, len_auth_key);
185
186                 desc[idx].addr = phys_addr_started + len_total;
187                 desc[idx].len = len_auth_key;
188                 desc[idx].flags = VRING_DESC_F_NEXT;
189                 desc[idx].next = idx + 1;
190                 idx++;
191                 len_total += len_auth_key;
192                 input_offset += len_auth_key;
193         }
194
195         /* input part */
196         input = (struct virtio_crypto_session_input *)
197                 ((uint8_t *)virt_addr_started + input_offset);
198         input->status = VIRTIO_CRYPTO_ERR;
199         input->session_id = ~0ULL;
200         desc[idx].addr = phys_addr_started + len_total;
201         desc[idx].len = len_session_input;
202         desc[idx].flags = VRING_DESC_F_WRITE;
203         idx++;
204
205         /* use a single desc entry */
206         vq->vq_ring.desc[head].addr = phys_addr_started + desc_offset;
207         vq->vq_ring.desc[head].len = idx * sizeof(struct vring_desc);
208         vq->vq_ring.desc[head].flags = VRING_DESC_F_INDIRECT;
209         vq->vq_free_cnt--;
210
211         vq->vq_desc_head_idx = vq->vq_ring.desc[head].next;
212
213         vq_update_avail_ring(vq, head);
214         vq_update_avail_idx(vq);
215
216         VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_queue_index = %d",
217                                         vq->vq_queue_index);
218
219         virtqueue_notify(vq);
220
221         rte_rmb();
222         while (vq->vq_used_cons_idx == vq->vq_ring.used->idx) {
223                 rte_rmb();
224                 usleep(100);
225         }
226
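        /*
         * Drain the used ring: walk each returned descriptor chain, link it
         * back onto the free list and update the free descriptor count.
         */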
227         while (vq->vq_used_cons_idx != vq->vq_ring.used->idx) {
228                 uint32_t idx, desc_idx, used_idx;
229                 struct vring_used_elem *uep;
230
231                 used_idx = (uint32_t)(vq->vq_used_cons_idx
232                                 & (vq->vq_nentries - 1));
233                 uep = &vq->vq_ring.used->ring[used_idx];
234                 idx = (uint32_t) uep->id;
235                 desc_idx = idx;
236
237                 while (vq->vq_ring.desc[desc_idx].flags & VRING_DESC_F_NEXT) {
238                         desc_idx = vq->vq_ring.desc[desc_idx].next;
239                         vq->vq_free_cnt++;
240                 }
241
242                 vq->vq_ring.desc[desc_idx].next = vq->vq_desc_head_idx;
243                 vq->vq_desc_head_idx = idx;
244
245                 vq->vq_used_cons_idx++;
246                 vq->vq_free_cnt++;
247         }
248
249         VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_free_cnt=%d\n"
250                         "vq->vq_desc_head_idx=%d",
251                         vq->vq_free_cnt, vq->vq_desc_head_idx);
252
253         /* get the result */
254         if (input->status != VIRTIO_CRYPTO_OK) {
255                 VIRTIO_CRYPTO_SESSION_LOG_ERR("Something went wrong on the backend! "
256                                 "status=%u, session_id=%" PRIu64 "",
257                                 input->status, input->session_id);
258                 rte_free(virt_addr_started);
259                 ret = -1;
260         } else {
261                 session->session_id = input->session_id;
262
263                 VIRTIO_CRYPTO_SESSION_LOG_INFO("Create session successfully, "
264                                 "session_id=%" PRIu64 "", input->session_id);
265                 rte_free(virt_addr_started);
266                 ret = 0;
267         }
268
269         return ret;
270 }
271
272 void
273 virtio_crypto_queue_release(struct virtqueue *vq)
274 {
275         struct virtio_crypto_hw *hw;
276
277         PMD_INIT_FUNC_TRACE();
278
279         if (vq) {
280                 hw = vq->hw;
281                 /* Select and deactivate the queue */
282                 VTPCI_OPS(hw)->del_queue(hw, vq);
283
284                 rte_memzone_free(vq->mz);
285                 rte_mempool_free(vq->mpool);
286                 rte_free(vq);
287         }
288 }
289
290 #define MPOOL_MAX_NAME_SZ 32
291
292 int
293 virtio_crypto_queue_setup(struct rte_cryptodev *dev,
294                 int queue_type,
295                 uint16_t vtpci_queue_idx,
296                 uint16_t nb_desc,
297                 int socket_id,
298                 struct virtqueue **pvq)
299 {
300         char vq_name[VIRTQUEUE_MAX_NAME_SZ];
301         char mpool_name[MPOOL_MAX_NAME_SZ];
302         const struct rte_memzone *mz;
303         unsigned int vq_size, size;
304         struct virtio_crypto_hw *hw = dev->data->dev_private;
305         struct virtqueue *vq = NULL;
306         uint32_t i = 0;
307         uint32_t j;
308
309         PMD_INIT_FUNC_TRACE();
310
311         VIRTIO_CRYPTO_INIT_LOG_DBG("setting up queue: %u", vtpci_queue_idx);
312
313         /*
314          * Read the virtqueue size from the Queue Size field;
315          * it is always a power of 2, and 0 means the virtqueue does not exist
316          */
317         vq_size = VTPCI_OPS(hw)->get_queue_num(hw, vtpci_queue_idx);
318         if (vq_size == 0) {
319                 VIRTIO_CRYPTO_INIT_LOG_ERR("virtqueue does not exist");
320                 return -EINVAL;
321         }
322         VIRTIO_CRYPTO_INIT_LOG_DBG("vq_size: %u", vq_size);
323
324         if (!rte_is_power_of_2(vq_size)) {
325                 VIRTIO_CRYPTO_INIT_LOG_ERR("virtqueue size is not power of 2");
326                 return -EINVAL;
327         }
328
329         if (queue_type == VTCRYPTO_DATAQ) {
330                 snprintf(vq_name, sizeof(vq_name), "dev%d_dataqueue%d",
331                                 dev->data->dev_id, vtpci_queue_idx);
332                 snprintf(mpool_name, sizeof(mpool_name),
333                                 "dev%d_dataqueue%d_mpool",
334                                 dev->data->dev_id, vtpci_queue_idx);
335         } else if (queue_type == VTCRYPTO_CTRLQ) {
336                 snprintf(vq_name, sizeof(vq_name), "dev%d_controlqueue",
337                                 dev->data->dev_id);
338                 snprintf(mpool_name, sizeof(mpool_name),
339                                 "dev%d_controlqueue_mpool",
340                                 dev->data->dev_id);
341         }
342         size = RTE_ALIGN_CEIL(sizeof(*vq) +
343                                 vq_size * sizeof(struct vq_desc_extra),
344                                 RTE_CACHE_LINE_SIZE);
345         vq = rte_zmalloc_socket(vq_name, size, RTE_CACHE_LINE_SIZE,
346                                 socket_id);
347         if (vq == NULL) {
348                 VIRTIO_CRYPTO_INIT_LOG_ERR("Can not allocate virtqueue");
349                 return -ENOMEM;
350         }
351
352         if (queue_type == VTCRYPTO_DATAQ) {
353                 /* pre-allocate a mempool and use it in the data plane to
354                  * improve performance
355                  */
356                 vq->mpool = rte_mempool_lookup(mpool_name);
357                 if (vq->mpool == NULL)
358                         vq->mpool = rte_mempool_create(mpool_name,
359                                         vq_size,
360                                         sizeof(struct virtio_crypto_op_cookie),
361                                         RTE_CACHE_LINE_SIZE, 0,
362                                         NULL, NULL, NULL, NULL, socket_id,
363                                         0);
364                 if (!vq->mpool) {
365                         VIRTIO_CRYPTO_DRV_LOG_ERR("Virtio Crypto PMD "
366                                         "Cannot create mempool");
367                         goto mpool_create_err;
368                 }
369                 for (i = 0; i < vq_size; i++) {
370                         vq->vq_descx[i].cookie =
371                                 rte_zmalloc("crypto PMD op cookie pointer",
372                                         sizeof(struct virtio_crypto_op_cookie),
373                                         RTE_CACHE_LINE_SIZE);
374                         if (vq->vq_descx[i].cookie == NULL) {
375                                 VIRTIO_CRYPTO_DRV_LOG_ERR("Failed to "
376                                                 "alloc mem for cookie");
377                                 goto cookie_alloc_err;
378                         }
379                 }
380         }
381
382         vq->hw = hw;
383         vq->dev_id = dev->data->dev_id;
384         vq->vq_queue_index = vtpci_queue_idx;
385         vq->vq_nentries = vq_size;
386
387         /*
388          * Using part of the vring entries is permitted, but the maximum
389          * is vq_size
390          */
391         if (nb_desc == 0 || nb_desc > vq_size)
392                 nb_desc = vq_size;
393         vq->vq_free_cnt = nb_desc;
394
395         /*
396          * Reserve a memzone for vring elements
397          */
398         size = vring_size(vq_size, VIRTIO_PCI_VRING_ALIGN);
399         vq->vq_ring_size = RTE_ALIGN_CEIL(size, VIRTIO_PCI_VRING_ALIGN);
400         VIRTIO_CRYPTO_INIT_LOG_DBG("%s vring_size: %d, rounded_vring_size: %d",
401                         (queue_type == VTCRYPTO_DATAQ) ? "dataq" : "ctrlq",
402                         size, vq->vq_ring_size);
403
404         mz = rte_memzone_reserve_aligned(vq_name, vq->vq_ring_size,
405                         socket_id, 0, VIRTIO_PCI_VRING_ALIGN);
406         if (mz == NULL) {
407                 if (rte_errno == EEXIST)
408                         mz = rte_memzone_lookup(vq_name);
409                 if (mz == NULL) {
410                         VIRTIO_CRYPTO_INIT_LOG_ERR("not enough memory");
411                         goto mz_reserve_err;
412                 }
413         }
414
415         /*
416          * The virtio PCI VIRTIO_PCI_QUEUE_PFN register is 32-bit and
417          * accepts only a 32-bit page frame number.
418          * Check if the allocated physical memory exceeds 16TB.
419          */
420         if ((mz->phys_addr + vq->vq_ring_size - 1)
421                                 >> (VIRTIO_PCI_QUEUE_ADDR_SHIFT + 32)) {
422                 VIRTIO_CRYPTO_INIT_LOG_ERR("vring address shouldn't be "
423                                         "above 16TB!");
424                 goto vring_addr_err;
425         }
426
427         memset(mz->addr, 0, mz->len);
428         vq->mz = mz;
429         vq->vq_ring_mem = mz->phys_addr;
430         vq->vq_ring_virt_mem = mz->addr;
431         VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_ring_mem(physical): 0x%"PRIx64,
432                                         (uint64_t)mz->phys_addr);
433         VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_ring_virt_mem: 0x%"PRIx64,
434                                         (uint64_t)(uintptr_t)mz->addr);
435
436         *pvq = vq;
437
438         return 0;
439
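        /* Error unwind: release resources in reverse order of acquisition;
         * 'i' counts how many per-descriptor cookies were allocated. */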
440 vring_addr_err:
441         rte_memzone_free(mz);
442 mz_reserve_err:
443 cookie_alloc_err:
444         rte_mempool_free(vq->mpool);
445         if (i != 0) {
446                 for (j = 0; j < i; j++)
447                         rte_free(vq->vq_descx[j].cookie);
448         }
449 mpool_create_err:
450         rte_free(vq);
451         return -ENOMEM;
452 }
453
454 static int
455 virtio_crypto_ctrlq_setup(struct rte_cryptodev *dev, uint16_t queue_idx)
456 {
457         int ret;
458         struct virtqueue *vq;
459         struct virtio_crypto_hw *hw = dev->data->dev_private;
460
461         /* if virtio device has started, do not touch the virtqueues */
462         if (dev->data->dev_started)
463                 return 0;
464
465         PMD_INIT_FUNC_TRACE();
466
467         ret = virtio_crypto_queue_setup(dev, VTCRYPTO_CTRLQ, queue_idx,
468                         0, SOCKET_ID_ANY, &vq);
469         if (ret < 0) {
470                 VIRTIO_CRYPTO_INIT_LOG_ERR("control vq initialization failed");
471                 return ret;
472         }
473
474         hw->cvq = vq;
475
476         return 0;
477 }
478
479 static void
480 virtio_crypto_free_queues(struct rte_cryptodev *dev)
481 {
482         unsigned int i;
483         struct virtio_crypto_hw *hw = dev->data->dev_private;
484
485         PMD_INIT_FUNC_TRACE();
486
487         /* control queue release */
488         virtio_crypto_queue_release(hw->cvq);
489
490         /* data queue release */
491         for (i = 0; i < hw->max_dataqueues; i++)
492                 virtio_crypto_queue_release(dev->data->queue_pairs[i]);
493 }
494
495 static int
496 virtio_crypto_dev_close(struct rte_cryptodev *dev __rte_unused)
497 {
498         return 0;
499 }
500
501 /*
502  * dev_ops for virtio, bare necessities for basic operation
503  */
504 static struct rte_cryptodev_ops virtio_crypto_dev_ops = {
505         /* Device related operations */
506         .dev_configure                   = virtio_crypto_dev_configure,
507         .dev_start                       = virtio_crypto_dev_start,
508         .dev_stop                        = virtio_crypto_dev_stop,
509         .dev_close                       = virtio_crypto_dev_close,
510         .dev_infos_get                   = virtio_crypto_dev_info_get,
511
512         .stats_get                       = virtio_crypto_dev_stats_get,
513         .stats_reset                     = virtio_crypto_dev_stats_reset,
514
515         .queue_pair_setup                = virtio_crypto_qp_setup,
516         .queue_pair_release              = virtio_crypto_qp_release,
517         .queue_pair_count                = NULL,
518
519         /* Crypto related operations */
520         .sym_session_get_size           = virtio_crypto_sym_get_session_private_size,
521         .sym_session_configure          = virtio_crypto_sym_configure_session,
522         .sym_session_clear              = virtio_crypto_sym_clear_session
523 };
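/*
 * Typical application-side call flow exercising these ops (a sketch of the
 * generic cryptodev API usage, not code in this driver):
 *   rte_cryptodev_configure() -> rte_cryptodev_queue_pair_setup() for each
 *   data queue -> rte_cryptodev_sym_session_create() /
 *   rte_cryptodev_sym_session_init() -> rte_cryptodev_start() ->
 *   rte_cryptodev_enqueue_burst() / rte_cryptodev_dequeue_burst().
 */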
524
525 static void
526 virtio_crypto_update_stats(struct rte_cryptodev *dev,
527                 struct rte_cryptodev_stats *stats)
528 {
529         unsigned int i;
530         struct virtio_crypto_hw *hw = dev->data->dev_private;
531
532         PMD_INIT_FUNC_TRACE();
533
534         if (stats == NULL) {
535                 VIRTIO_CRYPTO_DRV_LOG_ERR("invalid pointer");
536                 return;
537         }
538
539         for (i = 0; i < hw->max_dataqueues; i++) {
540                 const struct virtqueue *data_queue
541                         = dev->data->queue_pairs[i];
542                 if (data_queue == NULL)
543                         continue;
544
545                 stats->enqueued_count += data_queue->packets_sent_total;
546                 stats->enqueue_err_count += data_queue->packets_sent_failed;
547
548                 stats->dequeued_count += data_queue->packets_received_total;
549                 stats->dequeue_err_count
550                         += data_queue->packets_received_failed;
551         }
552 }
553
554 static void
555 virtio_crypto_dev_stats_get(struct rte_cryptodev *dev,
556                 struct rte_cryptodev_stats *stats)
557 {
558         PMD_INIT_FUNC_TRACE();
559
560         virtio_crypto_update_stats(dev, stats);
561 }
562
563 static void
564 virtio_crypto_dev_stats_reset(struct rte_cryptodev *dev)
565 {
566         unsigned int i;
567         struct virtio_crypto_hw *hw = dev->data->dev_private;
568
569         PMD_INIT_FUNC_TRACE();
570
571         for (i = 0; i < hw->max_dataqueues; i++) {
572                 struct virtqueue *data_queue = dev->data->queue_pairs[i];
573                 if (data_queue == NULL)
574                         continue;
575
576                 data_queue->packets_sent_total = 0;
577                 data_queue->packets_sent_failed = 0;
578
579                 data_queue->packets_received_total = 0;
580                 data_queue->packets_received_failed = 0;
581         }
582 }
583
584 static int
585 virtio_crypto_qp_setup(struct rte_cryptodev *dev, uint16_t queue_pair_id,
586                 const struct rte_cryptodev_qp_conf *qp_conf,
587                 int socket_id)
588 {
589         int ret;
590         struct virtqueue *vq;
591
592         PMD_INIT_FUNC_TRACE();
593
594         /* if virtio dev is started, do not touch the virtqueues */
595         if (dev->data->dev_started)
596                 return 0;
597
598         ret = virtio_crypto_queue_setup(dev, VTCRYPTO_DATAQ, queue_pair_id,
599                         qp_conf->nb_descriptors, socket_id, &vq);
600         if (ret < 0) {
601                 VIRTIO_CRYPTO_INIT_LOG_ERR(
602                         "virtio crypto data queue initialization failed\n");
603                 return ret;
604         }
605
606         dev->data->queue_pairs[queue_pair_id] = vq;
607
608         return 0;
609 }
610
611 static int
612 virtio_crypto_qp_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
613 {
614         struct virtqueue *vq
615                 = (struct virtqueue *)dev->data->queue_pairs[queue_pair_id];
616
617         PMD_INIT_FUNC_TRACE();
618
619         if (vq == NULL) {
620                 VIRTIO_CRYPTO_DRV_LOG_DBG("vq already freed");
621                 return 0;
622         }
623
624         virtio_crypto_queue_release(vq);
625         return 0;
626 }
627
628 static int
629 virtio_negotiate_features(struct virtio_crypto_hw *hw, uint64_t req_features)
630 {
631         uint64_t host_features;
632
633         PMD_INIT_FUNC_TRACE();
634
635         /* Prepare guest_features: feature that driver wants to support */
636         VIRTIO_CRYPTO_INIT_LOG_DBG("guest_features before negotiate = %" PRIx64,
637                 req_features);
638
639         /* Read device(host) feature bits */
640         host_features = VTPCI_OPS(hw)->get_features(hw);
641         VIRTIO_CRYPTO_INIT_LOG_DBG("host_features before negotiate = %" PRIx64,
642                 host_features);
643
644         /*
645          * Negotiate features: the subset of device feature bits we accept
646          * is written back as the guest feature bits.
647          */
648         hw->guest_features = req_features;
649         hw->guest_features = vtpci_cryptodev_negotiate_features(hw,
650                                                         host_features);
651         VIRTIO_CRYPTO_INIT_LOG_DBG("features after negotiate = %" PRIx64,
652                 hw->guest_features);
653
654         if (hw->modern) {
655                 if (!vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) {
656                         VIRTIO_CRYPTO_INIT_LOG_ERR(
657                                 "VIRTIO_F_VERSION_1 feature is not enabled.");
658                         return -1;
659                 }
660                 vtpci_cryptodev_set_status(hw,
661                         VIRTIO_CONFIG_STATUS_FEATURES_OK);
662                 if (!(vtpci_cryptodev_get_status(hw) &
663                         VIRTIO_CONFIG_STATUS_FEATURES_OK)) {
664                         VIRTIO_CRYPTO_INIT_LOG_ERR("failed to set FEATURES_OK "
665                                                 "status!");
666                         return -1;
667                 }
668         }
669
670         hw->req_guest_features = req_features;
671
672         return 0;
673 }
674
675 /* reset device and renegotiate features if needed */
676 static int
677 virtio_crypto_init_device(struct rte_cryptodev *cryptodev,
678         uint64_t req_features)
679 {
680         struct virtio_crypto_hw *hw = cryptodev->data->dev_private;
681         struct virtio_crypto_config local_config;
682         struct virtio_crypto_config *config = &local_config;
683
684         PMD_INIT_FUNC_TRACE();
685
686         /* Reset the device although not necessary at startup */
687         vtpci_cryptodev_reset(hw);
688
689         /* Tell the host we've noticed this device. */
690         vtpci_cryptodev_set_status(hw, VIRTIO_CONFIG_STATUS_ACK);
691
692         /* Tell the host we've known how to drive the device. */
693         vtpci_cryptodev_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER);
694         if (virtio_negotiate_features(hw, req_features) < 0)
695                 return -1;
696
697         /* Get status of the device */
698         vtpci_read_cryptodev_config(hw,
699                 offsetof(struct virtio_crypto_config, status),
700                 &config->status, sizeof(config->status));
701         if (config->status != VIRTIO_CRYPTO_S_HW_READY) {
702                 VIRTIO_CRYPTO_DRV_LOG_ERR("accelerator hardware is "
703                                 "not ready");
704                 return -1;
705         }
706
707         /* Get number of data queues */
708         vtpci_read_cryptodev_config(hw,
709                 offsetof(struct virtio_crypto_config, max_dataqueues),
710                 &config->max_dataqueues,
711                 sizeof(config->max_dataqueues));
712         hw->max_dataqueues = config->max_dataqueues;
713
714         VIRTIO_CRYPTO_INIT_LOG_DBG("hw->max_dataqueues=%d",
715                 hw->max_dataqueues);
716
717         return 0;
718 }
719
720 /*
721  * This function is based on probe() function
722  * It returns 0 on success.
723  */
724 static int
725 crypto_virtio_create(const char *name, struct rte_pci_device *pci_dev,
726                 struct rte_cryptodev_pmd_init_params *init_params)
727 {
728         struct rte_cryptodev *cryptodev;
729         struct virtio_crypto_hw *hw;
730
731         PMD_INIT_FUNC_TRACE();
732
733         cryptodev = rte_cryptodev_pmd_create(name, &pci_dev->device,
734                                         init_params);
735         if (cryptodev == NULL)
736                 return -ENODEV;
737
738         cryptodev->driver_id = cryptodev_virtio_driver_id;
739         cryptodev->dev_ops = &virtio_crypto_dev_ops;
740
741         cryptodev->enqueue_burst = virtio_crypto_pkt_tx_burst;
742         cryptodev->dequeue_burst = virtio_crypto_pkt_rx_burst;
743
744         cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
745                 RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING;
746
747         hw = cryptodev->data->dev_private;
748         hw->dev_id = cryptodev->data->dev_id;
749         hw->virtio_dev_capabilities = virtio_capabilities;
750
751         VIRTIO_CRYPTO_INIT_LOG_DBG("dev %d vendorID=0x%x deviceID=0x%x",
752                 cryptodev->data->dev_id, pci_dev->id.vendor_id,
753                 pci_dev->id.device_id);
754
755         /* pci device init */
756         if (vtpci_cryptodev_init(pci_dev, hw))
757                 return -1;
758
759         if (virtio_crypto_init_device(cryptodev,
760                         VIRTIO_CRYPTO_PMD_GUEST_FEATURES) < 0)
761                 return -1;
762
763         return 0;
764 }
765
766 static int
767 virtio_crypto_dev_uninit(struct rte_cryptodev *cryptodev)
768 {
769         struct virtio_crypto_hw *hw = cryptodev->data->dev_private;
770
771         PMD_INIT_FUNC_TRACE();
772
773         if (rte_eal_process_type() == RTE_PROC_SECONDARY)
774                 return -EPERM;
775
776         if (cryptodev->data->dev_started) {
777                 virtio_crypto_dev_stop(cryptodev);
778                 virtio_crypto_dev_close(cryptodev);
779         }
780
781         cryptodev->dev_ops = NULL;
782         cryptodev->enqueue_burst = NULL;
783         cryptodev->dequeue_burst = NULL;
784
785         /* release control queue */
786         virtio_crypto_queue_release(hw->cvq);
787
788         rte_free(cryptodev->data);
789         cryptodev->data = NULL;
790
791         VIRTIO_CRYPTO_DRV_LOG_INFO("dev_uninit completed");
792
793         return 0;
794 }
795
796 static int
797 virtio_crypto_dev_configure(struct rte_cryptodev *cryptodev,
798         struct rte_cryptodev_config *config __rte_unused)
799 {
800         struct virtio_crypto_hw *hw = cryptodev->data->dev_private;
801
802         PMD_INIT_FUNC_TRACE();
803
804         if (virtio_crypto_init_device(cryptodev,
805                         VIRTIO_CRYPTO_PMD_GUEST_FEATURES) < 0)
806                 return -1;
807
808         /* setup control queue:
809          * queues [0, 1, ... , (config->max_dataqueues - 1)] are data queues;
810          * the queue at index config->max_dataqueues is the control queue
811          */
812         if (virtio_crypto_ctrlq_setup(cryptodev, hw->max_dataqueues) < 0) {
813                 VIRTIO_CRYPTO_INIT_LOG_ERR("control queue setup error");
814                 return -1;
815         }
816         virtio_crypto_ctrlq_start(cryptodev);
817
818         return 0;
819 }
820
821 static void
822 virtio_crypto_dev_stop(struct rte_cryptodev *dev)
823 {
824         struct virtio_crypto_hw *hw = dev->data->dev_private;
825
826         PMD_INIT_FUNC_TRACE();
827         VIRTIO_CRYPTO_DRV_LOG_DBG("virtio_dev_stop");
828
829         vtpci_cryptodev_reset(hw);
830
831         virtio_crypto_dev_free_mbufs(dev);
832         virtio_crypto_free_queues(dev);
833
834         dev->data->dev_started = 0;
835 }
836
837 static int
838 virtio_crypto_dev_start(struct rte_cryptodev *dev)
839 {
840         struct virtio_crypto_hw *hw = dev->data->dev_private;
841
842         if (dev->data->dev_started)
843                 return 0;
844
845         /* Do final configuration before queue engine starts */
846         virtio_crypto_dataq_start(dev);
847         vtpci_cryptodev_reinit_complete(hw);
848
849         dev->data->dev_started = 1;
850
851         return 0;
852 }
853
854 static void
855 virtio_crypto_dev_free_mbufs(struct rte_cryptodev *dev)
856 {
857         uint32_t i;
858         struct virtio_crypto_hw *hw = dev->data->dev_private;
859
860         for (i = 0; i < hw->max_dataqueues; i++) {
861                 VIRTIO_CRYPTO_INIT_LOG_DBG("Before freeing dataq[%d] used "
862                         "and unused buf", i);
863                 VIRTQUEUE_DUMP((struct virtqueue *)
864                         dev->data->queue_pairs[i]);
865
866                 VIRTIO_CRYPTO_INIT_LOG_DBG("queue_pairs[%d]=%p",
867                                 i, dev->data->queue_pairs[i]);
868
869                 virtqueue_detatch_unused(dev->data->queue_pairs[i]);
870
871                 VIRTIO_CRYPTO_INIT_LOG_DBG("After freeing dataq[%d] used and "
872                                         "unused buf", i);
873                 VIRTQUEUE_DUMP(
874                         (struct virtqueue *)dev->data->queue_pairs[i]);
875         }
876 }
877
878 static unsigned int
879 virtio_crypto_sym_get_session_private_size(
880                 struct rte_cryptodev *dev __rte_unused)
881 {
882         PMD_INIT_FUNC_TRACE();
883
884         return RTE_ALIGN_CEIL(sizeof(struct virtio_crypto_session), 16);
885 }
886
887 static int
888 virtio_crypto_check_sym_session_paras(
889                 struct rte_cryptodev *dev)
890 {
891         struct virtio_crypto_hw *hw;
892
893         PMD_INIT_FUNC_TRACE();
894
895         if (unlikely(dev == NULL)) {
896                 VIRTIO_CRYPTO_SESSION_LOG_ERR("dev is NULL");
897                 return -1;
898         }
899         if (unlikely(dev->data == NULL)) {
900                 VIRTIO_CRYPTO_SESSION_LOG_ERR("dev->data is NULL");
901                 return -1;
902         }
903         hw = dev->data->dev_private;
904         if (unlikely(hw == NULL)) {
905                 VIRTIO_CRYPTO_SESSION_LOG_ERR("hw is NULL");
906                 return -1;
907         }
908         if (unlikely(hw->cvq == NULL)) {
909                 VIRTIO_CRYPTO_SESSION_LOG_ERR("vq is NULL");
910                 return -1;
911         }
912
913         return 0;
914 }
915
916 static int
917 virtio_crypto_check_sym_clear_session_paras(
918                 struct rte_cryptodev *dev,
919                 struct rte_cryptodev_sym_session *sess)
920 {
921         PMD_INIT_FUNC_TRACE();
922
923         if (sess == NULL) {
924                 VIRTIO_CRYPTO_SESSION_LOG_ERR("sym_session is NULL");
925                 return -1;
926         }
927
928         return virtio_crypto_check_sym_session_paras(dev);
929 }
930
931 #define NUM_ENTRY_SYM_CLEAR_SESSION 2
932
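/*
 * Send a VIRTIO_CRYPTO_CIPHER_DESTROY_SESSION control request for the
 * given session, poll for completion, then clear the private data and
 * return the session object to its mempool.
 */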
933 static void
934 virtio_crypto_sym_clear_session(
935                 struct rte_cryptodev *dev,
936                 struct rte_cryptodev_sym_session *sess)
937 {
938         struct virtio_crypto_hw *hw;
939         struct virtqueue *vq;
940         struct virtio_crypto_session *session;
941         struct virtio_crypto_op_ctrl_req *ctrl;
942         struct vring_desc *desc;
943         uint8_t *status;
944         uint8_t needed = 1;
945         uint32_t head;
946         uint8_t *malloc_virt_addr;
947         uint64_t malloc_phys_addr;
948         uint8_t len_inhdr = sizeof(struct virtio_crypto_inhdr);
949         uint32_t len_op_ctrl_req = sizeof(struct virtio_crypto_op_ctrl_req);
950         uint32_t desc_offset = len_op_ctrl_req + len_inhdr;
951
952         PMD_INIT_FUNC_TRACE();
953
954         if (virtio_crypto_check_sym_clear_session_paras(dev, sess) < 0)
955                 return;
956
957         hw = dev->data->dev_private;
958         vq = hw->cvq;
959         session = (struct virtio_crypto_session *)get_sym_session_private_data(
960                 sess, cryptodev_virtio_driver_id);
961         if (session == NULL) {
962                 VIRTIO_CRYPTO_SESSION_LOG_ERR("Invalid session parameter");
963                 return;
964         }
965
966         VIRTIO_CRYPTO_SESSION_LOG_INFO("vq->vq_desc_head_idx = %d, "
967                         "vq = %p", vq->vq_desc_head_idx, vq);
968
969         if (vq->vq_free_cnt < needed) {
970                 VIRTIO_CRYPTO_SESSION_LOG_ERR(
971                                 "vq->vq_free_cnt = %d is less than %d, "
972                                 "not enough", vq->vq_free_cnt, needed);
973                 return;
974         }
975
976         /*
977          * Allocate a single buffer to hold the ctrl request, the
978          * returned status (inhdr) and the indirect desc table
979          */
980         malloc_virt_addr = rte_malloc(NULL, len_op_ctrl_req + len_inhdr
981                 + NUM_ENTRY_SYM_CLEAR_SESSION
982                 * sizeof(struct vring_desc), RTE_CACHE_LINE_SIZE);
983         if (malloc_virt_addr == NULL) {
984                 VIRTIO_CRYPTO_SESSION_LOG_ERR("not enough heap room");
985                 return;
986         }
987         malloc_phys_addr = rte_malloc_virt2iova(malloc_virt_addr);
988
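        /*
         * Layout of the allocation, in order:
         *   [ctrl request][inhdr status (written back by the device)][indirect descs]
         */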
989         /* assign ctrl request op part */
990         ctrl = (struct virtio_crypto_op_ctrl_req *)malloc_virt_addr;
991         ctrl->header.opcode = VIRTIO_CRYPTO_CIPHER_DESTROY_SESSION;
992         /* default data virtqueue is 0 */
993         ctrl->header.queue_id = 0;
994         ctrl->u.destroy_session.session_id = session->session_id;
995
996         /* status part */
997         status = &(((struct virtio_crypto_inhdr *)
998                 ((uint8_t *)malloc_virt_addr + len_op_ctrl_req))->status);
999         *status = VIRTIO_CRYPTO_ERR;
1000
1001         /* indirect desc vring part */
1002         desc = (struct vring_desc *)((uint8_t *)malloc_virt_addr
1003                 + desc_offset);
1004
1005         /* ctrl request part */
1006         desc[0].addr = malloc_phys_addr;
1007         desc[0].len = len_op_ctrl_req;
1008         desc[0].flags = VRING_DESC_F_NEXT;
1009         desc[0].next = 1;
1010
1011         /* status part */
1012         desc[1].addr = malloc_phys_addr + len_op_ctrl_req;
1013         desc[1].len = len_inhdr;
1014         desc[1].flags = VRING_DESC_F_WRITE;
1015
1016         /* use only a single desc entry */
1017         head = vq->vq_desc_head_idx;
1018         vq->vq_ring.desc[head].flags = VRING_DESC_F_INDIRECT;
1019         vq->vq_ring.desc[head].addr = malloc_phys_addr + desc_offset;
1020         vq->vq_ring.desc[head].len
1021                 = NUM_ENTRY_SYM_CLEAR_SESSION
1022                 * sizeof(struct vring_desc);
1023         vq->vq_free_cnt -= needed;
1024
1025         vq->vq_desc_head_idx = vq->vq_ring.desc[head].next;
1026
1027         vq_update_avail_ring(vq, head);
1028         vq_update_avail_idx(vq);
1029
1030         VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_queue_index = %d",
1031                                         vq->vq_queue_index);
1032
1033         virtqueue_notify(vq);
1034
1035         rte_rmb();
1036         while (vq->vq_used_cons_idx == vq->vq_ring.used->idx) {
1037                 rte_rmb();
1038                 usleep(100);
1039         }
1040
1041         while (vq->vq_used_cons_idx != vq->vq_ring.used->idx) {
1042                 uint32_t idx, desc_idx, used_idx;
1043                 struct vring_used_elem *uep;
1044
1045                 used_idx = (uint32_t)(vq->vq_used_cons_idx
1046                                 & (vq->vq_nentries - 1));
1047                 uep = &vq->vq_ring.used->ring[used_idx];
1048                 idx = (uint32_t) uep->id;
1049                 desc_idx = idx;
1050                 while (vq->vq_ring.desc[desc_idx].flags
1051                                 & VRING_DESC_F_NEXT) {
1052                         desc_idx = vq->vq_ring.desc[desc_idx].next;
1053                         vq->vq_free_cnt++;
1054                 }
1055
1056                 vq->vq_ring.desc[desc_idx].next = vq->vq_desc_head_idx;
1057                 vq->vq_desc_head_idx = idx;
1058                 vq->vq_used_cons_idx++;
1059                 vq->vq_free_cnt++;
1060         }
1061
1062         if (*status != VIRTIO_CRYPTO_OK) {
1063                 VIRTIO_CRYPTO_SESSION_LOG_ERR("Close session failed "
1064                                 "status=%"PRIu32", session_id=%"PRIu64"",
1065                                 *status, session->session_id);
1066                 rte_free(malloc_virt_addr);
1067                 return;
1068         }
1069
1070         VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_free_cnt=%d\n"
1071                         "vq->vq_desc_head_idx=%d",
1072                         vq->vq_free_cnt, vq->vq_desc_head_idx);
1073
1074         VIRTIO_CRYPTO_SESSION_LOG_INFO("Close session %"PRIu64" successfully ",
1075                         session->session_id);
1076
1077         memset(session, 0, sizeof(struct virtio_crypto_session));
1078         struct rte_mempool *sess_mp = rte_mempool_from_obj(session);
1079         set_sym_session_private_data(sess, cryptodev_virtio_driver_id, NULL);
1080         rte_mempool_put(sess_mp, session);
1081         rte_free(malloc_virt_addr);
1082 }
1083
1084 static struct rte_crypto_cipher_xform *
1085 virtio_crypto_get_cipher_xform(struct rte_crypto_sym_xform *xform)
1086 {
1087         do {
1088                 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
1089                         return &xform->cipher;
1090
1091                 xform = xform->next;
1092         } while (xform);
1093
1094         return NULL;
1095 }
1096
1097 static struct rte_crypto_auth_xform *
1098 virtio_crypto_get_auth_xform(struct rte_crypto_sym_xform *xform)
1099 {
1100         do {
1101                 if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH)
1102                         return &xform->auth;
1103
1104                 xform = xform->next;
1105         } while (xform);
1106
1107         return NULL;
1108 }
1109
1110 /** Get xform chain order */
1111 static int
1112 virtio_crypto_get_chain_order(struct rte_crypto_sym_xform *xform)
1113 {
1114         if (xform == NULL)
1115                 return -1;
1116
1117         /* Cipher Only */
1118         if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
1119                         xform->next == NULL)
1120                 return VIRTIO_CRYPTO_CMD_CIPHER;
1121
1122         /* Authentication Only */
1123         if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
1124                         xform->next == NULL)
1125                 return VIRTIO_CRYPTO_CMD_AUTH;
1126
1127         /* Authenticate then Cipher */
1128         if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
1129                         xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
1130                 return VIRTIO_CRYPTO_CMD_HASH_CIPHER;
1131
1132         /* Cipher then Authenticate */
1133         if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
1134                         xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
1135                 return VIRTIO_CRYPTO_CMD_CIPHER_HASH;
1136
1137         return -1;
1138 }
1139
1140 static int
1141 virtio_crypto_sym_pad_cipher_param(
1142                 struct virtio_crypto_cipher_session_para *para,
1143                 struct rte_crypto_cipher_xform *cipher_xform)
1144 {
1145         switch (cipher_xform->algo) {
1146         case RTE_CRYPTO_CIPHER_AES_CBC:
1147                 para->algo = VIRTIO_CRYPTO_CIPHER_AES_CBC;
1148                 break;
1149         default:
1150                 VIRTIO_CRYPTO_SESSION_LOG_ERR("Crypto: Unsupported "
1151                                 "Cipher alg %u", cipher_xform->algo);
1152                 return -1;
1153         }
1154
1155         para->keylen = cipher_xform->key.length;
1156         switch (cipher_xform->op) {
1157         case RTE_CRYPTO_CIPHER_OP_ENCRYPT:
1158                 para->op = VIRTIO_CRYPTO_OP_ENCRYPT;
1159                 break;
1160         case RTE_CRYPTO_CIPHER_OP_DECRYPT:
1161                 para->op = VIRTIO_CRYPTO_OP_DECRYPT;
1162                 break;
1163         default:
1164                 VIRTIO_CRYPTO_SESSION_LOG_ERR("Unsupported cipher operation "
1165                                         "parameter");
1166                 return -1;
1167         }
1168
1169         return 0;
1170 }
1171
1172 static int
1173 virtio_crypto_sym_pad_auth_param(
1174                 struct virtio_crypto_op_ctrl_req *ctrl,
1175                 struct rte_crypto_auth_xform *auth_xform)
1176 {
1177         uint32_t *algo;
1178         struct virtio_crypto_alg_chain_session_para *para =
1179                 &(ctrl->u.sym_create_session.u.chain.para);
1180
1181         switch (ctrl->u.sym_create_session.u.chain.para.hash_mode) {
1182         case VIRTIO_CRYPTO_SYM_HASH_MODE_PLAIN:
1183                 algo = &(para->u.hash_param.algo);
1184                 break;
1185         case VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH:
1186                 algo = &(para->u.mac_param.algo);
1187                 break;
1188         default:
1189                 VIRTIO_CRYPTO_SESSION_LOG_ERR("Unsupported hash mode %u "
1190                         "specified",
1191                         ctrl->u.sym_create_session.u.chain.para.hash_mode);
1192                 return -1;
1193         }
1194
1195         switch (auth_xform->algo) {
1196         case RTE_CRYPTO_AUTH_SHA1_HMAC:
1197                 *algo = VIRTIO_CRYPTO_MAC_HMAC_SHA1;
1198                 break;
1199         default:
1200                 VIRTIO_CRYPTO_SESSION_LOG_ERR(
1201                         "Crypto: Undefined Hash algo %u specified",
1202                         auth_xform->algo);
1203                 return -1;
1204         }
1205
1206         return 0;
1207 }
1208
1209 static int
1210 virtio_crypto_sym_pad_op_ctrl_req(
1211                 struct virtio_crypto_op_ctrl_req *ctrl,
1212                 struct rte_crypto_sym_xform *xform, bool is_chainned,
1213                 uint8_t **cipher_key_data, uint8_t **auth_key_data,
1214                 struct virtio_crypto_session *session)
1215 {
1216         int ret;
1217         struct rte_crypto_auth_xform *auth_xform = NULL;
1218         struct rte_crypto_cipher_xform *cipher_xform = NULL;
1219
1220         /* Get cipher xform from crypto xform chain */
1221         cipher_xform = virtio_crypto_get_cipher_xform(xform);
1222         if (cipher_xform) {
1223                 if (cipher_xform->iv.length > VIRTIO_CRYPTO_MAX_IV_SIZE) {
1224                         VIRTIO_CRYPTO_SESSION_LOG_ERR(
1225                                 "cipher IV size cannot be longer than %u",
1226                                 VIRTIO_CRYPTO_MAX_IV_SIZE);
1227                         return -1;
1228                 }
1229                 if (is_chainned)
1230                         ret = virtio_crypto_sym_pad_cipher_param(
1231                                 &ctrl->u.sym_create_session.u.chain.para
1232                                                 .cipher_param, cipher_xform);
1233                 else
1234                         ret = virtio_crypto_sym_pad_cipher_param(
1235                                 &ctrl->u.sym_create_session.u.cipher.para,
1236                                 cipher_xform);
1237
1238                 if (ret < 0) {
1239                         VIRTIO_CRYPTO_SESSION_LOG_ERR(
1240                                 "pad cipher parameter failed");
1241                         return -1;
1242                 }
1243
1244                 *cipher_key_data = cipher_xform->key.data;
1245
1246                 session->iv.offset = cipher_xform->iv.offset;
1247                 session->iv.length = cipher_xform->iv.length;
1248         }
1249
1250         /* Get auth xform from crypto xform chain */
1251         auth_xform = virtio_crypto_get_auth_xform(xform);
1252         if (auth_xform) {
1253                 /* FIXME: support VIRTIO_CRYPTO_SYM_HASH_MODE_NESTED */
1254                 struct virtio_crypto_alg_chain_session_para *para =
1255                         &(ctrl->u.sym_create_session.u.chain.para);
1256                 if (auth_xform->key.length) {
1257                         para->hash_mode = VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH;
1258                         para->u.mac_param.auth_key_len =
1259                                 (uint32_t)auth_xform->key.length;
1260                         para->u.mac_param.hash_result_len =
1261                                 auth_xform->digest_length;
1262
1263                         *auth_key_data = auth_xform->key.data;
1264                 } else {
1265                         para->hash_mode = VIRTIO_CRYPTO_SYM_HASH_MODE_PLAIN;
1266                         para->u.hash_param.hash_result_len =
1267                                 auth_xform->digest_length;
1268                 }
1269
1270                 ret = virtio_crypto_sym_pad_auth_param(ctrl, auth_xform);
1271                 if (ret < 0) {
1272                         VIRTIO_CRYPTO_SESSION_LOG_ERR("pad auth parameter "
1273                                                 "failed");
1274                         return -1;
1275                 }
1276         }
1277
1278         return 0;
1279 }
1280
1281 static int
1282 virtio_crypto_check_sym_configure_session_paras(
1283                 struct rte_cryptodev *dev,
1284                 struct rte_crypto_sym_xform *xform,
1285                 struct rte_cryptodev_sym_session *sym_sess,
1286                 struct rte_mempool *mempool)
1287 {
1288         if (unlikely(xform == NULL) || unlikely(sym_sess == NULL) ||
1289                 unlikely(mempool == NULL)) {
1290                 VIRTIO_CRYPTO_SESSION_LOG_ERR("NULL pointer");
1291                 return -1;
1292         }
1293
1294         if (virtio_crypto_check_sym_session_paras(dev) < 0)
1295                 return -1;
1296
1297         return 0;
1298 }
1299
1300 static int
1301 virtio_crypto_sym_configure_session(
1302                 struct rte_cryptodev *dev,
1303                 struct rte_crypto_sym_xform *xform,
1304                 struct rte_cryptodev_sym_session *sess,
1305                 struct rte_mempool *mempool)
1306 {
1307         int ret;
1308         struct virtio_crypto_session crypto_sess;
1309         void *session_private = &crypto_sess;
1310         struct virtio_crypto_session *session;
1311         struct virtio_crypto_op_ctrl_req *ctrl_req;
1312         enum virtio_crypto_cmd_id cmd_id;
1313         uint8_t *cipher_key_data = NULL;
1314         uint8_t *auth_key_data = NULL;
1315         struct virtio_crypto_hw *hw;
1316         struct virtqueue *control_vq;
1317
1318         PMD_INIT_FUNC_TRACE();
1319
1320         ret = virtio_crypto_check_sym_configure_session_paras(dev, xform,
1321                         sess, mempool);
1322         if (ret < 0) {
1323                 VIRTIO_CRYPTO_SESSION_LOG_ERR("Invalid parameters");
1324                 return ret;
1325         }
1326
1327         if (rte_mempool_get(mempool, &session_private)) {
1328                 VIRTIO_CRYPTO_SESSION_LOG_ERR(
1329                         "Couldn't get object from session mempool");
1330                 return -ENOMEM;
1331         }
1332
1333         session = (struct virtio_crypto_session *)session_private;
1334         memset(session, 0, sizeof(struct virtio_crypto_session));
1335         ctrl_req = &session->ctrl;
1336         ctrl_req->header.opcode = VIRTIO_CRYPTO_CIPHER_CREATE_SESSION;
1337         /* FIXME: support multiqueue */
1338         ctrl_req->header.queue_id = 0;
1339
1340         hw = dev->data->dev_private;
1341         control_vq = hw->cvq;
1342
1343         cmd_id = virtio_crypto_get_chain_order(xform);
1344         if (cmd_id == VIRTIO_CRYPTO_CMD_CIPHER_HASH)
1345                 ctrl_req->u.sym_create_session.u.chain.para.alg_chain_order
1346                         = VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_CIPHER_THEN_HASH;
1347         if (cmd_id == VIRTIO_CRYPTO_CMD_HASH_CIPHER)
1348                 ctrl_req->u.sym_create_session.u.chain.para.alg_chain_order
1349                         = VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_HASH_THEN_CIPHER;
1350
1351         switch (cmd_id) {
1352         case VIRTIO_CRYPTO_CMD_CIPHER_HASH:
1353         case VIRTIO_CRYPTO_CMD_HASH_CIPHER:
1354                 ctrl_req->u.sym_create_session.op_type
1355                         = VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING;
1356
1357                 ret = virtio_crypto_sym_pad_op_ctrl_req(ctrl_req,
1358                         xform, true, &cipher_key_data, &auth_key_data, session);
1359                 if (ret < 0) {
1360                         VIRTIO_CRYPTO_SESSION_LOG_ERR(
1361                                 "padding sym op ctrl req failed");
1362                         goto error_out;
1363                 }
1364                 ret = virtio_crypto_send_command(control_vq, ctrl_req,
1365                         cipher_key_data, auth_key_data, session);
1366                 if (ret < 0) {
1367                         VIRTIO_CRYPTO_SESSION_LOG_ERR(
1368                                 "create session failed: %d", ret);
1369                         goto error_out;
1370                 }
1371                 break;
1372         case VIRTIO_CRYPTO_CMD_CIPHER:
1373                 ctrl_req->u.sym_create_session.op_type
1374                         = VIRTIO_CRYPTO_SYM_OP_CIPHER;
1375                 ret = virtio_crypto_sym_pad_op_ctrl_req(ctrl_req, xform,
1376                         false, &cipher_key_data, &auth_key_data, session);
1377                 if (ret < 0) {
1378                         VIRTIO_CRYPTO_SESSION_LOG_ERR(
1379                                 "padding sym op ctrl req failed");
1380                         goto error_out;
1381                 }
1382                 ret = virtio_crypto_send_command(control_vq, ctrl_req,
1383                         cipher_key_data, NULL, session);
1384                 if (ret < 0) {
1385                         VIRTIO_CRYPTO_SESSION_LOG_ERR(
1386                                 "create session failed: %d", ret);
1387                         goto error_out;
1388                 }
1389                 break;
1390         default:
1391                 VIRTIO_CRYPTO_SESSION_LOG_ERR(
1392                         "Unsupported operation chain order parameter");
1393                 goto error_out;
1394         }
1395
1396         set_sym_session_private_data(sess, dev->driver_id,
1397                 session_private);
1398
1399         return 0;
1400
1401 error_out:
1402         return -1;
1403 }
1404
1405 static void
1406 virtio_crypto_dev_info_get(struct rte_cryptodev *dev,
1407                 struct rte_cryptodev_info *info)
1408 {
1409         struct virtio_crypto_hw *hw = dev->data->dev_private;
1410
1411         PMD_INIT_FUNC_TRACE();
1412
1413         if (info != NULL) {
1414                 info->driver_id = cryptodev_virtio_driver_id;
1415                 info->feature_flags = dev->feature_flags;
1416                 info->max_nb_queue_pairs = hw->max_dataqueues;
1417                 /* No limit on the number of sessions */
1418                 info->sym.max_nb_sessions = 0;
1419                 info->capabilities = hw->virtio_dev_capabilities;
1420         }
1421 }
1422
1423 static int
1424 crypto_virtio_pci_probe(
1425         struct rte_pci_driver *pci_drv __rte_unused,
1426         struct rte_pci_device *pci_dev)
1427 {
1428         struct rte_cryptodev_pmd_init_params init_params = {
1429                 .name = "",
1430                 .socket_id = rte_socket_id(),
1431                 .private_data_size = sizeof(struct virtio_crypto_hw)
1432         };
1433         char name[RTE_CRYPTODEV_NAME_MAX_LEN];
1434
1435         VIRTIO_CRYPTO_DRV_LOG_DBG("Found Crypto device at %02x:%02x.%x",
1436                         pci_dev->addr.bus,
1437                         pci_dev->addr.devid,
1438                         pci_dev->addr.function);
1439
1440         rte_pci_device_name(&pci_dev->addr, name, sizeof(name));
1441
1442         return crypto_virtio_create(name, pci_dev, &init_params);
1443 }
1444
1445 static int
1446 crypto_virtio_pci_remove(
1447         struct rte_pci_device *pci_dev __rte_unused)
1448 {
1449         struct rte_cryptodev *cryptodev;
1450         char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
1451
1452         if (pci_dev == NULL)
1453                 return -EINVAL;
1454
1455         rte_pci_device_name(&pci_dev->addr, cryptodev_name,
1456                         sizeof(cryptodev_name));
1457
1458         cryptodev = rte_cryptodev_pmd_get_named_dev(cryptodev_name);
1459         if (cryptodev == NULL)
1460                 return -ENODEV;
1461
1462         return virtio_crypto_dev_uninit(cryptodev);
1463 }
1464
1465 static struct rte_pci_driver rte_virtio_crypto_driver = {
1466         .id_table = pci_id_virtio_crypto_map,
1467         .drv_flags = 0,
1468         .probe = crypto_virtio_pci_probe,
1469         .remove = crypto_virtio_pci_remove
1470 };
1471
1472 static struct cryptodev_driver virtio_crypto_drv;
1473
1474 RTE_PMD_REGISTER_PCI(CRYPTODEV_NAME_VIRTIO_PMD, rte_virtio_crypto_driver);
1475 RTE_PMD_REGISTER_CRYPTO_DRIVER(virtio_crypto_drv,
1476         rte_virtio_crypto_driver.driver,
1477         cryptodev_virtio_driver_id);
1478
1479 RTE_INIT(virtio_crypto_init_log)
1480 {
1481         virtio_crypto_logtype_init = rte_log_register("pmd.crypto.virtio.init");
1482         if (virtio_crypto_logtype_init >= 0)
1483                 rte_log_set_level(virtio_crypto_logtype_init, RTE_LOG_NOTICE);
1484
1485         virtio_crypto_logtype_session =
1486                 rte_log_register("pmd.crypto.virtio.session");
1487         if (virtio_crypto_logtype_session >= 0)
1488                 rte_log_set_level(virtio_crypto_logtype_session,
1489                                 RTE_LOG_NOTICE);
1490
1491         virtio_crypto_logtype_rx = rte_log_register("pmd.crypto.virtio.rx");
1492         if (virtio_crypto_logtype_rx >= 0)
1493                 rte_log_set_level(virtio_crypto_logtype_rx, RTE_LOG_NOTICE);
1494
1495         virtio_crypto_logtype_tx = rte_log_register("pmd.crypto.virtio.tx");
1496         if (virtio_crypto_logtype_tx >= 0)
1497                 rte_log_set_level(virtio_crypto_logtype_tx, RTE_LOG_NOTICE);
1498
1499         virtio_crypto_logtype_driver =
1500                 rte_log_register("pmd.crypto.virtio.driver");
1501         if (virtio_crypto_logtype_driver >= 0)
1502                 rte_log_set_level(virtio_crypto_logtype_driver, RTE_LOG_NOTICE);
1503 }