crypto/virtio: support stats related ops
drivers/crypto/virtio/virtio_cryptodev.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.
3  */
4 #include <stdbool.h>
5 #include <unistd.h>
6
7 #include <rte_common.h>
8 #include <rte_errno.h>
9 #include <rte_pci.h>
10 #include <rte_bus_pci.h>
11 #include <rte_cryptodev.h>
12 #include <rte_cryptodev_pmd.h>
13 #include <rte_eal.h>
14
15 #include "virtio_cryptodev.h"
16 #include "virtqueue.h"
17 #include "virtio_crypto_algs.h"
18
19 int virtio_crypto_logtype_init;
20 int virtio_crypto_logtype_session;
21 int virtio_crypto_logtype_rx;
22 int virtio_crypto_logtype_tx;
23 int virtio_crypto_logtype_driver;
24
25 static int virtio_crypto_dev_configure(struct rte_cryptodev *dev,
26                 struct rte_cryptodev_config *config);
27 static int virtio_crypto_dev_start(struct rte_cryptodev *dev);
28 static void virtio_crypto_dev_stop(struct rte_cryptodev *dev);
29 static int virtio_crypto_dev_close(struct rte_cryptodev *dev);
30 static void virtio_crypto_dev_info_get(struct rte_cryptodev *dev,
31                 struct rte_cryptodev_info *dev_info);
32 static void virtio_crypto_dev_stats_get(struct rte_cryptodev *dev,
33                 struct rte_cryptodev_stats *stats);
34 static void virtio_crypto_dev_stats_reset(struct rte_cryptodev *dev);
35 static int virtio_crypto_qp_setup(struct rte_cryptodev *dev,
36                 uint16_t queue_pair_id,
37                 const struct rte_cryptodev_qp_conf *qp_conf,
38                 int socket_id,
39                 struct rte_mempool *session_pool);
40 static int virtio_crypto_qp_release(struct rte_cryptodev *dev,
41                 uint16_t queue_pair_id);
42 static void virtio_crypto_dev_free_mbufs(struct rte_cryptodev *dev);
43 static unsigned int virtio_crypto_sym_get_session_private_size(
44                 struct rte_cryptodev *dev);
45 static void virtio_crypto_sym_clear_session(struct rte_cryptodev *dev,
46                 struct rte_cryptodev_sym_session *sess);
47 static int virtio_crypto_sym_configure_session(struct rte_cryptodev *dev,
48                 struct rte_crypto_sym_xform *xform,
49                 struct rte_cryptodev_sym_session *session,
50                 struct rte_mempool *mp);
51
52 /*
53  * The set of PCI devices this driver supports
54  */
55 static const struct rte_pci_id pci_id_virtio_crypto_map[] = {
56         { RTE_PCI_DEVICE(VIRTIO_CRYPTO_PCI_VENDORID,
57                                 VIRTIO_CRYPTO_PCI_DEVICEID) },
58         { .vendor_id = 0, /* sentinel */ },
59 };
60
61 uint8_t cryptodev_virtio_driver_id;
62
63 #define NUM_ENTRY_SYM_CREATE_SESSION 4
64
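/*
 * Send a session-create control request on the control virtqueue. The
 * request, the optional cipher/auth keys and a writable session_input are
 * packed into a single indirect descriptor chain; the queue is then
 * notified and the function busy-waits for the device to consume the
 * request before copying the returned session id into the driver session.
 */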
65 static int
66 virtio_crypto_send_command(struct virtqueue *vq,
67                 struct virtio_crypto_op_ctrl_req *ctrl, uint8_t *cipher_key,
68                 uint8_t *auth_key, struct virtio_crypto_session *session)
69 {
70         uint8_t idx = 0;
71         uint8_t needed = 1;
72         uint32_t head = 0;
73         uint32_t len_cipher_key = 0;
74         uint32_t len_auth_key = 0;
75         uint32_t len_ctrl_req = sizeof(struct virtio_crypto_op_ctrl_req);
76         uint32_t len_session_input = sizeof(struct virtio_crypto_session_input);
77         uint32_t len_total = 0;
78         uint32_t input_offset = 0;
79         void *virt_addr_started = NULL;
80         phys_addr_t phys_addr_started;
81         struct vring_desc *desc;
82         uint32_t desc_offset;
83         struct virtio_crypto_session_input *input;
84         int ret;
85
86         PMD_INIT_FUNC_TRACE();
87
88         if (session == NULL) {
89                 VIRTIO_CRYPTO_SESSION_LOG_ERR("session is NULL.");
90                 return -EINVAL;
91         }
92         /* a cipher key is always required; cipher-only is used when auth_key is NULL */
93         if (!cipher_key) {
94                 VIRTIO_CRYPTO_SESSION_LOG_ERR("cipher key is NULL.");
95                 return -EINVAL;
96         }
97
98         head = vq->vq_desc_head_idx;
99         VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_desc_head_idx = %d, vq = %p",
100                                         head, vq);
101
102         if (vq->vq_free_cnt < needed) {
103                 VIRTIO_CRYPTO_SESSION_LOG_ERR("Not enough free entries");
104                 return -ENOSPC;
105         }
106
107         /* calculate the length of cipher key */
108         if (cipher_key) {
109                 switch (ctrl->u.sym_create_session.op_type) {
110                 case VIRTIO_CRYPTO_SYM_OP_CIPHER:
111                         len_cipher_key
112                                 = ctrl->u.sym_create_session.u.cipher
113                                                         .para.keylen;
114                         break;
115                 case VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING:
116                         len_cipher_key
117                                 = ctrl->u.sym_create_session.u.chain
118                                         .para.cipher_param.keylen;
119                         break;
120                 default:
121                         VIRTIO_CRYPTO_SESSION_LOG_ERR("invalid op type");
122                         return -EINVAL;
123                 }
124         }
125
126         /* calculate the length of auth key */
127         if (auth_key) {
128                 len_auth_key =
129                         ctrl->u.sym_create_session.u.chain.para.u.mac_param
130                                 .auth_key_len;
131         }
132
133         /*
134          * malloc memory to store indirect vring_desc entries, including
135          * ctrl request, cipher key, auth key, session input and desc vring
136          */
137         desc_offset = len_ctrl_req + len_cipher_key + len_auth_key
138                 + len_session_input;
139         virt_addr_started = rte_malloc(NULL,
140                 desc_offset + NUM_ENTRY_SYM_CREATE_SESSION
141                         * sizeof(struct vring_desc), RTE_CACHE_LINE_SIZE);
142         if (virt_addr_started == NULL) {
143                 VIRTIO_CRYPTO_SESSION_LOG_ERR("not enough heap memory");
144                 return -ENOSPC;
145         }
146         phys_addr_started = rte_malloc_virt2iova(virt_addr_started);
147
148         /* address to store indirect vring desc entries */
149         desc = (struct vring_desc *)
150                 ((uint8_t *)virt_addr_started + desc_offset);
151
152         /*  ctrl req part */
153         memcpy(virt_addr_started, ctrl, len_ctrl_req);
154         desc[idx].addr = phys_addr_started;
155         desc[idx].len = len_ctrl_req;
156         desc[idx].flags = VRING_DESC_F_NEXT;
157         desc[idx].next = idx + 1;
158         idx++;
159         len_total += len_ctrl_req;
160         input_offset += len_ctrl_req;
161
162         /* cipher key part */
163         if (len_cipher_key > 0) {
164                 memcpy((uint8_t *)virt_addr_started + len_total,
165                         cipher_key, len_cipher_key);
166
167                 desc[idx].addr = phys_addr_started + len_total;
168                 desc[idx].len = len_cipher_key;
169                 desc[idx].flags = VRING_DESC_F_NEXT;
170                 desc[idx].next = idx + 1;
171                 idx++;
172                 len_total += len_cipher_key;
173                 input_offset += len_cipher_key;
174         }
175
176         /* auth key part */
177         if (len_auth_key > 0) {
178                 memcpy((uint8_t *)virt_addr_started + len_total,
179                         auth_key, len_auth_key);
180
181                 desc[idx].addr = phys_addr_started + len_total;
182                 desc[idx].len = len_auth_key;
183                 desc[idx].flags = VRING_DESC_F_NEXT;
184                 desc[idx].next = idx + 1;
185                 idx++;
186                 len_total += len_auth_key;
187                 input_offset += len_auth_key;
188         }
189
190         /* input part */
191         input = (struct virtio_crypto_session_input *)
192                 ((uint8_t *)virt_addr_started + input_offset);
193         input->status = VIRTIO_CRYPTO_ERR;
194         input->session_id = ~0ULL;
195         desc[idx].addr = phys_addr_started + len_total;
196         desc[idx].len = len_session_input;
197         desc[idx].flags = VRING_DESC_F_WRITE;
198         idx++;
199
200         /* use a single desc entry */
201         vq->vq_ring.desc[head].addr = phys_addr_started + desc_offset;
202         vq->vq_ring.desc[head].len = idx * sizeof(struct vring_desc);
203         vq->vq_ring.desc[head].flags = VRING_DESC_F_INDIRECT;
204         vq->vq_free_cnt--;
205
206         vq->vq_desc_head_idx = vq->vq_ring.desc[head].next;
207
208         vq_update_avail_ring(vq, head);
209         vq_update_avail_idx(vq);
210
211         VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_queue_index = %d",
212                                         vq->vq_queue_index);
213
214         virtqueue_notify(vq);
215
216         rte_rmb();
217         while (vq->vq_used_cons_idx == vq->vq_ring.used->idx) {
218                 rte_rmb();
219                 usleep(100);
220         }
221
222         while (vq->vq_used_cons_idx != vq->vq_ring.used->idx) {
223                 uint32_t idx, desc_idx, used_idx;
224                 struct vring_used_elem *uep;
225
226                 used_idx = (uint32_t)(vq->vq_used_cons_idx
227                                 & (vq->vq_nentries - 1));
228                 uep = &vq->vq_ring.used->ring[used_idx];
229                 idx = (uint32_t) uep->id;
230                 desc_idx = idx;
231
232                 while (vq->vq_ring.desc[desc_idx].flags & VRING_DESC_F_NEXT) {
233                         desc_idx = vq->vq_ring.desc[desc_idx].next;
234                         vq->vq_free_cnt++;
235                 }
236
237                 vq->vq_ring.desc[desc_idx].next = vq->vq_desc_head_idx;
238                 vq->vq_desc_head_idx = idx;
239
240                 vq->vq_used_cons_idx++;
241                 vq->vq_free_cnt++;
242         }
243
244         VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_free_cnt=%d\n"
245                         "vq->vq_desc_head_idx=%d",
246                         vq->vq_free_cnt, vq->vq_desc_head_idx);
247
248         /* get the result */
249         if (input->status != VIRTIO_CRYPTO_OK) {
250                 VIRTIO_CRYPTO_SESSION_LOG_ERR("Something wrong on backend! "
251                                 "status=%u, session_id=%" PRIu64 "",
252                                 input->status, input->session_id);
253                 rte_free(virt_addr_started);
254                 ret = -1;
255         } else {
256                 session->session_id = input->session_id;
257
258                 VIRTIO_CRYPTO_SESSION_LOG_INFO("Create session successfully, "
259                                 "session_id=%" PRIu64 "", input->session_id);
260                 rte_free(virt_addr_started);
261                 ret = 0;
262         }
263
264         return ret;
265 }
266
267 void
268 virtio_crypto_queue_release(struct virtqueue *vq)
269 {
270         struct virtio_crypto_hw *hw;
271
272         PMD_INIT_FUNC_TRACE();
273
274         if (vq) {
275                 hw = vq->hw;
276                 /* Select and deactivate the queue */
277                 VTPCI_OPS(hw)->del_queue(hw, vq);
278
279                 rte_memzone_free(vq->mz);
280                 rte_mempool_free(vq->mpool);
281                 rte_free(vq);
282         }
283 }
284
285 #define MPOOL_MAX_NAME_SZ 32
286
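/*
 * Common virtqueue setup for data and control queues: read the queue size
 * from the device, allocate the virtqueue structure (plus a per-descriptor
 * op cookie mempool for data queues) and reserve a memzone for the vring.
 */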
287 int
288 virtio_crypto_queue_setup(struct rte_cryptodev *dev,
289                 int queue_type,
290                 uint16_t vtpci_queue_idx,
291                 uint16_t nb_desc,
292                 int socket_id,
293                 struct virtqueue **pvq)
294 {
295         char vq_name[VIRTQUEUE_MAX_NAME_SZ];
296         char mpool_name[MPOOL_MAX_NAME_SZ];
297         const struct rte_memzone *mz;
298         unsigned int vq_size, size;
299         struct virtio_crypto_hw *hw = dev->data->dev_private;
300         struct virtqueue *vq = NULL;
301         uint32_t i = 0;
302         uint32_t j;
303
304         PMD_INIT_FUNC_TRACE();
305
306         VIRTIO_CRYPTO_INIT_LOG_DBG("setting up queue: %u", vtpci_queue_idx);
307
308         /*
309          * Read the virtqueue size from the Queue Size field
310          * Always a power of 2; if 0, the virtqueue does not exist
311          */
312         vq_size = VTPCI_OPS(hw)->get_queue_num(hw, vtpci_queue_idx);
313         if (vq_size == 0) {
314                 VIRTIO_CRYPTO_INIT_LOG_ERR("virtqueue does not exist");
315                 return -EINVAL;
316         }
317         VIRTIO_CRYPTO_INIT_LOG_DBG("vq_size: %u", vq_size);
318
319         if (!rte_is_power_of_2(vq_size)) {
320                 VIRTIO_CRYPTO_INIT_LOG_ERR("virtqueue size is not a power of 2");
321                 return -EINVAL;
322         }
323
324         if (queue_type == VTCRYPTO_DATAQ) {
325                 snprintf(vq_name, sizeof(vq_name), "dev%d_dataqueue%d",
326                                 dev->data->dev_id, vtpci_queue_idx);
327                 snprintf(mpool_name, sizeof(mpool_name),
328                                 "dev%d_dataqueue%d_mpool",
329                                 dev->data->dev_id, vtpci_queue_idx);
330         } else if (queue_type == VTCRYPTO_CTRLQ) {
331                 snprintf(vq_name, sizeof(vq_name), "dev%d_controlqueue",
332                                 dev->data->dev_id);
333                 snprintf(mpool_name, sizeof(mpool_name),
334                                 "dev%d_controlqueue_mpool",
335                                 dev->data->dev_id);
336         }
337         size = RTE_ALIGN_CEIL(sizeof(*vq) +
338                                 vq_size * sizeof(struct vq_desc_extra),
339                                 RTE_CACHE_LINE_SIZE);
340         vq = rte_zmalloc_socket(vq_name, size, RTE_CACHE_LINE_SIZE,
341                                 socket_id);
342         if (vq == NULL) {
343                 VIRTIO_CRYPTO_INIT_LOG_ERR("Can not allocate virtqueue");
344                 return -ENOMEM;
345         }
346
347         if (queue_type == VTCRYPTO_DATAQ) {
348                 /* pre-allocate a mempool and use it in the data plane to
349                  * improve performance
350                  */
351                 vq->mpool = rte_mempool_lookup(mpool_name);
352                 if (vq->mpool == NULL)
353                         vq->mpool = rte_mempool_create(mpool_name,
354                                         vq_size,
355                                         sizeof(struct virtio_crypto_op_cookie),
356                                         RTE_CACHE_LINE_SIZE, 0,
357                                         NULL, NULL, NULL, NULL, socket_id,
358                                         0);
359                 if (!vq->mpool) {
360                         VIRTIO_CRYPTO_DRV_LOG_ERR("Virtio Crypto PMD "
361                                         "Cannot create mempool");
362                         goto mpool_create_err;
363                 }
364                 for (i = 0; i < vq_size; i++) {
365                         vq->vq_descx[i].cookie =
366                                 rte_zmalloc("crypto PMD op cookie pointer",
367                                         sizeof(struct virtio_crypto_op_cookie),
368                                         RTE_CACHE_LINE_SIZE);
369                         if (vq->vq_descx[i].cookie == NULL) {
370                                 VIRTIO_CRYPTO_DRV_LOG_ERR("Failed to "
371                                                 "alloc mem for cookie");
372                                 goto cookie_alloc_err;
373                         }
374                 }
375         }
376
377         vq->hw = hw;
378         vq->dev_id = dev->data->dev_id;
379         vq->vq_queue_index = vtpci_queue_idx;
380         vq->vq_nentries = vq_size;
381
382         /*
383          * Using part of the vring entries is permitted, but the maximum
384          * is vq_size
385          */
386         if (nb_desc == 0 || nb_desc > vq_size)
387                 nb_desc = vq_size;
388         vq->vq_free_cnt = nb_desc;
389
390         /*
391          * Reserve a memzone for vring elements
392          */
393         size = vring_size(vq_size, VIRTIO_PCI_VRING_ALIGN);
394         vq->vq_ring_size = RTE_ALIGN_CEIL(size, VIRTIO_PCI_VRING_ALIGN);
395         VIRTIO_CRYPTO_INIT_LOG_DBG("%s vring_size: %d, rounded_vring_size: %d",
396                         (queue_type == VTCRYPTO_DATAQ) ? "dataq" : "ctrlq",
397                         size, vq->vq_ring_size);
398
399         mz = rte_memzone_reserve_aligned(vq_name, vq->vq_ring_size,
400                         socket_id, 0, VIRTIO_PCI_VRING_ALIGN);
401         if (mz == NULL) {
402                 if (rte_errno == EEXIST)
403                         mz = rte_memzone_lookup(vq_name);
404                 if (mz == NULL) {
405                         VIRTIO_CRYPTO_INIT_LOG_ERR("not enough memory");
406                         goto mz_reserve_err;
407                 }
408         }
409
410         /*
411          * The virtio PCI device VIRTIO_PCI_QUEUE_PFN register is 32 bit,
412          * and only accepts a 32 bit page frame number.
413          * Check if the allocated physical memory exceeds 16TB.
414          */
415         if ((mz->phys_addr + vq->vq_ring_size - 1)
416                                 >> (VIRTIO_PCI_QUEUE_ADDR_SHIFT + 32)) {
417                 VIRTIO_CRYPTO_INIT_LOG_ERR("vring address shouldn't be "
418                                         "above 16TB!");
419                 goto vring_addr_err;
420         }
421
422         memset(mz->addr, 0, mz->len);
423         vq->mz = mz;
424         vq->vq_ring_mem = mz->phys_addr;
425         vq->vq_ring_virt_mem = mz->addr;
426         VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_ring_mem(physical): 0x%"PRIx64,
427                                         (uint64_t)mz->phys_addr);
428         VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_ring_virt_mem: 0x%"PRIx64,
429                                         (uint64_t)(uintptr_t)mz->addr);
430
431         *pvq = vq;
432
433         return 0;
434
435 vring_addr_err:
436         rte_memzone_free(mz);
437 mz_reserve_err:
438 cookie_alloc_err:
439         rte_mempool_free(vq->mpool);
440         if (i != 0) {
441                 for (j = 0; j < i; j++)
442                         rte_free(vq->vq_descx[j].cookie);
443         }
444 mpool_create_err:
445         rte_free(vq);
446         return -ENOMEM;
447 }
448
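/*
 * Create the control virtqueue; the caller chooses its queue index
 * (dev_configure passes hw->max_dataqueues).
 */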
449 static int
450 virtio_crypto_ctrlq_setup(struct rte_cryptodev *dev, uint16_t queue_idx)
451 {
452         int ret;
453         struct virtqueue *vq;
454         struct virtio_crypto_hw *hw = dev->data->dev_private;
455
456         /* if virtio device has started, do not touch the virtqueues */
457         if (dev->data->dev_started)
458                 return 0;
459
460         PMD_INIT_FUNC_TRACE();
461
462         ret = virtio_crypto_queue_setup(dev, VTCRYPTO_CTRLQ, queue_idx,
463                         0, SOCKET_ID_ANY, &vq);
464         if (ret < 0) {
465                 VIRTIO_CRYPTO_INIT_LOG_ERR("control vq initialization failed");
466                 return ret;
467         }
468
469         hw->cvq = vq;
470
471         return 0;
472 }
473
474 static void
475 virtio_crypto_free_queues(struct rte_cryptodev *dev)
476 {
477         unsigned int i;
478         struct virtio_crypto_hw *hw = dev->data->dev_private;
479
480         PMD_INIT_FUNC_TRACE();
481
482         /* control queue release */
483         virtio_crypto_queue_release(hw->cvq);
484
485         /* data queue release */
486         for (i = 0; i < hw->max_dataqueues; i++)
487                 virtio_crypto_queue_release(dev->data->queue_pairs[i]);
488 }
489
490 static int
491 virtio_crypto_dev_close(struct rte_cryptodev *dev __rte_unused)
492 {
493         return 0;
494 }
495
496 /*
497  * dev_ops for virtio, bare necessities for basic operation
498  */
499 static struct rte_cryptodev_ops virtio_crypto_dev_ops = {
500         /* Device related operations */
501         .dev_configure                   = virtio_crypto_dev_configure,
502         .dev_start                       = virtio_crypto_dev_start,
503         .dev_stop                        = virtio_crypto_dev_stop,
504         .dev_close                       = virtio_crypto_dev_close,
505         .dev_infos_get                   = virtio_crypto_dev_info_get,
506
507         .stats_get                       = virtio_crypto_dev_stats_get,
508         .stats_reset                     = virtio_crypto_dev_stats_reset,
509
510         .queue_pair_setup                = virtio_crypto_qp_setup,
511         .queue_pair_release              = virtio_crypto_qp_release,
512         .queue_pair_start                = NULL,
513         .queue_pair_stop                 = NULL,
514         .queue_pair_count                = NULL,
515
516         /* Crypto related operations */
517         .session_get_size       = virtio_crypto_sym_get_session_private_size,
518         .session_configure      = virtio_crypto_sym_configure_session,
519         .session_clear          = virtio_crypto_sym_clear_session,
520         .qp_attach_session = NULL,
521         .qp_detach_session = NULL
522 };
523
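/*
 * Fold the per-data-queue counters, which are maintained on the data path,
 * into the generic cryptodev statistics structure.
 */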
524 static void
525 virtio_crypto_update_stats(struct rte_cryptodev *dev,
526                 struct rte_cryptodev_stats *stats)
527 {
528         unsigned int i;
529         struct virtio_crypto_hw *hw = dev->data->dev_private;
530
531         PMD_INIT_FUNC_TRACE();
532
533         if (stats == NULL) {
534                 VIRTIO_CRYPTO_DRV_LOG_ERR("invalid pointer");
535                 return;
536         }
537
538         for (i = 0; i < hw->max_dataqueues; i++) {
539                 const struct virtqueue *data_queue
540                         = dev->data->queue_pairs[i];
541                 if (data_queue == NULL)
542                         continue;
543
544                 stats->enqueued_count += data_queue->packets_sent_total;
545                 stats->enqueue_err_count += data_queue->packets_sent_failed;
546
547                 stats->dequeued_count += data_queue->packets_received_total;
548                 stats->dequeue_err_count
549                         += data_queue->packets_received_failed;
550         }
551 }
552
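/*
 * stats_get/stats_reset callbacks. Illustrative application-side usage
 * (a sketch, not part of this driver, with cryptodev_id being the
 * application's device id): the counters aggregated above are what a
 * caller of the generic cryptodev stats API observes, e.g.
 *
 *     struct rte_cryptodev_stats stats;
 *     if (rte_cryptodev_stats_get(cryptodev_id, &stats) == 0)
 *             printf("enq=%" PRIu64 " deq=%" PRIu64 "\n",
 *                    stats.enqueued_count, stats.dequeued_count);
 *     rte_cryptodev_stats_reset(cryptodev_id);
 */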
553 static void
554 virtio_crypto_dev_stats_get(struct rte_cryptodev *dev,
555                 struct rte_cryptodev_stats *stats)
556 {
557         PMD_INIT_FUNC_TRACE();
558
559         virtio_crypto_update_stats(dev, stats);
560 }
561
562 static void
563 virtio_crypto_dev_stats_reset(struct rte_cryptodev *dev)
564 {
565         unsigned int i;
566         struct virtio_crypto_hw *hw = dev->data->dev_private;
567
568         PMD_INIT_FUNC_TRACE();
569
570         for (i = 0; i < hw->max_dataqueues; i++) {
571                 struct virtqueue *data_queue = dev->data->queue_pairs[i];
572                 if (data_queue == NULL)
573                         continue;
574
575                 data_queue->packets_sent_total = 0;
576                 data_queue->packets_sent_failed = 0;
577
578                 data_queue->packets_received_total = 0;
579                 data_queue->packets_received_failed = 0;
580         }
581 }
582
583 static int
584 virtio_crypto_qp_setup(struct rte_cryptodev *dev, uint16_t queue_pair_id,
585                 const struct rte_cryptodev_qp_conf *qp_conf,
586                 int socket_id,
587                 struct rte_mempool *session_pool __rte_unused)
588 {
589         int ret;
590         struct virtqueue *vq;
591
592         PMD_INIT_FUNC_TRACE();
593
594         /* if virtio dev is started, do not touch the virtqueues */
595         if (dev->data->dev_started)
596                 return 0;
597
598         ret = virtio_crypto_queue_setup(dev, VTCRYPTO_DATAQ, queue_pair_id,
599                         qp_conf->nb_descriptors, socket_id, &vq);
600         if (ret < 0) {
601                 VIRTIO_CRYPTO_INIT_LOG_ERR(
602                         "virtio crypto data queue initialization failed");
603                 return ret;
604         }
605
606         dev->data->queue_pairs[queue_pair_id] = vq;
607
608         return 0;
609 }
610
611 static int
612 virtio_crypto_qp_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
613 {
614         struct virtqueue *vq
615                 = (struct virtqueue *)dev->data->queue_pairs[queue_pair_id];
616
617         PMD_INIT_FUNC_TRACE();
618
619         if (vq == NULL) {
620                 VIRTIO_CRYPTO_DRV_LOG_DBG("vq already freed");
621                 return 0;
622         }
623
624         virtio_crypto_queue_release(vq);
625         return 0;
626 }
627
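/*
 * Feature negotiation: read the device (host) feature bits, intersect them
 * with the features requested by the driver and, for modern devices,
 * require VIRTIO_F_VERSION_1 and complete the FEATURES_OK handshake.
 */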
628 static int
629 virtio_negotiate_features(struct virtio_crypto_hw *hw, uint64_t req_features)
630 {
631         uint64_t host_features;
632
633         PMD_INIT_FUNC_TRACE();
634
635         /* Prepare guest_features: features that the driver wants to support */
636         VIRTIO_CRYPTO_INIT_LOG_DBG("guest_features before negotiate = %" PRIx64,
637                 req_features);
638
639         /* Read device(host) feature bits */
640         host_features = VTPCI_OPS(hw)->get_features(hw);
641         VIRTIO_CRYPTO_INIT_LOG_DBG("host_features before negotiate = %" PRIx64,
642                 host_features);
643
644         /*
645          * Negotiate features: a subset of the device feature bits is written
646          * back as the guest feature bits.
647          */
648         hw->guest_features = req_features;
649         hw->guest_features = vtpci_cryptodev_negotiate_features(hw,
650                                                         host_features);
651         VIRTIO_CRYPTO_INIT_LOG_DBG("features after negotiate = %" PRIx64,
652                 hw->guest_features);
653
654         if (hw->modern) {
655                 if (!vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) {
656                         VIRTIO_CRYPTO_INIT_LOG_ERR(
657                                 "VIRTIO_F_VERSION_1 features is not enabled.");
658                         return -1;
659                 }
660                 vtpci_cryptodev_set_status(hw,
661                         VIRTIO_CONFIG_STATUS_FEATURES_OK);
662                 if (!(vtpci_cryptodev_get_status(hw) &
663                         VIRTIO_CONFIG_STATUS_FEATURES_OK)) {
664                         VIRTIO_CRYPTO_INIT_LOG_ERR("failed to set FEATURES_OK "
665                                                 "status!");
666                         return -1;
667                 }
668         }
669
670         hw->req_guest_features = req_features;
671
672         return 0;
673 }
674
675 /* reset device and renegotiate features if needed */
676 static int
677 virtio_crypto_init_device(struct rte_cryptodev *cryptodev,
678         uint64_t req_features)
679 {
680         struct virtio_crypto_hw *hw = cryptodev->data->dev_private;
681         struct virtio_crypto_config local_config;
682         struct virtio_crypto_config *config = &local_config;
683
684         PMD_INIT_FUNC_TRACE();
685
686         /* Reset the device although not necessary at startup */
687         vtpci_cryptodev_reset(hw);
688
689         /* Tell the host we've noticed this device. */
690         vtpci_cryptodev_set_status(hw, VIRTIO_CONFIG_STATUS_ACK);
691
692         /* Tell the host we know how to drive the device. */
693         vtpci_cryptodev_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER);
694         if (virtio_negotiate_features(hw, req_features) < 0)
695                 return -1;
696
697         /* Get status of the device */
698         vtpci_read_cryptodev_config(hw,
699                 offsetof(struct virtio_crypto_config, status),
700                 &config->status, sizeof(config->status));
701         if (config->status != VIRTIO_CRYPTO_S_HW_READY) {
702                 VIRTIO_CRYPTO_DRV_LOG_ERR("accelerator hardware is "
703                                 "not ready");
704                 return -1;
705         }
706
707         /* Get number of data queues */
708         vtpci_read_cryptodev_config(hw,
709                 offsetof(struct virtio_crypto_config, max_dataqueues),
710                 &config->max_dataqueues,
711                 sizeof(config->max_dataqueues));
712         hw->max_dataqueues = config->max_dataqueues;
713
714         VIRTIO_CRYPTO_INIT_LOG_DBG("hw->max_dataqueues=%d",
715                 hw->max_dataqueues);
716
717         return 0;
718 }
719
720 /*
721  * This function does the real work of the probe() callback.
722  * It returns 0 on success.
723  */
724 static int
725 crypto_virtio_create(const char *name, struct rte_pci_device *pci_dev,
726                 struct rte_cryptodev_pmd_init_params *init_params)
727 {
728         struct rte_cryptodev *cryptodev;
729         struct virtio_crypto_hw *hw;
730
731         PMD_INIT_FUNC_TRACE();
732
733         cryptodev = rte_cryptodev_pmd_create(name, &pci_dev->device,
734                                         init_params);
735         if (cryptodev == NULL)
736                 return -ENODEV;
737
738         cryptodev->driver_id = cryptodev_virtio_driver_id;
739         cryptodev->dev_ops = &virtio_crypto_dev_ops;
740
741         cryptodev->enqueue_burst = virtio_crypto_pkt_tx_burst;
742         cryptodev->dequeue_burst = virtio_crypto_pkt_rx_burst;
743
744         cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
745                 RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING;
746
747         hw = cryptodev->data->dev_private;
748         hw->dev_id = cryptodev->data->dev_id;
749
750         VIRTIO_CRYPTO_INIT_LOG_DBG("dev %d vendorID=0x%x deviceID=0x%x",
751                 cryptodev->data->dev_id, pci_dev->id.vendor_id,
752                 pci_dev->id.device_id);
753
754         /* pci device init */
755         if (vtpci_cryptodev_init(pci_dev, hw))
756                 return -1;
757
758         if (virtio_crypto_init_device(cryptodev,
759                         VIRTIO_CRYPTO_PMD_GUEST_FEATURES) < 0)
760                 return -1;
761
762         return 0;
763 }
764
765 static int
766 virtio_crypto_dev_uninit(struct rte_cryptodev *cryptodev)
767 {
768         struct virtio_crypto_hw *hw = cryptodev->data->dev_private;
769
770         PMD_INIT_FUNC_TRACE();
771
772         if (rte_eal_process_type() == RTE_PROC_SECONDARY)
773                 return -EPERM;
774
775         if (cryptodev->data->dev_started) {
776                 virtio_crypto_dev_stop(cryptodev);
777                 virtio_crypto_dev_close(cryptodev);
778         }
779
780         cryptodev->dev_ops = NULL;
781         cryptodev->enqueue_burst = NULL;
782         cryptodev->dequeue_burst = NULL;
783
784         /* release control queue */
785         virtio_crypto_queue_release(hw->cvq);
786
787         rte_free(cryptodev->data);
788         cryptodev->data = NULL;
789
790         VIRTIO_CRYPTO_DRV_LOG_INFO("dev_uninit completed");
791
792         return 0;
793 }
794
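/*
 * dev_configure callback: re-initialize the device with the PMD's supported
 * feature set, then create and start the control virtqueue at queue index
 * hw->max_dataqueues.
 */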
795 static int
796 virtio_crypto_dev_configure(struct rte_cryptodev *cryptodev,
797         struct rte_cryptodev_config *config __rte_unused)
798 {
799         struct virtio_crypto_hw *hw = cryptodev->data->dev_private;
800
801         PMD_INIT_FUNC_TRACE();
802
803         if (virtio_crypto_init_device(cryptodev,
804                         VIRTIO_CRYPTO_PMD_GUEST_FEATURES) < 0)
805                 return -1;
806
807         /* setup the control queue:
808          * queue indexes [0, 1, ..., (config->max_dataqueues - 1)] are data queues,
809          * queue index config->max_dataqueues is the control queue
810          */
811         if (virtio_crypto_ctrlq_setup(cryptodev, hw->max_dataqueues) < 0) {
812                 VIRTIO_CRYPTO_INIT_LOG_ERR("control queue setup error");
813                 return -1;
814         }
815         virtio_crypto_ctrlq_start(cryptodev);
816
817         return 0;
818 }
819
820 static void
821 virtio_crypto_dev_stop(struct rte_cryptodev *dev)
822 {
823         struct virtio_crypto_hw *hw = dev->data->dev_private;
824
825         PMD_INIT_FUNC_TRACE();
826         VIRTIO_CRYPTO_DRV_LOG_DBG("virtio_dev_stop");
827
828         vtpci_cryptodev_reset(hw);
829
830         virtio_crypto_dev_free_mbufs(dev);
831         virtio_crypto_free_queues(dev);
832
833         dev->data->dev_started = 0;
834 }
835
836 static int
837 virtio_crypto_dev_start(struct rte_cryptodev *dev)
838 {
839         struct virtio_crypto_hw *hw = dev->data->dev_private;
840
841         if (dev->data->dev_started)
842                 return 0;
843
844         /* Do final configuration before queue engine starts */
845         virtio_crypto_dataq_start(dev);
846         vtpci_cryptodev_reinit_complete(hw);
847
848         dev->data->dev_started = 1;
849
850         return 0;
851 }
852
853 static void
854 virtio_crypto_dev_free_mbufs(struct rte_cryptodev *dev)
855 {
856         uint32_t i;
857         struct virtio_crypto_hw *hw = dev->data->dev_private;
858
859         for (i = 0; i < hw->max_dataqueues; i++) {
860                 VIRTIO_CRYPTO_INIT_LOG_DBG("Before freeing dataq[%d] used "
861                         "and unused buf", i);
862                 VIRTQUEUE_DUMP((struct virtqueue *)
863                         dev->data->queue_pairs[i]);
864
865                 VIRTIO_CRYPTO_INIT_LOG_DBG("queue_pairs[%d]=%p",
866                                 i, dev->data->queue_pairs[i]);
867
868                 virtqueue_detatch_unused(dev->data->queue_pairs[i]);
869
870                 VIRTIO_CRYPTO_INIT_LOG_DBG("After freeing dataq[%d] used and "
871                                         "unused buf", i);
872                 VIRTQUEUE_DUMP(
873                         (struct virtqueue *)dev->data->queue_pairs[i]);
874         }
875 }
876
877 static unsigned int
878 virtio_crypto_sym_get_session_private_size(
879                 struct rte_cryptodev *dev __rte_unused)
880 {
881         PMD_INIT_FUNC_TRACE();
882
883         return RTE_ALIGN_CEIL(sizeof(struct virtio_crypto_session), 16);
884 }
885
886 static int
887 virtio_crypto_check_sym_session_paras(
888                 struct rte_cryptodev *dev)
889 {
890         struct virtio_crypto_hw *hw;
891
892         PMD_INIT_FUNC_TRACE();
893
894         if (unlikely(dev == NULL)) {
895                 VIRTIO_CRYPTO_SESSION_LOG_ERR("dev is NULL");
896                 return -1;
897         }
898         if (unlikely(dev->data == NULL)) {
899                 VIRTIO_CRYPTO_SESSION_LOG_ERR("dev->data is NULL");
900                 return -1;
901         }
902         hw = dev->data->dev_private;
903         if (unlikely(hw == NULL)) {
904                 VIRTIO_CRYPTO_SESSION_LOG_ERR("hw is NULL");
905                 return -1;
906         }
907         if (unlikely(hw->cvq == NULL)) {
908                 VIRTIO_CRYPTO_SESSION_LOG_ERR("vq is NULL");
909                 return -1;
910         }
911
912         return 0;
913 }
914
915 static int
916 virtio_crypto_check_sym_clear_session_paras(
917                 struct rte_cryptodev *dev,
918                 struct rte_cryptodev_sym_session *sess)
919 {
920         PMD_INIT_FUNC_TRACE();
921
922         if (sess == NULL) {
923                 VIRTIO_CRYPTO_SESSION_LOG_ERR("sym_session is NULL");
924                 return -1;
925         }
926
927         return virtio_crypto_check_sym_session_paras(dev);
928 }
929
930 #define NUM_ENTRY_SYM_CLEAR_SESSION 2
931
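/*
 * Destroy a session on the device: build a DESTROY_SESSION control request
 * plus a writable status byte as an indirect descriptor chain, notify the
 * control queue, busy-wait for completion and, on success, wipe the
 * session private data.
 */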
932 static void
933 virtio_crypto_sym_clear_session(
934                 struct rte_cryptodev *dev,
935                 struct rte_cryptodev_sym_session *sess)
936 {
937         struct virtio_crypto_hw *hw;
938         struct virtqueue *vq;
939         struct virtio_crypto_session *session;
940         struct virtio_crypto_op_ctrl_req *ctrl;
941         struct vring_desc *desc;
942         uint8_t *status;
943         uint8_t needed = 1;
944         uint32_t head;
945         uint8_t *malloc_virt_addr;
946         uint64_t malloc_phys_addr;
947         uint8_t len_inhdr = sizeof(struct virtio_crypto_inhdr);
948         uint32_t len_op_ctrl_req = sizeof(struct virtio_crypto_op_ctrl_req);
949         uint32_t desc_offset = len_op_ctrl_req + len_inhdr;
950
951         PMD_INIT_FUNC_TRACE();
952
953         if (virtio_crypto_check_sym_clear_session_paras(dev, sess) < 0)
954                 return;
955
956         hw = dev->data->dev_private;
957         vq = hw->cvq;
958         session = (struct virtio_crypto_session *)get_session_private_data(
959                 sess, cryptodev_virtio_driver_id);
960         if (session == NULL) {
961                 VIRTIO_CRYPTO_SESSION_LOG_ERR("Invalid session parameter");
962                 return;
963         }
964
965         VIRTIO_CRYPTO_SESSION_LOG_INFO("vq->vq_desc_head_idx = %d, "
966                         "vq = %p", vq->vq_desc_head_idx, vq);
967
968         if (vq->vq_free_cnt < needed) {
969                 VIRTIO_CRYPTO_SESSION_LOG_ERR(
970                                 "vq->vq_free_cnt = %d is less than %d, "
971                                 "not enough", vq->vq_free_cnt, needed);
972                 return;
973         }
974
975         /*
976          * malloc memory to store information of ctrl request op,
977          * returned status and desc vring
978          */
979         malloc_virt_addr = rte_malloc(NULL, len_op_ctrl_req + len_inhdr
980                 + NUM_ENTRY_SYM_CLEAR_SESSION
981                 * sizeof(struct vring_desc), RTE_CACHE_LINE_SIZE);
982         if (malloc_virt_addr == NULL) {
983                 VIRTIO_CRYPTO_SESSION_LOG_ERR("not enough heap room");
984                 return;
985         }
986         malloc_phys_addr = rte_malloc_virt2iova(malloc_virt_addr);
987
988         /* assign ctrl request op part */
989         ctrl = (struct virtio_crypto_op_ctrl_req *)malloc_virt_addr;
990         ctrl->header.opcode = VIRTIO_CRYPTO_CIPHER_DESTROY_SESSION;
991         /* default data virtqueue is 0 */
992         ctrl->header.queue_id = 0;
993         ctrl->u.destroy_session.session_id = session->session_id;
994
995         /* status part */
996         status = &(((struct virtio_crypto_inhdr *)
997                 ((uint8_t *)malloc_virt_addr + len_op_ctrl_req))->status);
998         *status = VIRTIO_CRYPTO_ERR;
999
1000         /* indirect desc vring part */
1001         desc = (struct vring_desc *)((uint8_t *)malloc_virt_addr
1002                 + desc_offset);
1003
1004         /* ctrl request part */
1005         desc[0].addr = malloc_phys_addr;
1006         desc[0].len = len_op_ctrl_req;
1007         desc[0].flags = VRING_DESC_F_NEXT;
1008         desc[0].next = 1;
1009
1010         /* status part */
1011         desc[1].addr = malloc_phys_addr + len_op_ctrl_req;
1012         desc[1].len = len_inhdr;
1013         desc[1].flags = VRING_DESC_F_WRITE;
1014
1015         /* use only a single desc entry */
1016         head = vq->vq_desc_head_idx;
1017         vq->vq_ring.desc[head].flags = VRING_DESC_F_INDIRECT;
1018         vq->vq_ring.desc[head].addr = malloc_phys_addr + desc_offset;
1019         vq->vq_ring.desc[head].len
1020                 = NUM_ENTRY_SYM_CLEAR_SESSION
1021                 * sizeof(struct vring_desc);
1022         vq->vq_free_cnt -= needed;
1023
1024         vq->vq_desc_head_idx = vq->vq_ring.desc[head].next;
1025
1026         vq_update_avail_ring(vq, head);
1027         vq_update_avail_idx(vq);
1028
1029         VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_queue_index = %d",
1030                                         vq->vq_queue_index);
1031
1032         virtqueue_notify(vq);
1033
1034         rte_rmb();
1035         while (vq->vq_used_cons_idx == vq->vq_ring.used->idx) {
1036                 rte_rmb();
1037                 usleep(100);
1038         }
1039
1040         while (vq->vq_used_cons_idx != vq->vq_ring.used->idx) {
1041                 uint32_t idx, desc_idx, used_idx;
1042                 struct vring_used_elem *uep;
1043
1044                 used_idx = (uint32_t)(vq->vq_used_cons_idx
1045                                 & (vq->vq_nentries - 1));
1046                 uep = &vq->vq_ring.used->ring[used_idx];
1047                 idx = (uint32_t) uep->id;
1048                 desc_idx = idx;
1049                 while (vq->vq_ring.desc[desc_idx].flags
1050                                 & VRING_DESC_F_NEXT) {
1051                         desc_idx = vq->vq_ring.desc[desc_idx].next;
1052                         vq->vq_free_cnt++;
1053                 }
1054
1055                 vq->vq_ring.desc[desc_idx].next = vq->vq_desc_head_idx;
1056                 vq->vq_desc_head_idx = idx;
1057                 vq->vq_used_cons_idx++;
1058                 vq->vq_free_cnt++;
1059         }
1060
1061         if (*status != VIRTIO_CRYPTO_OK) {
1062                 VIRTIO_CRYPTO_SESSION_LOG_ERR("Close session failed "
1063                                 "status=%"PRIu32", session_id=%"PRIu64"",
1064                                 *status, session->session_id);
1065                 rte_free(malloc_virt_addr);
1066                 return;
1067         }
1068
1069         VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_free_cnt=%d\n"
1070                         "vq->vq_desc_head_idx=%d",
1071                         vq->vq_free_cnt, vq->vq_desc_head_idx);
1072
1073         VIRTIO_CRYPTO_SESSION_LOG_INFO("Close session %"PRIu64" successfully ",
1074                         session->session_id);
1075
1076         memset(sess, 0, sizeof(struct virtio_crypto_session));
1077         rte_free(malloc_virt_addr);
1078 }
1079
1080 static struct rte_crypto_cipher_xform *
1081 virtio_crypto_get_cipher_xform(struct rte_crypto_sym_xform *xform)
1082 {
1083         do {
1084                 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
1085                         return &xform->cipher;
1086
1087                 xform = xform->next;
1088         } while (xform);
1089
1090         return NULL;
1091 }
1092
1093 static struct rte_crypto_auth_xform *
1094 virtio_crypto_get_auth_xform(struct rte_crypto_sym_xform *xform)
1095 {
1096         do {
1097                 if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH)
1098                         return &xform->auth;
1099
1100                 xform = xform->next;
1101         } while (xform);
1102
1103         return NULL;
1104 }
1105
1106 /** Get xform chain order */
1107 static int
1108 virtio_crypto_get_chain_order(struct rte_crypto_sym_xform *xform)
1109 {
1110         if (xform == NULL)
1111                 return -1;
1112
1113         /* Cipher Only */
1114         if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
1115                         xform->next == NULL)
1116                 return VIRTIO_CRYPTO_CMD_CIPHER;
1117
1118         /* Authentication Only */
1119         if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
1120                         xform->next == NULL)
1121                 return VIRTIO_CRYPTO_CMD_AUTH;
1122
1123         /* Authenticate then Cipher */
1124         if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
1125                         xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
1126                 return VIRTIO_CRYPTO_CMD_HASH_CIPHER;
1127
1128         /* Cipher then Authenticate */
1129         if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
1130                         xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
1131                 return VIRTIO_CRYPTO_CMD_CIPHER_HASH;
1132
1133         return -1;
1134 }
1135
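/*
 * Translate an rte_crypto cipher xform into virtio cipher session
 * parameters. No cipher algorithm is mapped in the switch below yet, so
 * every algorithm currently takes the default error path.
 */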
1136 static int
1137 virtio_crypto_sym_pad_cipher_param(
1138                 struct virtio_crypto_cipher_session_para *para,
1139                 struct rte_crypto_cipher_xform *cipher_xform)
1140 {
1141         switch (cipher_xform->algo) {
1142         default:
1143                 VIRTIO_CRYPTO_SESSION_LOG_ERR("Crypto: Unsupported "
1144                                 "Cipher alg %u", cipher_xform->algo);
1145                 return -1;
1146         }
1147
1148         para->keylen = cipher_xform->key.length;
1149         switch (cipher_xform->op) {
1150         case RTE_CRYPTO_CIPHER_OP_ENCRYPT:
1151                 para->op = VIRTIO_CRYPTO_OP_ENCRYPT;
1152                 break;
1153         case RTE_CRYPTO_CIPHER_OP_DECRYPT:
1154                 para->op = VIRTIO_CRYPTO_OP_DECRYPT;
1155                 break;
1156         default:
1157                 VIRTIO_CRYPTO_SESSION_LOG_ERR("Unsupported cipher operation "
1158                                         "parameter");
1159                 return -1;
1160         }
1161
1162         return 0;
1163 }
1164
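/*
 * Translate an rte_crypto auth xform into virtio hash/MAC session
 * parameters. As with the cipher helper, no auth algorithm is mapped yet,
 * so the algorithm switch always returns an error.
 */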
1165 static int
1166 virtio_crypto_sym_pad_auth_param(
1167                 struct virtio_crypto_op_ctrl_req *ctrl,
1168                 struct rte_crypto_auth_xform *auth_xform)
1169 {
1170         uint32_t *algo;
1171         struct virtio_crypto_alg_chain_session_para *para =
1172                 &(ctrl->u.sym_create_session.u.chain.para);
1173
1174         switch (ctrl->u.sym_create_session.u.chain.para.hash_mode) {
1175         case VIRTIO_CRYPTO_SYM_HASH_MODE_PLAIN:
1176                 algo = &(para->u.hash_param.algo);
1177                 break;
1178         case VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH:
1179                 algo = &(para->u.mac_param.algo);
1180                 break;
1181         default:
1182                 VIRTIO_CRYPTO_SESSION_LOG_ERR("Unsupported hash mode %u "
1183                         "specified",
1184                         ctrl->u.sym_create_session.u.chain.para.hash_mode);
1185                 return -1;
1186         }
1187
1188         switch (auth_xform->algo) {
1189         default:
1190                 VIRTIO_CRYPTO_SESSION_LOG_ERR(
1191                         "Crypto: Undefined Hash algo %u specified",
1192                         auth_xform->algo);
1193                 *algo = VIRTIO_CRYPTO_NO_MAC;
1194                 return -1;
1195         }
1196
1197         return 0;
1198 }
1199
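/*
 * Fill the CREATE_SESSION control request from the xform chain: cipher
 * and/or auth parameters are written into the request and the raw key
 * pointers are handed back to the caller for the control-queue transfer.
 */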
1200 static int
1201 virtio_crypto_sym_pad_op_ctrl_req(
1202                 struct virtio_crypto_op_ctrl_req *ctrl,
1203                 struct rte_crypto_sym_xform *xform, bool is_chained,
1204                 uint8_t **cipher_key_data, uint8_t **auth_key_data,
1205                 struct virtio_crypto_session *session)
1206 {
1207         int ret;
1208         struct rte_crypto_auth_xform *auth_xform = NULL;
1209         struct rte_crypto_cipher_xform *cipher_xform = NULL;
1210
1211         /* Get cipher xform from crypto xform chain */
1212         cipher_xform = virtio_crypto_get_cipher_xform(xform);
1213         if (cipher_xform) {
1214                 if (is_chained)
1215                         ret = virtio_crypto_sym_pad_cipher_param(
1216                                 &ctrl->u.sym_create_session.u.chain.para
1217                                                 .cipher_param, cipher_xform);
1218                 else
1219                         ret = virtio_crypto_sym_pad_cipher_param(
1220                                 &ctrl->u.sym_create_session.u.cipher.para,
1221                                 cipher_xform);
1222
1223                 if (ret < 0) {
1224                         VIRTIO_CRYPTO_SESSION_LOG_ERR(
1225                                 "pad cipher parameter failed");
1226                         return -1;
1227                 }
1228
1229                 *cipher_key_data = cipher_xform->key.data;
1230
1231                 session->iv.offset = cipher_xform->iv.offset;
1232                 session->iv.length = cipher_xform->iv.length;
1233         }
1234
1235         /* Get auth xform from crypto xform chain */
1236         auth_xform = virtio_crypto_get_auth_xform(xform);
1237         if (auth_xform) {
1238                 /* FIXME: support VIRTIO_CRYPTO_SYM_HASH_MODE_NESTED */
1239                 struct virtio_crypto_alg_chain_session_para *para =
1240                         &(ctrl->u.sym_create_session.u.chain.para);
1241                 if (auth_xform->key.length) {
1242                         para->hash_mode = VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH;
1243                         para->u.mac_param.auth_key_len =
1244                                 (uint32_t)auth_xform->key.length;
1245                         para->u.mac_param.hash_result_len =
1246                                 auth_xform->digest_length;
1247
1248                         *auth_key_data = auth_xform->key.data;
1249                 } else {
1250                         para->hash_mode = VIRTIO_CRYPTO_SYM_HASH_MODE_PLAIN;
1251                         para->u.hash_param.hash_result_len =
1252                                 auth_xform->digest_length;
1253                 }
1254
1255                 ret = virtio_crypto_sym_pad_auth_param(ctrl, auth_xform);
1256                 if (ret < 0) {
1257                         VIRTIO_CRYPTO_SESSION_LOG_ERR("pad auth parameter "
1258                                                 "failed");
1259                         return -1;
1260                 }
1261         }
1262
1263         return 0;
1264 }
1265
1266 static int
1267 virtio_crypto_check_sym_configure_session_paras(
1268                 struct rte_cryptodev *dev,
1269                 struct rte_crypto_sym_xform *xform,
1270                 struct rte_cryptodev_sym_session *sym_sess,
1271                 struct rte_mempool *mempool)
1272 {
1273         if (unlikely(xform == NULL) || unlikely(sym_sess == NULL) ||
1274                 unlikely(mempool == NULL)) {
1275                 VIRTIO_CRYPTO_SESSION_LOG_ERR("NULL pointer");
1276                 return -1;
1277         }
1278
1279         if (virtio_crypto_check_sym_session_paras(dev) < 0)
1280                 return -1;
1281
1282         return 0;
1283 }
1284
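/*
 * Session configuration entry point: take a session object from the
 * mempool, build a CREATE_SESSION control request according to the xform
 * chain order, submit it on the control queue and, on success, attach the
 * object to the generic session as driver private data.
 */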
1285 static int
1286 virtio_crypto_sym_configure_session(
1287                 struct rte_cryptodev *dev,
1288                 struct rte_crypto_sym_xform *xform,
1289                 struct rte_cryptodev_sym_session *sess,
1290                 struct rte_mempool *mempool)
1291 {
1292         int ret;
1293         struct virtio_crypto_session crypto_sess;
1294         void *session_private = &crypto_sess;
1295         struct virtio_crypto_session *session;
1296         struct virtio_crypto_op_ctrl_req *ctrl_req;
1297         enum virtio_crypto_cmd_id cmd_id;
1298         uint8_t *cipher_key_data = NULL;
1299         uint8_t *auth_key_data = NULL;
1300         struct virtio_crypto_hw *hw;
1301         struct virtqueue *control_vq;
1302
1303         PMD_INIT_FUNC_TRACE();
1304
1305         ret = virtio_crypto_check_sym_configure_session_paras(dev, xform,
1306                         sess, mempool);
1307         if (ret < 0) {
1308                 VIRTIO_CRYPTO_SESSION_LOG_ERR("Invalid parameters");
1309                 return ret;
1310         }
1311
1312         if (rte_mempool_get(mempool, &session_private)) {
1313                 VIRTIO_CRYPTO_SESSION_LOG_ERR(
1314                         "Couldn't get object from session mempool");
1315                 return -ENOMEM;
1316         }
1317
1318         session = (struct virtio_crypto_session *)session_private;
1319         memset(session, 0, sizeof(struct virtio_crypto_session));
1320         ctrl_req = &session->ctrl;
1321         ctrl_req->header.opcode = VIRTIO_CRYPTO_CIPHER_CREATE_SESSION;
1322         /* FIXME: support multiqueue */
1323         ctrl_req->header.queue_id = 0;
1324
1325         hw = dev->data->dev_private;
1326         control_vq = hw->cvq;
1327
1328         cmd_id = virtio_crypto_get_chain_order(xform);
1329         if (cmd_id == VIRTIO_CRYPTO_CMD_CIPHER_HASH)
1330                 ctrl_req->u.sym_create_session.u.chain.para.alg_chain_order
1331                         = VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_CIPHER_THEN_HASH;
1332         if (cmd_id == VIRTIO_CRYPTO_CMD_HASH_CIPHER)
1333                 ctrl_req->u.sym_create_session.u.chain.para.alg_chain_order
1334                         = VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_HASH_THEN_CIPHER;
1335
1336         switch (cmd_id) {
1337         case VIRTIO_CRYPTO_CMD_CIPHER_HASH:
1338         case VIRTIO_CRYPTO_CMD_HASH_CIPHER:
1339                 ctrl_req->u.sym_create_session.op_type
1340                         = VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING;
1341
1342                 ret = virtio_crypto_sym_pad_op_ctrl_req(ctrl_req,
1343                         xform, true, &cipher_key_data, &auth_key_data, session);
1344                 if (ret < 0) {
1345                         VIRTIO_CRYPTO_SESSION_LOG_ERR(
1346                                 "padding sym op ctrl req failed");
1347                         goto error_out;
1348                 }
1349                 ret = virtio_crypto_send_command(control_vq, ctrl_req,
1350                         cipher_key_data, auth_key_data, session);
1351                 if (ret < 0) {
1352                         VIRTIO_CRYPTO_SESSION_LOG_ERR(
1353                                 "create session failed: %d", ret);
1354                         goto error_out;
1355                 }
1356                 break;
1357         case VIRTIO_CRYPTO_CMD_CIPHER:
1358                 ctrl_req->u.sym_create_session.op_type
1359                         = VIRTIO_CRYPTO_SYM_OP_CIPHER;
1360                 ret = virtio_crypto_sym_pad_op_ctrl_req(ctrl_req, xform,
1361                         false, &cipher_key_data, &auth_key_data, session);
1362                 if (ret < 0) {
1363                         VIRTIO_CRYPTO_SESSION_LOG_ERR(
1364                                 "padding sym op ctrl req failed");
1365                         goto error_out;
1366                 }
1367                 ret = virtio_crypto_send_command(control_vq, ctrl_req,
1368                         cipher_key_data, NULL, session);
1369                 if (ret < 0) {
1370                         VIRTIO_CRYPTO_SESSION_LOG_ERR(
1371                                 "create session failed: %d", ret);
1372                         goto error_out;
1373                 }
1374                 break;
1375         default:
1376                 VIRTIO_CRYPTO_SESSION_LOG_ERR(
1377                         "Unsupported operation chain order parameter");
1378                 goto error_out;
1379         }
1380
1381         set_session_private_data(sess, dev->driver_id,
1382                 session_private);
1383
1384         return 0;
1385
1386 error_out:
        rte_mempool_put(mempool, session_private);
1387         return -1;
1388 }
1389
1390 static void
1391 virtio_crypto_dev_info_get(struct rte_cryptodev *dev,
1392                 struct rte_cryptodev_info *info)
1393 {
1394         struct virtio_crypto_hw *hw = dev->data->dev_private;
1395
1396         PMD_INIT_FUNC_TRACE();
1397
1398         if (info != NULL) {
1399                 info->driver_id = cryptodev_virtio_driver_id;
1400                 info->pci_dev = RTE_DEV_TO_PCI(dev->device);
1401                 info->feature_flags = dev->feature_flags;
1402                 info->max_nb_queue_pairs = hw->max_dataqueues;
1403                 info->sym.max_nb_sessions =
1404                         RTE_VIRTIO_CRYPTO_PMD_MAX_NB_SESSIONS;
1405         }
1406 }
1407
1408 static int
1409 crypto_virtio_pci_probe(
1410         struct rte_pci_driver *pci_drv __rte_unused,
1411         struct rte_pci_device *pci_dev)
1412 {
1413         struct rte_cryptodev_pmd_init_params init_params = {
1414                 .name = "",
1415                 .socket_id = rte_socket_id(),
1416                 .private_data_size = sizeof(struct virtio_crypto_hw),
1417                 .max_nb_sessions = RTE_VIRTIO_CRYPTO_PMD_MAX_NB_SESSIONS
1418         };
1419         char name[RTE_CRYPTODEV_NAME_MAX_LEN];
1420
1421         VIRTIO_CRYPTO_DRV_LOG_DBG("Found Crypto device at %02x:%02x.%x",
1422                         pci_dev->addr.bus,
1423                         pci_dev->addr.devid,
1424                         pci_dev->addr.function);
1425
1426         rte_pci_device_name(&pci_dev->addr, name, sizeof(name));
1427
1428         return crypto_virtio_create(name, pci_dev, &init_params);
1429 }
1430
1431 static int
1432 crypto_virtio_pci_remove(
1433         struct rte_pci_device *pci_dev)
1434 {
1435         struct rte_cryptodev *cryptodev;
1436         char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
1437
1438         if (pci_dev == NULL)
1439                 return -EINVAL;
1440
1441         rte_pci_device_name(&pci_dev->addr, cryptodev_name,
1442                         sizeof(cryptodev_name));
1443
1444         cryptodev = rte_cryptodev_pmd_get_named_dev(cryptodev_name);
1445         if (cryptodev == NULL)
1446                 return -ENODEV;
1447
1448         return virtio_crypto_dev_uninit(cryptodev);
1449 }
1450
1451 static struct rte_pci_driver rte_virtio_crypto_driver = {
1452         .id_table = pci_id_virtio_crypto_map,
1453         .drv_flags = 0,
1454         .probe = crypto_virtio_pci_probe,
1455         .remove = crypto_virtio_pci_remove
1456 };
1457
1458 static struct cryptodev_driver virtio_crypto_drv;
1459
1460 RTE_PMD_REGISTER_PCI(CRYPTODEV_NAME_VIRTIO_PMD, rte_virtio_crypto_driver);
1461 RTE_PMD_REGISTER_CRYPTO_DRIVER(virtio_crypto_drv,
1462         rte_virtio_crypto_driver.driver,
1463         cryptodev_virtio_driver_id);
1464
1465 RTE_INIT(virtio_crypto_init_log);
1466 static void
1467 virtio_crypto_init_log(void)
1468 {
1469         virtio_crypto_logtype_init = rte_log_register("pmd.crypto.virtio.init");
1470         if (virtio_crypto_logtype_init >= 0)
1471                 rte_log_set_level(virtio_crypto_logtype_init, RTE_LOG_NOTICE);
1472
1473         virtio_crypto_logtype_session =
1474                 rte_log_register("pmd.crypto.virtio.session");
1475         if (virtio_crypto_logtype_session >= 0)
1476                 rte_log_set_level(virtio_crypto_logtype_session,
1477                                 RTE_LOG_NOTICE);
1478
1479         virtio_crypto_logtype_rx = rte_log_register("pmd.crypto.virtio.rx");
1480         if (virtio_crypto_logtype_rx >= 0)
1481                 rte_log_set_level(virtio_crypto_logtype_rx, RTE_LOG_NOTICE);
1482
1483         virtio_crypto_logtype_tx = rte_log_register("pmd.crypto.virtio.tx");
1484         if (virtio_crypto_logtype_tx >= 0)
1485                 rte_log_set_level(virtio_crypto_logtype_tx, RTE_LOG_NOTICE);
1486
1487         virtio_crypto_logtype_driver =
1488                 rte_log_register("pmd.crypto.virtio.driver");
1489         if (virtio_crypto_logtype_driver >= 0)
1490                 rte_log_set_level(virtio_crypto_logtype_driver, RTE_LOG_NOTICE);
1491 }