1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.
7 #include <rte_common.h>
10 #include <rte_bus_pci.h>
11 #include <rte_cryptodev.h>
12 #include <rte_cryptodev_pmd.h>
15 #include "virtio_cryptodev.h"
16 #include "virtqueue.h"
17 #include "virtio_crypto_algs.h"
/* Dynamic log-type IDs, one per PMD component; registered and given a
 * default level in virtio_crypto_init_log() at the bottom of this file.
 */
19 int virtio_crypto_logtype_init;
20 int virtio_crypto_logtype_session;
21 int virtio_crypto_logtype_rx;
22 int virtio_crypto_logtype_tx;
23 int virtio_crypto_logtype_driver;
/* Forward declarations for the callbacks wired into virtio_crypto_dev_ops
 * below; definitions follow later in this file.
 */
25 static int virtio_crypto_dev_configure(struct rte_cryptodev *dev,
26 struct rte_cryptodev_config *config);
27 static int virtio_crypto_dev_start(struct rte_cryptodev *dev);
28 static void virtio_crypto_dev_stop(struct rte_cryptodev *dev);
29 static int virtio_crypto_dev_close(struct rte_cryptodev *dev);
30 static void virtio_crypto_dev_info_get(struct rte_cryptodev *dev,
31 struct rte_cryptodev_info *dev_info);
32 static int virtio_crypto_qp_setup(struct rte_cryptodev *dev,
33 uint16_t queue_pair_id,
34 const struct rte_cryptodev_qp_conf *qp_conf,
36 struct rte_mempool *session_pool);
37 static int virtio_crypto_qp_release(struct rte_cryptodev *dev,
38 uint16_t queue_pair_id);
39 static void virtio_crypto_dev_free_mbufs(struct rte_cryptodev *dev);
40 static unsigned int virtio_crypto_sym_get_session_private_size(
41 struct rte_cryptodev *dev);
42 static void virtio_crypto_sym_clear_session(struct rte_cryptodev *dev,
43 struct rte_cryptodev_sym_session *sess);
44 static int virtio_crypto_sym_configure_session(struct rte_cryptodev *dev,
45 struct rte_crypto_sym_xform *xform,
46 struct rte_cryptodev_sym_session *session,
47 struct rte_mempool *mp);
50 * The set of PCI devices this driver supports
52 static const struct rte_pci_id pci_id_virtio_crypto_map[] = {
53 { RTE_PCI_DEVICE(VIRTIO_CRYPTO_PCI_VENDORID,
54 VIRTIO_CRYPTO_PCI_DEVICEID) },
55 { .vendor_id = 0, /* sentinel */ },
/* Driver id; filled in by RTE_PMD_REGISTER_CRYPTO_DRIVER at the end of
 * this file and copied into each created cryptodev.
 */
58 uint8_t cryptodev_virtio_driver_id;
/* A session-create control request uses 4 indirect descriptors:
 * ctrl request, cipher key, auth key, and the writable session input.
 */
60 #define NUM_ENTRY_SYM_CREATE_SESSION 4
/*
 * Synchronously send a session-create control request on the control
 * virtqueue.  Builds one indirect descriptor chain in a single rte_malloc'd
 * buffer laid out as: ctrl request | cipher key | auth key | session input
 * | NUM_ENTRY_SYM_CREATE_SESSION vring_desc entries.  The chain is posted
 * as one INDIRECT descriptor, the device is notified, and the function
 * busy-waits on the used ring.  On success the device-written session_id
 * is copied into @session.
 *
 * NOTE(review): this source view is truncated — some lines (returns,
 * closing braces, idx bookkeeping) are not visible here.
 */
63 virtio_crypto_send_command(struct virtqueue *vq,
64 struct virtio_crypto_op_ctrl_req *ctrl, uint8_t *cipher_key,
65 uint8_t *auth_key, struct virtio_crypto_session *session)
70 uint32_t len_cipher_key = 0;
71 uint32_t len_auth_key = 0;
72 uint32_t len_ctrl_req = sizeof(struct virtio_crypto_op_ctrl_req);
73 uint32_t len_session_input = sizeof(struct virtio_crypto_session_input);
74 uint32_t len_total = 0;
75 uint32_t input_offset = 0;
76 void *virt_addr_started = NULL;
77 phys_addr_t phys_addr_started;
78 struct vring_desc *desc;
80 struct virtio_crypto_session_input *input;
83 PMD_INIT_FUNC_TRACE();
85 if (session == NULL) {
86 VIRTIO_CRYPTO_SESSION_LOG_ERR("session is NULL.");
89 /* cipher only is supported, it is available if auth_key is NULL */
91 VIRTIO_CRYPTO_SESSION_LOG_ERR("cipher key is NULL.");
95 head = vq->vq_desc_head_idx;
96 VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_desc_head_idx = %d, vq = %p",
99 if (vq->vq_free_cnt < needed) {
100 VIRTIO_CRYPTO_SESSION_LOG_ERR("Not enough entry");
104 /* calculate the length of cipher key */
106 switch (ctrl->u.sym_create_session.op_type) {
107 case VIRTIO_CRYPTO_SYM_OP_CIPHER:
109 = ctrl->u.sym_create_session.u.cipher
112 case VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING:
114 = ctrl->u.sym_create_session.u.chain
115 .para.cipher_param.keylen;
118 VIRTIO_CRYPTO_SESSION_LOG_ERR("invalid op type");
123 /* calculate the length of auth key */
126 ctrl->u.sym_create_session.u.chain.para.u.mac_param
131 * malloc memory to store indirect vring_desc entries, including
132 * ctrl request, cipher key, auth key, session input and desc vring
134 desc_offset = len_ctrl_req + len_cipher_key + len_auth_key
136 virt_addr_started = rte_malloc(NULL,
137 desc_offset + NUM_ENTRY_SYM_CREATE_SESSION
138 * sizeof(struct vring_desc), RTE_CACHE_LINE_SIZE);
139 if (virt_addr_started == NULL) {
140 VIRTIO_CRYPTO_SESSION_LOG_ERR("not enough heap memory");
143 phys_addr_started = rte_malloc_virt2iova(virt_addr_started);
145 /* address to store indirect vring desc entries */
146 desc = (struct vring_desc *)
147 ((uint8_t *)virt_addr_started + desc_offset);
/* First indirect descriptor: the control request itself (device-readable) */
150 memcpy(virt_addr_started, ctrl, len_ctrl_req);
151 desc[idx].addr = phys_addr_started;
152 desc[idx].len = len_ctrl_req;
153 desc[idx].flags = VRING_DESC_F_NEXT;
154 desc[idx].next = idx + 1;
156 len_total += len_ctrl_req;
157 input_offset += len_ctrl_req;
159 /* cipher key part */
160 if (len_cipher_key > 0) {
161 memcpy((uint8_t *)virt_addr_started + len_total,
162 cipher_key, len_cipher_key);
164 desc[idx].addr = phys_addr_started + len_total;
165 desc[idx].len = len_cipher_key;
166 desc[idx].flags = VRING_DESC_F_NEXT;
167 desc[idx].next = idx + 1;
169 len_total += len_cipher_key;
170 input_offset += len_cipher_key;
/* auth key part (only present for algorithm-chaining sessions) */
174 if (len_auth_key > 0) {
175 memcpy((uint8_t *)virt_addr_started + len_total,
176 auth_key, len_auth_key);
178 desc[idx].addr = phys_addr_started + len_total;
179 desc[idx].len = len_auth_key;
180 desc[idx].flags = VRING_DESC_F_NEXT;
181 desc[idx].next = idx + 1;
183 len_total += len_auth_key;
184 input_offset += len_auth_key;
/* Session input: device-writable; pre-set to ERR / invalid id so a
 * non-responding backend is detected after completion.
 */
188 input = (struct virtio_crypto_session_input *)
189 ((uint8_t *)virt_addr_started + input_offset);
190 input->status = VIRTIO_CRYPTO_ERR;
191 input->session_id = ~0ULL;
192 desc[idx].addr = phys_addr_started + len_total;
193 desc[idx].len = len_session_input;
194 desc[idx].flags = VRING_DESC_F_WRITE;
197 /* use a single desc entry */
198 vq->vq_ring.desc[head].addr = phys_addr_started + desc_offset;
199 vq->vq_ring.desc[head].len = idx * sizeof(struct vring_desc);
200 vq->vq_ring.desc[head].flags = VRING_DESC_F_INDIRECT;
203 vq->vq_desc_head_idx = vq->vq_ring.desc[head].next;
205 vq_update_avail_ring(vq, head);
206 vq_update_avail_idx(vq);
208 VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_queue_index = %d",
211 virtqueue_notify(vq);
/* Busy-wait for the device to consume the request */
214 while (vq->vq_used_cons_idx == vq->vq_ring.used->idx) {
/* Reclaim descriptors from the used ring back onto the free list */
219 while (vq->vq_used_cons_idx != vq->vq_ring.used->idx) {
220 uint32_t idx, desc_idx, used_idx;
221 struct vring_used_elem *uep;
223 used_idx = (uint32_t)(vq->vq_used_cons_idx
224 & (vq->vq_nentries - 1));
225 uep = &vq->vq_ring.used->ring[used_idx];
226 idx = (uint32_t) uep->id;
229 while (vq->vq_ring.desc[desc_idx].flags & VRING_DESC_F_NEXT) {
230 desc_idx = vq->vq_ring.desc[desc_idx].next;
234 vq->vq_ring.desc[desc_idx].next = vq->vq_desc_head_idx;
235 vq->vq_desc_head_idx = idx;
237 vq->vq_used_cons_idx++;
241 VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_free_cnt=%d\n"
242 "vq->vq_desc_head_idx=%d",
243 vq->vq_free_cnt, vq->vq_desc_head_idx);
/* Check the status the device wrote back into the session input */
246 if (input->status != VIRTIO_CRYPTO_OK) {
247 VIRTIO_CRYPTO_SESSION_LOG_ERR("Something wrong on backend! "
248 "status=%u, session_id=%" PRIu64 "",
249 input->status, input->session_id);
250 rte_free(virt_addr_started);
253 session->session_id = input->session_id;
255 VIRTIO_CRYPTO_SESSION_LOG_INFO("Create session successfully, "
256 "session_id=%" PRIu64 "", input->session_id);
257 rte_free(virt_addr_started);
/*
 * Tear down one virtqueue: deactivate it at the PCI layer, then free its
 * vring memzone and (for data queues) its cookie mempool.
 * NOTE(review): truncated view — the NULL check / vq->hw load and the
 * final rte_free(vq) are not visible here.
 */
265 virtio_crypto_queue_release(struct virtqueue *vq)
267 struct virtio_crypto_hw *hw;
269 PMD_INIT_FUNC_TRACE();
273 /* Select and deactivate the queue */
274 VTPCI_OPS(hw)->del_queue(hw, vq);
276 rte_memzone_free(vq->mz);
277 rte_mempool_free(vq->mpool);
/* Max length of the per-dataqueue mempool name built below */
282 #define MPOOL_MAX_NAME_SZ 32
/*
 * Allocate and initialize one virtqueue (data or control): query the queue
 * size from the device, allocate the virtqueue structure (+ per-descriptor
 * extras), for data queues create a cookie mempool and per-slot cookies,
 * and reserve an aligned memzone for the vring.  Returns the new queue via
 * *pvq.  Error paths unwind in reverse order (see the labels at the end).
 * NOTE(review): truncated view — returns, some braces and the vq->hw
 * assignment are not visible here.
 */
285 virtio_crypto_queue_setup(struct rte_cryptodev *dev,
287 uint16_t vtpci_queue_idx,
290 struct virtqueue **pvq)
292 char vq_name[VIRTQUEUE_MAX_NAME_SZ];
293 char mpool_name[MPOOL_MAX_NAME_SZ];
294 const struct rte_memzone *mz;
295 unsigned int vq_size, size;
296 struct virtio_crypto_hw *hw = dev->data->dev_private;
297 struct virtqueue *vq = NULL;
301 PMD_INIT_FUNC_TRACE();
303 VIRTIO_CRYPTO_INIT_LOG_DBG("setting up queue: %u", vtpci_queue_idx);
306 * Read the virtqueue size from the Queue Size field
307 * Always power of 2 and if 0 virtqueue does not exist
309 vq_size = VTPCI_OPS(hw)->get_queue_num(hw, vtpci_queue_idx);
311 VIRTIO_CRYPTO_INIT_LOG_ERR("virtqueue does not exist");
314 VIRTIO_CRYPTO_INIT_LOG_DBG("vq_size: %u", vq_size);
316 if (!rte_is_power_of_2(vq_size)) {
317 VIRTIO_CRYPTO_INIT_LOG_ERR("virtqueue size is not powerof 2");
/* Name the vq/mempool after device id and queue role */
321 if (queue_type == VTCRYPTO_DATAQ) {
322 snprintf(vq_name, sizeof(vq_name), "dev%d_dataqueue%d",
323 dev->data->dev_id, vtpci_queue_idx);
324 snprintf(mpool_name, sizeof(mpool_name),
325 "dev%d_dataqueue%d_mpool",
326 dev->data->dev_id, vtpci_queue_idx);
327 } else if (queue_type == VTCRYPTO_CTRLQ) {
328 snprintf(vq_name, sizeof(vq_name), "dev%d_controlqueue",
330 snprintf(mpool_name, sizeof(mpool_name),
331 "dev%d_controlqueue_mpool",
334 size = RTE_ALIGN_CEIL(sizeof(*vq) +
335 vq_size * sizeof(struct vq_desc_extra),
336 RTE_CACHE_LINE_SIZE);
337 vq = rte_zmalloc_socket(vq_name, size, RTE_CACHE_LINE_SIZE,
340 VIRTIO_CRYPTO_INIT_LOG_ERR("Can not allocate virtqueue");
344 if (queue_type == VTCRYPTO_DATAQ) {
345 /* pre-allocate a mempool and use it in the data plane to
346 * improve performance
348 vq->mpool = rte_mempool_lookup(mpool_name);
349 if (vq->mpool == NULL)
350 vq->mpool = rte_mempool_create(mpool_name,
352 sizeof(struct virtio_crypto_op_cookie),
353 RTE_CACHE_LINE_SIZE, 0,
354 NULL, NULL, NULL, NULL, socket_id,
357 VIRTIO_CRYPTO_DRV_LOG_ERR("Virtio Crypto PMD "
358 "Cannot create mempool");
359 goto mpool_create_err;
/* One op cookie per descriptor slot */
361 for (i = 0; i < vq_size; i++) {
362 vq->vq_descx[i].cookie =
363 rte_zmalloc("crypto PMD op cookie pointer",
364 sizeof(struct virtio_crypto_op_cookie),
365 RTE_CACHE_LINE_SIZE);
366 if (vq->vq_descx[i].cookie == NULL) {
367 VIRTIO_CRYPTO_DRV_LOG_ERR("Failed to "
368 "alloc mem for cookie");
369 goto cookie_alloc_err;
375 vq->dev_id = dev->data->dev_id;
376 vq->vq_queue_index = vtpci_queue_idx;
377 vq->vq_nentries = vq_size;
380 * Using part of the vring entries is permitted, but the maximum
383 if (nb_desc == 0 || nb_desc > vq_size)
385 vq->vq_free_cnt = nb_desc;
388 * Reserve a memzone for vring elements
390 size = vring_size(vq_size, VIRTIO_PCI_VRING_ALIGN);
391 vq->vq_ring_size = RTE_ALIGN_CEIL(size, VIRTIO_PCI_VRING_ALIGN);
392 VIRTIO_CRYPTO_INIT_LOG_DBG("%s vring_size: %d, rounded_vring_size: %d",
393 (queue_type == VTCRYPTO_DATAQ) ? "dataq" : "ctrlq",
394 size, vq->vq_ring_size);
396 mz = rte_memzone_reserve_aligned(vq_name, vq->vq_ring_size,
397 socket_id, 0, VIRTIO_PCI_VRING_ALIGN);
/* Reuse an existing memzone of the same name (e.g. after restart) */
399 if (rte_errno == EEXIST)
400 mz = rte_memzone_lookup(vq_name);
402 VIRTIO_CRYPTO_INIT_LOG_ERR("not enough memory");
408 * Virtio PCI device VIRTIO_PCI_QUEUE_PF register is 32bit,
409 * and only accepts 32 bit page frame number.
410 * Check if the allocated physical memory exceeds 16TB.
412 if ((mz->phys_addr + vq->vq_ring_size - 1)
413 >> (VIRTIO_PCI_QUEUE_ADDR_SHIFT + 32)) {
414 VIRTIO_CRYPTO_INIT_LOG_ERR("vring address shouldn't be "
/* NOTE(review): sizeof(mz->len) zeroes only a few bytes; upstream DPDK
 * fixed this to memset(mz->addr, 0, mz->len) — cannot change it in a
 * doc-only pass.
 */
419 memset(mz->addr, 0, sizeof(mz->len));
421 vq->vq_ring_mem = mz->phys_addr;
422 vq->vq_ring_virt_mem = mz->addr;
423 VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_ring_mem(physical): 0x%"PRIx64,
424 (uint64_t)mz->phys_addr);
425 VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_ring_virt_mem: 0x%"PRIx64,
426 (uint64_t)(uintptr_t)mz->addr);
/* Error unwinding: free resources in reverse order of acquisition */
433 rte_memzone_free(mz);
436 rte_mempool_free(vq->mpool);
438 for (j = 0; j < i; j++)
439 rte_free(vq->vq_descx[j].cookie);
/*
 * Create the control virtqueue (queue index @queue_idx) and record it in
 * hw->cvq.  Refuses to touch virtqueues once the device has started.
 * NOTE(review): truncated view — returns and the hw->cvq assignment are
 * not visible here.
 */
447 virtio_crypto_ctrlq_setup(struct rte_cryptodev *dev, uint16_t queue_idx)
450 struct virtqueue *vq;
451 struct virtio_crypto_hw *hw = dev->data->dev_private;
453 /* if virtio device has started, do not touch the virtqueues */
454 if (dev->data->dev_started)
457 PMD_INIT_FUNC_TRACE();
459 ret = virtio_crypto_queue_setup(dev, VTCRYPTO_CTRLQ, queue_idx,
460 0, SOCKET_ID_ANY, &vq);
462 VIRTIO_CRYPTO_INIT_LOG_ERR("control vq initialization failed");
/*
 * Release the control queue and every data queue of the device.
 */
472 virtio_crypto_free_queues(struct rte_cryptodev *dev)
475 struct virtio_crypto_hw *hw = dev->data->dev_private;
477 PMD_INIT_FUNC_TRACE();
479 /* control queue release */
480 virtio_crypto_queue_release(hw->cvq);
482 /* data queue release */
483 for (i = 0; i < hw->max_dataqueues; i++)
484 virtio_crypto_queue_release(dev->data->queue_pairs[i]);
/* dev_close callback — no per-close work needed here.
 * NOTE(review): truncated view — the body/return is not visible.
 */
488 virtio_crypto_dev_close(struct rte_cryptodev *dev __rte_unused)
494 * dev_ops for virtio, bare necessities for basic operation
496 static struct rte_cryptodev_ops virtio_crypto_dev_ops = {
497 /* Device related operations */
498 .dev_configure = virtio_crypto_dev_configure,
499 .dev_start = virtio_crypto_dev_start,
500 .dev_stop = virtio_crypto_dev_stop,
501 .dev_close = virtio_crypto_dev_close,
502 .dev_infos_get = virtio_crypto_dev_info_get,
/* Queue-pair management; start/stop/count are not supported */
507 .queue_pair_setup = virtio_crypto_qp_setup,
508 .queue_pair_release = virtio_crypto_qp_release,
509 .queue_pair_start = NULL,
510 .queue_pair_stop = NULL,
511 .queue_pair_count = NULL,
513 /* Crypto related operations */
514 .session_get_size = virtio_crypto_sym_get_session_private_size,
515 .session_configure = virtio_crypto_sym_configure_session,
516 .session_clear = virtio_crypto_sym_clear_session,
517 .qp_attach_session = NULL,
518 .qp_detach_session = NULL
/*
 * queue_pair_setup callback: create data virtqueue @queue_pair_id with
 * qp_conf->nb_descriptors entries and store it in dev->data->queue_pairs.
 * NOTE(review): truncated view — return statements are not visible.
 */
522 virtio_crypto_qp_setup(struct rte_cryptodev *dev, uint16_t queue_pair_id,
523 const struct rte_cryptodev_qp_conf *qp_conf,
525 struct rte_mempool *session_pool __rte_unused)
528 struct virtqueue *vq;
530 PMD_INIT_FUNC_TRACE();
532 /* if virtio dev is started, do not touch the virtqueues */
533 if (dev->data->dev_started)
536 ret = virtio_crypto_queue_setup(dev, VTCRYPTO_DATAQ, queue_pair_id,
537 qp_conf->nb_descriptors, socket_id, &vq);
539 VIRTIO_CRYPTO_INIT_LOG_ERR(
540 "virtio crypto data queue initialization failed\n");
544 dev->data->queue_pairs[queue_pair_id] = vq;
/*
 * queue_pair_release callback: free the virtqueue backing @queue_pair_id
 * (a NULL entry is treated as already freed).
 */
550 virtio_crypto_qp_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
553 = (struct virtqueue *)dev->data->queue_pairs[queue_pair_id];
555 PMD_INIT_FUNC_TRACE();
558 VIRTIO_CRYPTO_DRV_LOG_DBG("vq already freed");
562 virtio_crypto_queue_release(vq);
/*
 * Negotiate the feature bit set with the device: read host features,
 * intersect with @req_features, require VIRTIO_F_VERSION_1, then set and
 * re-read FEATURES_OK status to confirm the device accepted the set.
 * NOTE(review): truncated view — return statements are not visible.
 */
567 virtio_negotiate_features(struct virtio_crypto_hw *hw, uint64_t req_features)
569 uint64_t host_features;
571 PMD_INIT_FUNC_TRACE();
573 /* Prepare guest_features: feature that driver wants to support */
574 VIRTIO_CRYPTO_INIT_LOG_DBG("guest_features before negotiate = %" PRIx64,
577 /* Read device(host) feature bits */
578 host_features = VTPCI_OPS(hw)->get_features(hw);
579 VIRTIO_CRYPTO_INIT_LOG_DBG("host_features before negotiate = %" PRIx64,
583 * Negotiate features: Subset of device feature bits are written back
584 * guest feature bits.
586 hw->guest_features = req_features;
587 hw->guest_features = vtpci_cryptodev_negotiate_features(hw,
589 VIRTIO_CRYPTO_INIT_LOG_DBG("features after negotiate = %" PRIx64,
/* VERSION_1 (modern virtio) is mandatory for this driver */
593 if (!vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) {
594 VIRTIO_CRYPTO_INIT_LOG_ERR(
595 "VIRTIO_F_VERSION_1 features is not enabled.");
598 vtpci_cryptodev_set_status(hw,
599 VIRTIO_CONFIG_STATUS_FEATURES_OK);
600 if (!(vtpci_cryptodev_get_status(hw) &
601 VIRTIO_CONFIG_STATUS_FEATURES_OK)) {
602 VIRTIO_CRYPTO_INIT_LOG_ERR("failed to set FEATURES_OK "
608 hw->req_guest_features = req_features;
613 /* reset device and renegotiate features if needed */
/*
 * Bring the device through the virtio init sequence (RESET -> ACK ->
 * DRIVER -> feature negotiation), then read device config to verify the
 * accelerator is HW-ready and learn the number of data queues.
 * NOTE(review): truncated view — return statements are not visible.
 */
615 virtio_crypto_init_device(struct rte_cryptodev *cryptodev,
616 uint64_t req_features)
618 struct virtio_crypto_hw *hw = cryptodev->data->dev_private;
619 struct virtio_crypto_config local_config;
620 struct virtio_crypto_config *config = &local_config;
622 PMD_INIT_FUNC_TRACE();
624 /* Reset the device although not necessary at startup */
625 vtpci_cryptodev_reset(hw);
627 /* Tell the host we've noticed this device. */
628 vtpci_cryptodev_set_status(hw, VIRTIO_CONFIG_STATUS_ACK);
630 /* Tell the host we've known how to drive the device. */
631 vtpci_cryptodev_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER);
632 if (virtio_negotiate_features(hw, req_features) < 0)
635 /* Get status of the device */
636 vtpci_read_cryptodev_config(hw,
637 offsetof(struct virtio_crypto_config, status),
638 &config->status, sizeof(config->status));
639 if (config->status != VIRTIO_CRYPTO_S_HW_READY) {
640 VIRTIO_CRYPTO_DRV_LOG_ERR("accelerator hardware is "
645 /* Get number of data queues */
646 vtpci_read_cryptodev_config(hw,
647 offsetof(struct virtio_crypto_config, max_dataqueues),
648 &config->max_dataqueues,
649 sizeof(config->max_dataqueues));
650 hw->max_dataqueues = config->max_dataqueues;
652 VIRTIO_CRYPTO_INIT_LOG_DBG("hw->max_dataqueues=%d",
659 * This function is based on probe() function
660 * It returns 0 on success.
/*
 * Create and initialize a cryptodev for a probed virtio-crypto PCI device:
 * allocate via the cryptodev PMD helper, wire ops/burst functions and
 * feature flags, then do PCI-level and virtio-level initialization.
 * NOTE(review): truncated view — error/success returns are not visible.
 */
663 crypto_virtio_create(const char *name, struct rte_pci_device *pci_dev,
664 struct rte_cryptodev_pmd_init_params *init_params)
666 struct rte_cryptodev *cryptodev;
667 struct virtio_crypto_hw *hw;
669 PMD_INIT_FUNC_TRACE();
671 cryptodev = rte_cryptodev_pmd_create(name, &pci_dev->device,
673 if (cryptodev == NULL)
676 cryptodev->driver_id = cryptodev_virtio_driver_id;
677 cryptodev->dev_ops = &virtio_crypto_dev_ops;
679 cryptodev->enqueue_burst = virtio_crypto_pkt_tx_burst;
680 cryptodev->dequeue_burst = virtio_crypto_pkt_rx_burst;
682 cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
683 RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING;
685 hw = cryptodev->data->dev_private;
686 hw->dev_id = cryptodev->data->dev_id;
688 VIRTIO_CRYPTO_INIT_LOG_DBG("dev %d vendorID=0x%x deviceID=0x%x",
689 cryptodev->data->dev_id, pci_dev->id.vendor_id,
690 pci_dev->id.device_id);
692 /* pci device init */
693 if (vtpci_cryptodev_init(pci_dev, hw))
696 if (virtio_crypto_init_device(cryptodev,
697 VIRTIO_CRYPTO_PMD_GUEST_FEATURES) < 0)
/*
 * Undo crypto_virtio_create(): stop/close if still running, detach burst
 * callbacks, release the control queue and free the device data.  Only
 * the primary process may do this.
 */
704 virtio_crypto_dev_uninit(struct rte_cryptodev *cryptodev)
706 struct virtio_crypto_hw *hw = cryptodev->data->dev_private;
708 PMD_INIT_FUNC_TRACE();
710 if (rte_eal_process_type() == RTE_PROC_SECONDARY)
713 if (cryptodev->data->dev_started) {
714 virtio_crypto_dev_stop(cryptodev);
715 virtio_crypto_dev_close(cryptodev);
718 cryptodev->dev_ops = NULL;
719 cryptodev->enqueue_burst = NULL;
720 cryptodev->dequeue_burst = NULL;
722 /* release control queue */
723 virtio_crypto_queue_release(hw->cvq);
725 rte_free(cryptodev->data);
726 cryptodev->data = NULL;
728 VIRTIO_CRYPTO_DRV_LOG_INFO("dev_uninit completed");
/*
 * dev_configure callback: (re)initialize the device with the PMD's guest
 * features, then create and start the control queue, which lives at index
 * hw->max_dataqueues (data queues occupy indices 0..max_dataqueues-1).
 * NOTE(review): truncated view — return statements are not visible.
 */
734 virtio_crypto_dev_configure(struct rte_cryptodev *cryptodev,
735 struct rte_cryptodev_config *config __rte_unused)
737 struct virtio_crypto_hw *hw = cryptodev->data->dev_private;
739 PMD_INIT_FUNC_TRACE();
741 if (virtio_crypto_init_device(cryptodev,
742 VIRTIO_CRYPTO_PMD_GUEST_FEATURES) < 0)
745 /* setup control queue
746 * [0, 1, ... ,(config->max_dataqueues - 1)] are data queues
747 * config->max_dataqueues is the control queue
749 if (virtio_crypto_ctrlq_setup(cryptodev, hw->max_dataqueues) < 0) {
750 VIRTIO_CRYPTO_INIT_LOG_ERR("control queue setup error");
753 virtio_crypto_ctrlq_start(cryptodev);
/*
 * dev_stop callback: reset the device, free in-flight mbufs and all
 * virtqueues, and mark the device stopped.
 */
759 virtio_crypto_dev_stop(struct rte_cryptodev *dev)
761 struct virtio_crypto_hw *hw = dev->data->dev_private;
763 PMD_INIT_FUNC_TRACE();
764 VIRTIO_CRYPTO_DRV_LOG_DBG("virtio_dev_stop");
766 vtpci_cryptodev_reset(hw);
768 virtio_crypto_dev_free_mbufs(dev);
769 virtio_crypto_free_queues(dev);
771 dev->data->dev_started = 0;
/*
 * dev_start callback: idempotent — returns early when already started;
 * otherwise starts the data queues and completes virtio re-init.
 * NOTE(review): truncated view — return statements are not visible.
 */
775 virtio_crypto_dev_start(struct rte_cryptodev *dev)
777 struct virtio_crypto_hw *hw = dev->data->dev_private;
779 if (dev->data->dev_started)
782 /* Do final configuration before queue engine starts */
783 virtio_crypto_dataq_start(dev);
784 vtpci_cryptodev_reinit_complete(hw);
786 dev->data->dev_started = 1;
/*
 * For every data queue, detach and free buffers still held by unused
 * descriptors; dumps the queue state before and after for debugging.
 */
792 virtio_crypto_dev_free_mbufs(struct rte_cryptodev *dev)
795 struct virtio_crypto_hw *hw = dev->data->dev_private;
797 for (i = 0; i < hw->max_dataqueues; i++) {
798 VIRTIO_CRYPTO_INIT_LOG_DBG("Before freeing dataq[%d] used "
799 "and unused buf", i);
800 VIRTQUEUE_DUMP((struct virtqueue *)
801 dev->data->queue_pairs[i]);
803 VIRTIO_CRYPTO_INIT_LOG_DBG("queue_pairs[%d]=%p",
804 i, dev->data->queue_pairs[i]);
806 virtqueue_detatch_unused(dev->data->queue_pairs[i]);
808 VIRTIO_CRYPTO_INIT_LOG_DBG("After freeing dataq[%d] used and "
811 (struct virtqueue *)dev->data->queue_pairs[i]);
/*
 * session_get_size callback: size of the per-session private data,
 * rounded up to a 16-byte multiple.
 */
816 virtio_crypto_sym_get_session_private_size(
817 struct rte_cryptodev *dev __rte_unused)
819 PMD_INIT_FUNC_TRACE();
821 return RTE_ALIGN_CEIL(sizeof(struct virtio_crypto_session), 16);
/*
 * Sanity-check the chain dev -> dev->data -> dev_private(hw) -> hw->cvq;
 * any NULL link is logged as an error.
 * NOTE(review): truncated view — return statements are not visible.
 */
825 virtio_crypto_check_sym_session_paras(
826 struct rte_cryptodev *dev)
828 struct virtio_crypto_hw *hw;
830 PMD_INIT_FUNC_TRACE();
832 if (unlikely(dev == NULL)) {
833 VIRTIO_CRYPTO_SESSION_LOG_ERR("dev is NULL");
836 if (unlikely(dev->data == NULL)) {
837 VIRTIO_CRYPTO_SESSION_LOG_ERR("dev->data is NULL");
840 hw = dev->data->dev_private;
841 if (unlikely(hw == NULL)) {
842 VIRTIO_CRYPTO_SESSION_LOG_ERR("hw is NULL");
845 if (unlikely(hw->cvq == NULL)) {
846 VIRTIO_CRYPTO_SESSION_LOG_ERR("vq is NULL");
/*
 * Validate the arguments of sym_clear_session: a non-NULL session plus
 * all the device-side checks done by check_sym_session_paras().
 */
854 virtio_crypto_check_sym_clear_session_paras(
855 struct rte_cryptodev *dev,
856 struct rte_cryptodev_sym_session *sess)
858 PMD_INIT_FUNC_TRACE();
861 VIRTIO_CRYPTO_SESSION_LOG_ERR("sym_session is NULL");
865 return virtio_crypto_check_sym_session_paras(dev);
/* A session-destroy control request uses 2 indirect descriptors:
 * the ctrl request and the writable status inhdr.
 */
868 #define NUM_ENTRY_SYM_CLEAR_SESSION 2
/*
 * session_clear callback: synchronously send a DESTROY_SESSION control
 * request for the session's id over the control virtqueue, busy-wait for
 * completion, check the device-written status, then wipe the session.
 * Mirrors the descriptor/cleanup structure of virtio_crypto_send_command.
 * NOTE(review): truncated view — returns and some bookkeeping lines are
 * not visible here.
 */
871 virtio_crypto_sym_clear_session(
872 struct rte_cryptodev *dev,
873 struct rte_cryptodev_sym_session *sess)
875 struct virtio_crypto_hw *hw;
876 struct virtqueue *vq;
877 struct virtio_crypto_session *session;
878 struct virtio_crypto_op_ctrl_req *ctrl;
879 struct vring_desc *desc;
883 uint8_t *malloc_virt_addr;
884 uint64_t malloc_phys_addr;
885 uint8_t len_inhdr = sizeof(struct virtio_crypto_inhdr);
886 uint32_t len_op_ctrl_req = sizeof(struct virtio_crypto_op_ctrl_req);
887 uint32_t desc_offset = len_op_ctrl_req + len_inhdr;
889 PMD_INIT_FUNC_TRACE();
891 if (virtio_crypto_check_sym_clear_session_paras(dev, sess) < 0)
894 hw = dev->data->dev_private;
896 session = (struct virtio_crypto_session *)get_session_private_data(
897 sess, cryptodev_virtio_driver_id);
898 if (session == NULL) {
899 VIRTIO_CRYPTO_SESSION_LOG_ERR("Invalid session parameter");
903 VIRTIO_CRYPTO_SESSION_LOG_INFO("vq->vq_desc_head_idx = %d, "
904 "vq = %p", vq->vq_desc_head_idx, vq);
906 if (vq->vq_free_cnt < needed) {
907 VIRTIO_CRYPTO_SESSION_LOG_ERR(
908 "vq->vq_free_cnt = %d is less than %d, "
909 "not enough", vq->vq_free_cnt, needed);
914 * malloc memory to store information of ctrl request op,
915 * returned status and desc vring
917 malloc_virt_addr = rte_malloc(NULL, len_op_ctrl_req + len_inhdr
918 + NUM_ENTRY_SYM_CLEAR_SESSION
919 * sizeof(struct vring_desc), RTE_CACHE_LINE_SIZE);
920 if (malloc_virt_addr == NULL) {
921 VIRTIO_CRYPTO_SESSION_LOG_ERR("not enough heap room");
924 malloc_phys_addr = rte_malloc_virt2iova(malloc_virt_addr);
926 /* assign ctrl request op part */
927 ctrl = (struct virtio_crypto_op_ctrl_req *)malloc_virt_addr;
928 ctrl->header.opcode = VIRTIO_CRYPTO_CIPHER_DESTROY_SESSION;
929 /* default data virtqueue is 0 */
930 ctrl->header.queue_id = 0;
931 ctrl->u.destroy_session.session_id = session->session_id;
/* Device-writable status; pre-set to ERR so silence is detected */
934 status = &(((struct virtio_crypto_inhdr *)
935 ((uint8_t *)malloc_virt_addr + len_op_ctrl_req))->status);
936 *status = VIRTIO_CRYPTO_ERR;
938 /* indirect desc vring part */
939 desc = (struct vring_desc *)((uint8_t *)malloc_virt_addr
942 /* ctrl request part */
943 desc[0].addr = malloc_phys_addr;
944 desc[0].len = len_op_ctrl_req;
945 desc[0].flags = VRING_DESC_F_NEXT;
/* status part (device-writable) */
949 desc[1].addr = malloc_phys_addr + len_op_ctrl_req;
950 desc[1].len = len_inhdr;
951 desc[1].flags = VRING_DESC_F_WRITE;
953 /* use only a single desc entry */
954 head = vq->vq_desc_head_idx;
955 vq->vq_ring.desc[head].flags = VRING_DESC_F_INDIRECT;
956 vq->vq_ring.desc[head].addr = malloc_phys_addr + desc_offset;
957 vq->vq_ring.desc[head].len
958 = NUM_ENTRY_SYM_CLEAR_SESSION
959 * sizeof(struct vring_desc);
960 vq->vq_free_cnt -= needed;
962 vq->vq_desc_head_idx = vq->vq_ring.desc[head].next;
964 vq_update_avail_ring(vq, head);
965 vq_update_avail_idx(vq);
967 VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_queue_index = %d",
970 virtqueue_notify(vq);
/* Busy-wait for the device to consume the request */
973 while (vq->vq_used_cons_idx == vq->vq_ring.used->idx) {
/* Reclaim descriptors from the used ring back onto the free list */
978 while (vq->vq_used_cons_idx != vq->vq_ring.used->idx) {
979 uint32_t idx, desc_idx, used_idx;
980 struct vring_used_elem *uep;
982 used_idx = (uint32_t)(vq->vq_used_cons_idx
983 & (vq->vq_nentries - 1));
984 uep = &vq->vq_ring.used->ring[used_idx];
985 idx = (uint32_t) uep->id;
987 while (vq->vq_ring.desc[desc_idx].flags
988 & VRING_DESC_F_NEXT) {
989 desc_idx = vq->vq_ring.desc[desc_idx].next;
993 vq->vq_ring.desc[desc_idx].next = vq->vq_desc_head_idx;
994 vq->vq_desc_head_idx = idx;
995 vq->vq_used_cons_idx++;
999 if (*status != VIRTIO_CRYPTO_OK) {
1000 VIRTIO_CRYPTO_SESSION_LOG_ERR("Close session failed "
1001 "status=%"PRIu32", session_id=%"PRIu64"",
1002 *status, session->session_id);
1003 rte_free(malloc_virt_addr);
1007 VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_free_cnt=%d\n"
1008 "vq->vq_desc_head_idx=%d",
1009 vq->vq_free_cnt, vq->vq_desc_head_idx);
1011 VIRTIO_CRYPTO_SESSION_LOG_INFO("Close session %"PRIu64" successfully ",
1012 session->session_id);
1014 memset(sess, 0, sizeof(struct virtio_crypto_session));
1015 rte_free(malloc_virt_addr);
/*
 * Walk the xform chain and return the first cipher xform, advancing via
 * xform->next.
 * NOTE(review): truncated view — loop header and NULL return not visible.
 */
1018 static struct rte_crypto_cipher_xform *
1019 virtio_crypto_get_cipher_xform(struct rte_crypto_sym_xform *xform)
1022 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
1023 return &xform->cipher;
1025 xform = xform->next;
/*
 * Walk the xform chain and return the first auth xform, advancing via
 * xform->next.
 * NOTE(review): truncated view — loop header and NULL return not visible.
 */
1031 static struct rte_crypto_auth_xform *
1032 virtio_crypto_get_auth_xform(struct rte_crypto_sym_xform *xform)
1035 if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH)
1036 return &xform->auth;
1038 xform = xform->next;
1044 /** Get xform chain order */
/*
 * Classify a (one- or two-element) xform chain into the virtio crypto
 * command id: cipher-only, auth-only, hash-then-cipher or
 * cipher-then-hash.
 * NOTE(review): truncated view — the fall-through/unsupported return is
 * not visible here.
 */
1046 virtio_crypto_get_chain_order(struct rte_crypto_sym_xform *xform)
1052 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
1053 xform->next == NULL)
1054 return VIRTIO_CRYPTO_CMD_CIPHER;
1056 /* Authentication Only */
1057 if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
1058 xform->next == NULL)
1059 return VIRTIO_CRYPTO_CMD_AUTH;
1061 /* Authenticate then Cipher */
1062 if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
1063 xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
1064 return VIRTIO_CRYPTO_CMD_HASH_CIPHER;
1066 /* Cipher then Authenticate */
1067 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
1068 xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
1069 return VIRTIO_CRYPTO_CMD_CIPHER_HASH;
/*
 * Translate an rte_crypto cipher xform into the virtio cipher session
 * parameters: algorithm (switch on algo, visible here only as the
 * unsupported default), key length, and encrypt/decrypt direction.
 * NOTE(review): truncated view — the supported-algo cases and return
 * statements are not visible here.
 */
1075 virtio_crypto_sym_pad_cipher_param(
1076 struct virtio_crypto_cipher_session_para *para,
1077 struct rte_crypto_cipher_xform *cipher_xform)
1079 switch (cipher_xform->algo) {
1081 VIRTIO_CRYPTO_SESSION_LOG_ERR("Crypto: Unsupported "
1082 "Cipher alg %u", cipher_xform->algo);
1086 para->keylen = cipher_xform->key.length;
1087 switch (cipher_xform->op) {
1088 case RTE_CRYPTO_CIPHER_OP_ENCRYPT:
1089 para->op = VIRTIO_CRYPTO_OP_ENCRYPT;
1091 case RTE_CRYPTO_CIPHER_OP_DECRYPT:
1092 para->op = VIRTIO_CRYPTO_OP_DECRYPT;
1095 VIRTIO_CRYPTO_SESSION_LOG_ERR("Unsupported cipher operation "
/*
 * Translate an rte_crypto auth xform into the chain-session hash/MAC
 * algorithm field.  The target field depends on hash_mode: PLAIN uses
 * hash_param, AUTH (keyed MAC) uses mac_param.
 * NOTE(review): truncated view — the supported-algo cases and return
 * statements are not visible here.
 */
1104 virtio_crypto_sym_pad_auth_param(
1105 struct virtio_crypto_op_ctrl_req *ctrl,
1106 struct rte_crypto_auth_xform *auth_xform)
1109 struct virtio_crypto_alg_chain_session_para *para =
1110 &(ctrl->u.sym_create_session.u.chain.para);
1112 switch (ctrl->u.sym_create_session.u.chain.para.hash_mode) {
1113 case VIRTIO_CRYPTO_SYM_HASH_MODE_PLAIN:
1114 algo = &(para->u.hash_param.algo);
1116 case VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH:
1117 algo = &(para->u.mac_param.algo);
1120 VIRTIO_CRYPTO_SESSION_LOG_ERR("Unsupported hash mode %u "
1122 ctrl->u.sym_create_session.u.chain.para.hash_mode);
1126 switch (auth_xform->algo) {
1128 VIRTIO_CRYPTO_SESSION_LOG_ERR(
1129 "Crypto: Undefined Hash algo %u specified",
1131 *algo = VIRTIO_CRYPTO_NO_MAC;
/*
 * Fill the session-create ctrl request from the xform chain.  Pads the
 * cipher parameters (chain or cipher-only variant depending on
 * @is_chainned), exports the cipher key pointer and the IV offset/length
 * into @session, and for chained sessions also pads the auth parameters
 * and exports the auth key pointer.
 * NOTE(review): truncated view — returns and some branch headers are not
 * visible here.
 */
1139 virtio_crypto_sym_pad_op_ctrl_req(
1140 struct virtio_crypto_op_ctrl_req *ctrl,
1141 struct rte_crypto_sym_xform *xform, bool is_chainned,
1142 uint8_t **cipher_key_data, uint8_t **auth_key_data,
1143 struct virtio_crypto_session *session)
1146 struct rte_crypto_auth_xform *auth_xform = NULL;
1147 struct rte_crypto_cipher_xform *cipher_xform = NULL;
1149 /* Get cipher xform from crypto xform chain */
1150 cipher_xform = virtio_crypto_get_cipher_xform(xform);
/* Chained sessions pad into u.chain, cipher-only into u.cipher */
1153 ret = virtio_crypto_sym_pad_cipher_param(
1154 &ctrl->u.sym_create_session.u.chain.para
1155 .cipher_param, cipher_xform);
1157 ret = virtio_crypto_sym_pad_cipher_param(
1158 &ctrl->u.sym_create_session.u.cipher.para,
1162 VIRTIO_CRYPTO_SESSION_LOG_ERR(
1163 "pad cipher parameter failed");
1167 *cipher_key_data = cipher_xform->key.data;
/* Remember where the per-op IV lives so the data path can find it */
1169 session->iv.offset = cipher_xform->iv.offset;
1170 session->iv.length = cipher_xform->iv.length;
1173 /* Get auth xform from crypto xform chain */
1174 auth_xform = virtio_crypto_get_auth_xform(xform);
1176 /* FIXME: support VIRTIO_CRYPTO_SYM_HASH_MODE_NESTED */
1177 struct virtio_crypto_alg_chain_session_para *para =
1178 &(ctrl->u.sym_create_session.u.chain.para);
/* Keyed auth -> MAC mode; keyless -> plain hash mode */
1179 if (auth_xform->key.length) {
1180 para->hash_mode = VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH;
1181 para->u.mac_param.auth_key_len =
1182 (uint32_t)auth_xform->key.length;
1183 para->u.mac_param.hash_result_len =
1184 auth_xform->digest_length;
1186 *auth_key_data = auth_xform->key.data;
1188 para->hash_mode = VIRTIO_CRYPTO_SYM_HASH_MODE_PLAIN;
1189 para->u.hash_param.hash_result_len =
1190 auth_xform->digest_length;
1193 ret = virtio_crypto_sym_pad_auth_param(ctrl, auth_xform);
1195 VIRTIO_CRYPTO_SESSION_LOG_ERR("pad auth parameter "
/*
 * Validate the arguments of sym_configure_session: non-NULL xform, session
 * and mempool, plus the device-side checks of check_sym_session_paras().
 * NOTE(review): truncated view — return statements are not visible.
 */
1205 virtio_crypto_check_sym_configure_session_paras(
1206 struct rte_cryptodev *dev,
1207 struct rte_crypto_sym_xform *xform,
1208 struct rte_cryptodev_sym_session *sym_sess,
1209 struct rte_mempool *mempool)
1211 if (unlikely(xform == NULL) || unlikely(sym_sess == NULL) ||
1212 unlikely(mempool == NULL)) {
1213 VIRTIO_CRYPTO_SESSION_LOG_ERR("NULL pointer");
1217 if (virtio_crypto_check_sym_session_paras(dev) < 0)
/*
 * session_configure callback: allocate private session data from the
 * mempool, build a CREATE_SESSION ctrl request from the xform chain
 * (cipher-only or chained, with chain order derived from the xform
 * types), send it on the control queue, and attach the resulting private
 * data to @sess under this driver's id.
 * NOTE(review): truncated view — returns and error-path cleanup are not
 * visible here.
 */
1224 virtio_crypto_sym_configure_session(
1225 struct rte_cryptodev *dev,
1226 struct rte_crypto_sym_xform *xform,
1227 struct rte_cryptodev_sym_session *sess,
1228 struct rte_mempool *mempool)
1231 struct virtio_crypto_session crypto_sess;
1232 void *session_private = &crypto_sess;
1233 struct virtio_crypto_session *session;
1234 struct virtio_crypto_op_ctrl_req *ctrl_req;
1235 enum virtio_crypto_cmd_id cmd_id;
1236 uint8_t *cipher_key_data = NULL;
1237 uint8_t *auth_key_data = NULL;
1238 struct virtio_crypto_hw *hw;
1239 struct virtqueue *control_vq;
1241 PMD_INIT_FUNC_TRACE();
1243 ret = virtio_crypto_check_sym_configure_session_paras(dev, xform,
1246 VIRTIO_CRYPTO_SESSION_LOG_ERR("Invalid parameters");
1250 if (rte_mempool_get(mempool, &session_private)) {
1251 VIRTIO_CRYPTO_SESSION_LOG_ERR(
1252 "Couldn't get object from session mempool");
1256 session = (struct virtio_crypto_session *)session_private;
1257 memset(session, 0, sizeof(struct virtio_crypto_session));
1258 ctrl_req = &session->ctrl;
1259 ctrl_req->header.opcode = VIRTIO_CRYPTO_CIPHER_CREATE_SESSION;
1260 /* FIXME: support multiqueue */
1261 ctrl_req->header.queue_id = 0;
1263 hw = dev->data->dev_private;
1264 control_vq = hw->cvq;
/* Record chain order for two-xform sessions before the op-type switch */
1266 cmd_id = virtio_crypto_get_chain_order(xform);
1267 if (cmd_id == VIRTIO_CRYPTO_CMD_CIPHER_HASH)
1268 ctrl_req->u.sym_create_session.u.chain.para.alg_chain_order
1269 = VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_CIPHER_THEN_HASH;
1270 if (cmd_id == VIRTIO_CRYPTO_CMD_HASH_CIPHER)
1271 ctrl_req->u.sym_create_session.u.chain.para.alg_chain_order
1272 = VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_HASH_THEN_CIPHER;
1275 case VIRTIO_CRYPTO_CMD_CIPHER_HASH:
1276 case VIRTIO_CRYPTO_CMD_HASH_CIPHER:
1277 ctrl_req->u.sym_create_session.op_type
1278 = VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING;
1280 ret = virtio_crypto_sym_pad_op_ctrl_req(ctrl_req,
1281 xform, true, &cipher_key_data, &auth_key_data, session);
1283 VIRTIO_CRYPTO_SESSION_LOG_ERR(
1284 "padding sym op ctrl req failed");
1287 ret = virtio_crypto_send_command(control_vq, ctrl_req,
1288 cipher_key_data, auth_key_data, session);
1290 VIRTIO_CRYPTO_SESSION_LOG_ERR(
1291 "create session failed: %d", ret);
1295 case VIRTIO_CRYPTO_CMD_CIPHER:
1296 ctrl_req->u.sym_create_session.op_type
1297 = VIRTIO_CRYPTO_SYM_OP_CIPHER;
1298 ret = virtio_crypto_sym_pad_op_ctrl_req(ctrl_req, xform,
1299 false, &cipher_key_data, &auth_key_data, session);
1301 VIRTIO_CRYPTO_SESSION_LOG_ERR(
1302 "padding sym op ctrl req failed");
1305 ret = virtio_crypto_send_command(control_vq, ctrl_req,
1306 cipher_key_data, NULL, session);
1308 VIRTIO_CRYPTO_SESSION_LOG_ERR(
1309 "create session failed: %d", ret);
1314 VIRTIO_CRYPTO_SESSION_LOG_ERR(
1315 "Unsupported operation chain order parameter");
/* Publish the private data into the generic session object */
1319 set_session_private_data(sess, dev->driver_id,
/*
 * dev_infos_get callback: report driver id, PCI device, feature flags,
 * number of queue pairs (= data queues) and max session count.
 * NOTE(review): truncated view — the NULL-check around info is not
 * visible here.
 */
1329 virtio_crypto_dev_info_get(struct rte_cryptodev *dev,
1330 struct rte_cryptodev_info *info)
1332 struct virtio_crypto_hw *hw = dev->data->dev_private;
1334 PMD_INIT_FUNC_TRACE();
1337 info->driver_id = cryptodev_virtio_driver_id;
1338 info->pci_dev = RTE_DEV_TO_PCI(dev->device);
1339 info->feature_flags = dev->feature_flags;
1340 info->max_nb_queue_pairs = hw->max_dataqueues;
1341 info->sym.max_nb_sessions =
1342 RTE_VIRTIO_CRYPTO_PMD_MAX_NB_SESSIONS;
/*
 * PCI probe callback: build default init params (current socket, hw-sized
 * private data), derive the device name from the PCI address and hand off
 * to crypto_virtio_create().
 */
1347 crypto_virtio_pci_probe(
1348 struct rte_pci_driver *pci_drv __rte_unused,
1349 struct rte_pci_device *pci_dev)
1351 struct rte_cryptodev_pmd_init_params init_params = {
1353 .socket_id = rte_socket_id(),
1354 .private_data_size = sizeof(struct virtio_crypto_hw),
1355 .max_nb_sessions = RTE_VIRTIO_CRYPTO_PMD_MAX_NB_SESSIONS
1357 char name[RTE_CRYPTODEV_NAME_MAX_LEN];
1359 VIRTIO_CRYPTO_DRV_LOG_DBG("Found Crypto device at %02x:%02x.%x",
1361 pci_dev->addr.devid,
1362 pci_dev->addr.function);
1364 rte_pci_device_name(&pci_dev->addr, name, sizeof(name));
1366 return crypto_virtio_create(name, pci_dev, &init_params);
/*
 * PCI remove callback: look the cryptodev up by its PCI-derived name and
 * uninitialize it.
 * NOTE(review): truncated view — the early-return values are not visible.
 */
1370 crypto_virtio_pci_remove(
1371 struct rte_pci_device *pci_dev __rte_unused)
1373 struct rte_cryptodev *cryptodev;
1374 char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
1376 if (pci_dev == NULL)
1379 rte_pci_device_name(&pci_dev->addr, cryptodev_name,
1380 sizeof(cryptodev_name));
1382 cryptodev = rte_cryptodev_pmd_get_named_dev(cryptodev_name);
1383 if (cryptodev == NULL)
1386 return virtio_crypto_dev_uninit(cryptodev);
/* PCI driver descriptor and PMD registration; the crypto-driver
 * registration also allocates cryptodev_virtio_driver_id.
 */
1389 static struct rte_pci_driver rte_virtio_crypto_driver = {
1390 .id_table = pci_id_virtio_crypto_map,
1392 .probe = crypto_virtio_pci_probe,
1393 .remove = crypto_virtio_pci_remove
1396 static struct cryptodev_driver virtio_crypto_drv;
1398 RTE_PMD_REGISTER_PCI(CRYPTODEV_NAME_VIRTIO_PMD, rte_virtio_crypto_driver);
1399 RTE_PMD_REGISTER_CRYPTO_DRIVER(virtio_crypto_drv,
1400 rte_virtio_crypto_driver.driver,
1401 cryptodev_virtio_driver_id);
/* Constructor: register one dynamic log type per component and default
 * each to NOTICE level.
 */
1403 RTE_INIT(virtio_crypto_init_log);
1405 virtio_crypto_init_log(void)
1407 virtio_crypto_logtype_init = rte_log_register("pmd.crypto.virtio.init");
1408 if (virtio_crypto_logtype_init >= 0)
1409 rte_log_set_level(virtio_crypto_logtype_init, RTE_LOG_NOTICE);
1411 virtio_crypto_logtype_session =
1412 rte_log_register("pmd.crypto.virtio.session");
1413 if (virtio_crypto_logtype_session >= 0)
1414 rte_log_set_level(virtio_crypto_logtype_session,
1417 virtio_crypto_logtype_rx = rte_log_register("pmd.crypto.virtio.rx");
1418 if (virtio_crypto_logtype_rx >= 0)
1419 rte_log_set_level(virtio_crypto_logtype_rx, RTE_LOG_NOTICE);
1421 virtio_crypto_logtype_tx = rte_log_register("pmd.crypto.virtio.tx");
1422 if (virtio_crypto_logtype_tx >= 0)
1423 rte_log_set_level(virtio_crypto_logtype_tx, RTE_LOG_NOTICE);
1425 virtio_crypto_logtype_driver =
1426 rte_log_register("pmd.crypto.virtio.driver");
1427 if (virtio_crypto_logtype_driver >= 0)
1428 rte_log_set_level(virtio_crypto_logtype_driver, RTE_LOG_NOTICE);