1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.
7 #include <rte_common.h>
10 #include <rte_bus_pci.h>
11 #include <rte_cryptodev.h>
12 #include <rte_cryptodev_pmd.h>
15 #include "virtio_cryptodev.h"
16 #include "virtqueue.h"
17 #include "virtio_crypto_algs.h"
18 #include "virtio_crypto_capabilities.h"
/* Per-subsystem dynamic log type ids; registered elsewhere (registration
 * code not visible in this chunk) and used by the VIRTIO_CRYPTO_*_LOG_*
 * macros throughout this file.
 */
20 int virtio_crypto_logtype_init;
21 int virtio_crypto_logtype_session;
22 int virtio_crypto_logtype_rx;
23 int virtio_crypto_logtype_tx;
24 int virtio_crypto_logtype_driver;
/* Forward declarations for the rte_cryptodev_ops callbacks and the
 * session helpers defined later in this file.
 */
26 static int virtio_crypto_dev_configure(struct rte_cryptodev *dev,
27 struct rte_cryptodev_config *config);
28 static int virtio_crypto_dev_start(struct rte_cryptodev *dev);
29 static void virtio_crypto_dev_stop(struct rte_cryptodev *dev);
30 static int virtio_crypto_dev_close(struct rte_cryptodev *dev);
31 static void virtio_crypto_dev_info_get(struct rte_cryptodev *dev,
32 struct rte_cryptodev_info *dev_info);
33 static void virtio_crypto_dev_stats_get(struct rte_cryptodev *dev,
34 struct rte_cryptodev_stats *stats);
35 static void virtio_crypto_dev_stats_reset(struct rte_cryptodev *dev);
36 static int virtio_crypto_qp_setup(struct rte_cryptodev *dev,
37 uint16_t queue_pair_id,
38 const struct rte_cryptodev_qp_conf *qp_conf,
40 struct rte_mempool *session_pool);
41 static int virtio_crypto_qp_release(struct rte_cryptodev *dev,
42 uint16_t queue_pair_id);
43 static void virtio_crypto_dev_free_mbufs(struct rte_cryptodev *dev);
44 static unsigned int virtio_crypto_sym_get_session_private_size(
45 struct rte_cryptodev *dev);
46 static void virtio_crypto_sym_clear_session(struct rte_cryptodev *dev,
47 struct rte_cryptodev_sym_session *sess);
48 static int virtio_crypto_sym_configure_session(struct rte_cryptodev *dev,
49 struct rte_crypto_sym_xform *xform,
50 struct rte_cryptodev_sym_session *session,
51 struct rte_mempool *mp);
54 * The set of PCI devices this driver supports
/* PCI vendor/device id table this driver binds to; terminated by a
 * zeroed sentinel entry.
 */
56 static const struct rte_pci_id pci_id_virtio_crypto_map[] = {
57 { RTE_PCI_DEVICE(VIRTIO_CRYPTO_PCI_VENDORID,
58 VIRTIO_CRYPTO_PCI_DEVICEID) },
59 { .vendor_id = 0, /* sentinel */ },
/* Symmetric-crypto capabilities advertised via dev_infos_get. */
62 static const struct rte_cryptodev_capabilities virtio_capabilities[] = {
63 VIRTIO_SYM_CAPABILITIES,
64 RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
/* Driver id assigned by the cryptodev framework at registration time. */
67 uint8_t cryptodev_virtio_driver_id;
/* A create-session control request uses up to 4 indirect descriptors:
 * ctrl req, cipher key, auth key, session input.
 */
69 #define NUM_ENTRY_SYM_CREATE_SESSION 4
/*
 * Send a CREATE_SESSION control request to the device over the control
 * virtqueue and synchronously wait (busy-poll) for the backend's answer.
 *
 * The request is laid out in one contiguous rte_malloc'd buffer:
 *   [ctrl req][cipher key][auth key][session input][indirect desc table]
 * and is linked into the vring as a single INDIRECT descriptor.
 *
 * On success the device-assigned session id is stored in session->session_id.
 * NOTE(review): this chunk is truncated — several lines (variable
 * declarations for head/idx/needed, closing braces, return statements)
 * are missing from the visible text.
 */
72 virtio_crypto_send_command(struct virtqueue *vq,
73 struct virtio_crypto_op_ctrl_req *ctrl, uint8_t *cipher_key,
74 uint8_t *auth_key, struct virtio_crypto_session *session)
79 uint32_t len_cipher_key = 0;
80 uint32_t len_auth_key = 0;
81 uint32_t len_ctrl_req = sizeof(struct virtio_crypto_op_ctrl_req);
82 uint32_t len_session_input = sizeof(struct virtio_crypto_session_input);
83 uint32_t len_total = 0;
84 uint32_t input_offset = 0;
85 void *virt_addr_started = NULL;
86 phys_addr_t phys_addr_started;
87 struct vring_desc *desc;
89 struct virtio_crypto_session_input *input;
92 PMD_INIT_FUNC_TRACE();
/* Parameter validation: a session object is mandatory. */
94 if (session == NULL) {
95 VIRTIO_CRYPTO_SESSION_LOG_ERR("session is NULL.");
98 /* cipher only is supported, it is available if auth_key is NULL */
100 VIRTIO_CRYPTO_SESSION_LOG_ERR("cipher key is NULL.");
/* Reserve the current free-descriptor head for this request. */
104 head = vq->vq_desc_head_idx;
105 VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_desc_head_idx = %d, vq = %p",
108 if (vq->vq_free_cnt < needed) {
109 VIRTIO_CRYPTO_SESSION_LOG_ERR("Not enough entry");
113 /* calculate the length of cipher key */
115 switch (ctrl->u.sym_create_session.op_type) {
116 case VIRTIO_CRYPTO_SYM_OP_CIPHER:
118 = ctrl->u.sym_create_session.u.cipher
121 case VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING:
123 = ctrl->u.sym_create_session.u.chain
124 .para.cipher_param.keylen;
127 VIRTIO_CRYPTO_SESSION_LOG_ERR("invalid op type");
132 /* calculate the length of auth key */
/* auth key length comes from the chain mac_param (AUTH hash mode). */
135 ctrl->u.sym_create_session.u.chain.para.u.mac_param
140 * malloc memory to store indirect vring_desc entries, including
141 * ctrl request, cipher key, auth key, session input and desc vring
143 desc_offset = len_ctrl_req + len_cipher_key + len_auth_key
145 virt_addr_started = rte_malloc(NULL,
146 desc_offset + NUM_ENTRY_SYM_CREATE_SESSION
147 * sizeof(struct vring_desc), RTE_CACHE_LINE_SIZE);
148 if (virt_addr_started == NULL) {
149 VIRTIO_CRYPTO_SESSION_LOG_ERR("not enough heap memory");
/* IOVA of the buffer — the device addresses memory physically. */
152 phys_addr_started = rte_malloc_virt2iova(virt_addr_started);
154 /* address to store indirect vring desc entries */
155 desc = (struct vring_desc *)
156 ((uint8_t *)virt_addr_started + desc_offset);
/* Descriptor 0: the control request itself (device-readable). */
159 memcpy(virt_addr_started, ctrl, len_ctrl_req);
160 desc[idx].addr = phys_addr_started;
161 desc[idx].len = len_ctrl_req;
162 desc[idx].flags = VRING_DESC_F_NEXT;
163 desc[idx].next = idx + 1;
165 len_total += len_ctrl_req;
166 input_offset += len_ctrl_req;
168 /* cipher key part */
169 if (len_cipher_key > 0) {
170 memcpy((uint8_t *)virt_addr_started + len_total,
171 cipher_key, len_cipher_key);
173 desc[idx].addr = phys_addr_started + len_total;
174 desc[idx].len = len_cipher_key;
175 desc[idx].flags = VRING_DESC_F_NEXT;
176 desc[idx].next = idx + 1;
178 len_total += len_cipher_key;
179 input_offset += len_cipher_key;
/* auth key part (chaining only). */
183 if (len_auth_key > 0) {
184 memcpy((uint8_t *)virt_addr_started + len_total,
185 auth_key, len_auth_key);
187 desc[idx].addr = phys_addr_started + len_total;
188 desc[idx].len = len_auth_key;
189 desc[idx].flags = VRING_DESC_F_NEXT;
190 desc[idx].next = idx + 1;
192 len_total += len_auth_key;
193 input_offset += len_auth_key;
/* Last descriptor: session input, written back by the device.
 * Pre-set to error values so a non-responding backend is detected.
 */
197 input = (struct virtio_crypto_session_input *)
198 ((uint8_t *)virt_addr_started + input_offset);
199 input->status = VIRTIO_CRYPTO_ERR;
200 input->session_id = ~0ULL;
201 desc[idx].addr = phys_addr_started + len_total;
202 desc[idx].len = len_session_input;
203 desc[idx].flags = VRING_DESC_F_WRITE;
206 /* use a single desc entry */
207 vq->vq_ring.desc[head].addr = phys_addr_started + desc_offset;
208 vq->vq_ring.desc[head].len = idx * sizeof(struct vring_desc);
209 vq->vq_ring.desc[head].flags = VRING_DESC_F_INDIRECT;
212 vq->vq_desc_head_idx = vq->vq_ring.desc[head].next;
214 vq_update_avail_ring(vq, head);
215 vq_update_avail_idx(vq);
217 VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_queue_index = %d",
220 virtqueue_notify(vq);
/* Busy-wait until the device publishes a used-ring entry. */
223 while (vq->vq_used_cons_idx == vq->vq_ring.used->idx) {
/* Drain the used ring and return the descriptor chain to the free list. */
228 while (vq->vq_used_cons_idx != vq->vq_ring.used->idx) {
229 uint32_t idx, desc_idx, used_idx;
230 struct vring_used_elem *uep;
232 used_idx = (uint32_t)(vq->vq_used_cons_idx
233 & (vq->vq_nentries - 1));
234 uep = &vq->vq_ring.used->ring[used_idx];
235 idx = (uint32_t) uep->id;
/* Walk to the tail of the chain before relinking the free list. */
238 while (vq->vq_ring.desc[desc_idx].flags & VRING_DESC_F_NEXT) {
239 desc_idx = vq->vq_ring.desc[desc_idx].next;
243 vq->vq_ring.desc[desc_idx].next = vq->vq_desc_head_idx;
244 vq->vq_desc_head_idx = idx;
246 vq->vq_used_cons_idx++;
250 VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_free_cnt=%d\n"
251 "vq->vq_desc_head_idx=%d",
252 vq->vq_free_cnt, vq->vq_desc_head_idx);
/* Evaluate the device-written status; free the request buffer on all
 * paths.
 */
255 if (input->status != VIRTIO_CRYPTO_OK) {
256 VIRTIO_CRYPTO_SESSION_LOG_ERR("Something wrong on backend! "
257 "status=%u, session_id=%" PRIu64 "",
258 input->status, input->session_id);
259 rte_free(virt_addr_started);
262 session->session_id = input->session_id;
264 VIRTIO_CRYPTO_SESSION_LOG_INFO("Create session successfully, "
265 "session_id=%" PRIu64 "", input->session_id);
266 rte_free(virt_addr_started);
/*
 * Tear down one virtqueue: deactivate it at the PCI level, then free its
 * vring memzone and (for data queues) its cookie mempool.
 * NOTE(review): the NULL check on vq and the assignment of hw (presumably
 * from vq->hw) are in lines missing from this chunk — confirm upstream.
 */
274 virtio_crypto_queue_release(struct virtqueue *vq)
276 struct virtio_crypto_hw *hw;
278 PMD_INIT_FUNC_TRACE();
282 /* Select and deactivate the queue */
283 VTPCI_OPS(hw)->del_queue(hw, vq);
285 rte_memzone_free(vq->mz);
286 rte_mempool_free(vq->mpool);
291 #define MPOOL_MAX_NAME_SZ 32
/*
 * Allocate and initialize one virtqueue (data or control):
 *  - read the queue size from the device and validate it,
 *  - allocate the virtqueue struct (+ per-descriptor extras),
 *  - for data queues, create/lookup an op-cookie mempool and pre-allocate
 *    one cookie per descriptor,
 *  - reserve an aligned memzone for the vring and record its addresses.
 * The created queue is returned through *pvq (assignment not visible in
 * this truncated chunk).
 */
294 virtio_crypto_queue_setup(struct rte_cryptodev *dev,
296 uint16_t vtpci_queue_idx,
299 struct virtqueue **pvq)
301 char vq_name[VIRTQUEUE_MAX_NAME_SZ];
302 char mpool_name[MPOOL_MAX_NAME_SZ];
303 const struct rte_memzone *mz;
304 unsigned int vq_size, size;
305 struct virtio_crypto_hw *hw = dev->data->dev_private;
306 struct virtqueue *vq = NULL;
310 PMD_INIT_FUNC_TRACE();
312 VIRTIO_CRYPTO_INIT_LOG_DBG("setting up queue: %u", vtpci_queue_idx);
315 * Read the virtqueue size from the Queue Size field
316 * Always power of 2 and if 0 virtqueue does not exist
318 vq_size = VTPCI_OPS(hw)->get_queue_num(hw, vtpci_queue_idx);
320 VIRTIO_CRYPTO_INIT_LOG_ERR("virtqueue does not exist");
323 VIRTIO_CRYPTO_INIT_LOG_DBG("vq_size: %u", vq_size);
325 if (!rte_is_power_of_2(vq_size)) {
326 VIRTIO_CRYPTO_INIT_LOG_ERR("virtqueue size is not powerof 2");
/* Build unique memzone/mempool names per device and queue type. */
330 if (queue_type == VTCRYPTO_DATAQ) {
331 snprintf(vq_name, sizeof(vq_name), "dev%d_dataqueue%d",
332 dev->data->dev_id, vtpci_queue_idx);
333 snprintf(mpool_name, sizeof(mpool_name),
334 "dev%d_dataqueue%d_mpool",
335 dev->data->dev_id, vtpci_queue_idx);
336 } else if (queue_type == VTCRYPTO_CTRLQ) {
337 snprintf(vq_name, sizeof(vq_name), "dev%d_controlqueue",
339 snprintf(mpool_name, sizeof(mpool_name),
340 "dev%d_controlqueue_mpool",
/* virtqueue struct is followed by vq_size vq_desc_extra slots. */
343 size = RTE_ALIGN_CEIL(sizeof(*vq) +
344 vq_size * sizeof(struct vq_desc_extra),
345 RTE_CACHE_LINE_SIZE);
346 vq = rte_zmalloc_socket(vq_name, size, RTE_CACHE_LINE_SIZE,
349 VIRTIO_CRYPTO_INIT_LOG_ERR("Can not allocate virtqueue");
353 if (queue_type == VTCRYPTO_DATAQ) {
354 /* pre-allocate a mempool and use it in the data plane to
355 * improve performance
357 vq->mpool = rte_mempool_lookup(mpool_name);
358 if (vq->mpool == NULL)
359 vq->mpool = rte_mempool_create(mpool_name,
361 sizeof(struct virtio_crypto_op_cookie),
362 RTE_CACHE_LINE_SIZE, 0,
363 NULL, NULL, NULL, NULL, socket_id,
366 VIRTIO_CRYPTO_DRV_LOG_ERR("Virtio Crypto PMD "
367 "Cannot create mempool");
368 goto mpool_create_err;
/* One op cookie per descriptor, freed via cookie_alloc_err on failure. */
370 for (i = 0; i < vq_size; i++) {
371 vq->vq_descx[i].cookie =
372 rte_zmalloc("crypto PMD op cookie pointer",
373 sizeof(struct virtio_crypto_op_cookie),
374 RTE_CACHE_LINE_SIZE);
375 if (vq->vq_descx[i].cookie == NULL) {
376 VIRTIO_CRYPTO_DRV_LOG_ERR("Failed to "
377 "alloc mem for cookie");
378 goto cookie_alloc_err;
384 vq->dev_id = dev->data->dev_id;
385 vq->vq_queue_index = vtpci_queue_idx;
386 vq->vq_nentries = vq_size;
389 * Using part of the vring entries is permitted, but the maximum
392 if (nb_desc == 0 || nb_desc > vq_size)
394 vq->vq_free_cnt = nb_desc;
397 * Reserve a memzone for vring elements
399 size = vring_size(vq_size, VIRTIO_PCI_VRING_ALIGN);
400 vq->vq_ring_size = RTE_ALIGN_CEIL(size, VIRTIO_PCI_VRING_ALIGN);
401 VIRTIO_CRYPTO_INIT_LOG_DBG("%s vring_size: %d, rounded_vring_size: %d",
402 (queue_type == VTCRYPTO_DATAQ) ? "dataq" : "ctrlq",
403 size, vq->vq_ring_size);
405 mz = rte_memzone_reserve_aligned(vq_name, vq->vq_ring_size,
406 socket_id, 0, VIRTIO_PCI_VRING_ALIGN);
/* EEXIST means a previous (e.g. secondary-process) reservation exists. */
408 if (rte_errno == EEXIST)
409 mz = rte_memzone_lookup(vq_name);
411 VIRTIO_CRYPTO_INIT_LOG_ERR("not enough memory");
417 * Virtio PCI device VIRTIO_PCI_QUEUE_PF register is 32bit,
418 * and only accepts 32 bit page frame number.
419 * Check if the allocated physical memory exceeds 16TB.
421 if ((mz->phys_addr + vq->vq_ring_size - 1)
422 >> (VIRTIO_PCI_QUEUE_ADDR_SHIFT + 32)) {
423 VIRTIO_CRYPTO_INIT_LOG_ERR("vring address shouldn't be "
/* NOTE(review): BUG — sizeof(mz->len) zeroes only sizeof(size_t) bytes;
 * the intent is clearly to clear the whole vring (mz->len bytes).
 * Left untouched here because surrounding lines are missing; fix as
 * memset(mz->addr, 0, mz->len) upstream.
 */
428 memset(mz->addr, 0, sizeof(mz->len));
430 vq->vq_ring_mem = mz->phys_addr;
431 vq->vq_ring_virt_mem = mz->addr;
432 VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_ring_mem(physical): 0x%"PRIx64,
433 (uint64_t)mz->phys_addr);
434 VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_ring_virt_mem: 0x%"PRIx64,
435 (uint64_t)(uintptr_t)mz->addr);
/* Error unwind labels: release resources in reverse acquisition order. */
442 rte_memzone_free(mz);
445 rte_mempool_free(vq->mpool);
447 for (j = 0; j < i; j++)
448 rte_free(vq->vq_descx[j].cookie);
/*
 * Create the control virtqueue at index queue_idx and record it in
 * hw->cvq (assignment in lines not visible here). No-op error if the
 * device is already started. nb_desc 0 means "use full ring size".
 */
456 virtio_crypto_ctrlq_setup(struct rte_cryptodev *dev, uint16_t queue_idx)
459 struct virtqueue *vq;
460 struct virtio_crypto_hw *hw = dev->data->dev_private;
462 /* if virtio device has started, do not touch the virtqueues */
463 if (dev->data->dev_started)
466 PMD_INIT_FUNC_TRACE();
468 ret = virtio_crypto_queue_setup(dev, VTCRYPTO_CTRLQ, queue_idx,
469 0, SOCKET_ID_ANY, &vq);
471 VIRTIO_CRYPTO_INIT_LOG_ERR("control vq initialization failed");
/*
 * Release the control queue and every data queue of the device.
 */
481 virtio_crypto_free_queues(struct rte_cryptodev *dev)
484 struct virtio_crypto_hw *hw = dev->data->dev_private;
486 PMD_INIT_FUNC_TRACE();
488 /* control queue release */
489 virtio_crypto_queue_release(hw->cvq);
491 /* data queue release */
492 for (i = 0; i < hw->max_dataqueues; i++)
493 virtio_crypto_queue_release(dev->data->queue_pairs[i]);
/* dev_close callback: nothing to do beyond what stop/uninit already
 * handle (body not visible in this chunk).
 */
497 virtio_crypto_dev_close(struct rte_cryptodev *dev __rte_unused)
503 * dev_ops for virtio, bare necessities for basic operation
505 static struct rte_cryptodev_ops virtio_crypto_dev_ops = {
506 /* Device related operations */
507 .dev_configure = virtio_crypto_dev_configure,
508 .dev_start = virtio_crypto_dev_start,
509 .dev_stop = virtio_crypto_dev_stop,
510 .dev_close = virtio_crypto_dev_close,
511 .dev_infos_get = virtio_crypto_dev_info_get,
513 .stats_get = virtio_crypto_dev_stats_get,
514 .stats_reset = virtio_crypto_dev_stats_reset,
516 .queue_pair_setup = virtio_crypto_qp_setup,
517 .queue_pair_release = virtio_crypto_qp_release,
/* queue_pair_count intentionally unimplemented. */
518 .queue_pair_count = NULL,
520 /* Crypto related operations */
521 .sym_session_get_size = virtio_crypto_sym_get_session_private_size,
522 .sym_session_configure = virtio_crypto_sym_configure_session,
523 .sym_session_clear = virtio_crypto_sym_clear_session
/*
 * Accumulate per-data-queue packet counters into *stats.
 * Queues that were never set up (NULL entries) are skipped.
 */
527 virtio_crypto_update_stats(struct rte_cryptodev *dev,
528 struct rte_cryptodev_stats *stats)
531 struct virtio_crypto_hw *hw = dev->data->dev_private;
533 PMD_INIT_FUNC_TRACE();
/* Guard against a NULL stats pointer from the caller. */
536 VIRTIO_CRYPTO_DRV_LOG_ERR("invalid pointer");
540 for (i = 0; i < hw->max_dataqueues; i++) {
541 const struct virtqueue *data_queue
542 = dev->data->queue_pairs[i];
543 if (data_queue == NULL)
546 stats->enqueued_count += data_queue->packets_sent_total;
547 stats->enqueue_err_count += data_queue->packets_sent_failed;
549 stats->dequeued_count += data_queue->packets_received_total;
550 stats->dequeue_err_count
551 += data_queue->packets_received_failed;
/* stats_get callback: thin wrapper around virtio_crypto_update_stats(). */
556 virtio_crypto_dev_stats_get(struct rte_cryptodev *dev,
557 struct rte_cryptodev_stats *stats)
559 PMD_INIT_FUNC_TRACE();
561 virtio_crypto_update_stats(dev, stats);
/*
 * stats_reset callback: zero every data queue's packet counters,
 * skipping unset (NULL) queue pairs.
 */
565 virtio_crypto_dev_stats_reset(struct rte_cryptodev *dev)
568 struct virtio_crypto_hw *hw = dev->data->dev_private;
570 PMD_INIT_FUNC_TRACE();
572 for (i = 0; i < hw->max_dataqueues; i++) {
573 struct virtqueue *data_queue = dev->data->queue_pairs[i];
574 if (data_queue == NULL)
577 data_queue->packets_sent_total = 0;
578 data_queue->packets_sent_failed = 0;
580 data_queue->packets_received_total = 0;
581 data_queue->packets_received_failed = 0;
/*
 * queue_pair_setup callback: create the data virtqueue for
 * queue_pair_id with qp_conf->nb_descriptors entries and publish it in
 * dev->data->queue_pairs. Refuses to touch queues on a started device.
 */
586 virtio_crypto_qp_setup(struct rte_cryptodev *dev, uint16_t queue_pair_id,
587 const struct rte_cryptodev_qp_conf *qp_conf,
589 struct rte_mempool *session_pool __rte_unused)
592 struct virtqueue *vq;
594 PMD_INIT_FUNC_TRACE();
596 /* if virtio dev is started, do not touch the virtqueues */
597 if (dev->data->dev_started)
600 ret = virtio_crypto_queue_setup(dev, VTCRYPTO_DATAQ, queue_pair_id,
601 qp_conf->nb_descriptors, socket_id, &vq);
603 VIRTIO_CRYPTO_INIT_LOG_ERR(
604 "virtio crypto data queue initialization failed\n");
608 dev->data->queue_pairs[queue_pair_id] = vq;
/*
 * queue_pair_release callback: free the virtqueue backing a queue pair.
 * A NULL entry means the queue was already freed (debug log only).
 */
614 virtio_crypto_qp_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
617 = (struct virtqueue *)dev->data->queue_pairs[queue_pair_id];
619 PMD_INIT_FUNC_TRACE();
622 VIRTIO_CRYPTO_DRV_LOG_DBG("vq already freed");
626 virtio_crypto_queue_release(vq);
/*
 * Negotiate the feature bit set with the device: intersect the driver's
 * requested features with the host's advertised features, require
 * VIRTIO_F_VERSION_1, and confirm via the FEATURES_OK status handshake.
 */
631 virtio_negotiate_features(struct virtio_crypto_hw *hw, uint64_t req_features)
633 uint64_t host_features;
635 PMD_INIT_FUNC_TRACE();
637 /* Prepare guest_features: feature that driver wants to support */
638 VIRTIO_CRYPTO_INIT_LOG_DBG("guest_features before negotiate = %" PRIx64,
641 /* Read device(host) feature bits */
642 host_features = VTPCI_OPS(hw)->get_features(hw);
643 VIRTIO_CRYPTO_INIT_LOG_DBG("host_features before negotiate = %" PRIx64,
647 * Negotiate features: Subset of device feature bits are written back
648 * guest feature bits.
650 hw->guest_features = req_features;
651 hw->guest_features = vtpci_cryptodev_negotiate_features(hw,
653 VIRTIO_CRYPTO_INIT_LOG_DBG("features after negotiate = %" PRIx64,
/* This is a modern (virtio 1.0) only driver. */
657 if (!vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) {
658 VIRTIO_CRYPTO_INIT_LOG_ERR(
659 "VIRTIO_F_VERSION_1 features is not enabled.");
/* Write FEATURES_OK and read it back; the device clears the bit if it
 * rejects the negotiated set.
 */
662 vtpci_cryptodev_set_status(hw,
663 VIRTIO_CONFIG_STATUS_FEATURES_OK);
664 if (!(vtpci_cryptodev_get_status(hw) &
665 VIRTIO_CONFIG_STATUS_FEATURES_OK)) {
666 VIRTIO_CRYPTO_INIT_LOG_ERR("failed to set FEATURES_OK "
672 hw->req_guest_features = req_features;
677 /* reset device and renegotiate features if needed */
/* reset device and renegotiate features if needed */
/*
 * Bring the device through the virtio init sequence:
 * RESET -> ACK -> DRIVER -> feature negotiation, then read the device
 * config to verify hardware readiness and learn max_dataqueues.
 */
679 virtio_crypto_init_device(struct rte_cryptodev *cryptodev,
680 uint64_t req_features)
682 struct virtio_crypto_hw *hw = cryptodev->data->dev_private;
683 struct virtio_crypto_config local_config;
684 struct virtio_crypto_config *config = &local_config;
686 PMD_INIT_FUNC_TRACE();
688 /* Reset the device although not necessary at startup */
689 vtpci_cryptodev_reset(hw);
691 /* Tell the host we've noticed this device. */
692 vtpci_cryptodev_set_status(hw, VIRTIO_CONFIG_STATUS_ACK);
694 /* Tell the host we've known how to drive the device. */
695 vtpci_cryptodev_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER);
696 if (virtio_negotiate_features(hw, req_features) < 0)
699 /* Get status of the device */
700 vtpci_read_cryptodev_config(hw,
701 offsetof(struct virtio_crypto_config, status),
702 &config->status, sizeof(config->status));
703 if (config->status != VIRTIO_CRYPTO_S_HW_READY) {
704 VIRTIO_CRYPTO_DRV_LOG_ERR("accelerator hardware is "
709 /* Get number of data queues */
710 vtpci_read_cryptodev_config(hw,
711 offsetof(struct virtio_crypto_config, max_dataqueues),
712 &config->max_dataqueues,
713 sizeof(config->max_dataqueues));
714 hw->max_dataqueues = config->max_dataqueues;
716 VIRTIO_CRYPTO_INIT_LOG_DBG("hw->max_dataqueues=%d",
723 * This function is based on probe() function
724 * It returns 0 on success.
/*
 * Probe-time constructor: allocate the rte_cryptodev, wire up ops and
 * burst functions, advertise feature flags and capabilities, then run
 * PCI init and the virtio device init sequence.
 */
727 crypto_virtio_create(const char *name, struct rte_pci_device *pci_dev,
728 struct rte_cryptodev_pmd_init_params *init_params)
730 struct rte_cryptodev *cryptodev;
731 struct virtio_crypto_hw *hw;
733 PMD_INIT_FUNC_TRACE();
735 cryptodev = rte_cryptodev_pmd_create(name, &pci_dev->device,
737 if (cryptodev == NULL)
740 cryptodev->driver_id = cryptodev_virtio_driver_id;
741 cryptodev->dev_ops = &virtio_crypto_dev_ops;
743 cryptodev->enqueue_burst = virtio_crypto_pkt_tx_burst;
744 cryptodev->dequeue_burst = virtio_crypto_pkt_rx_burst;
746 cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
747 RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING;
749 hw = cryptodev->data->dev_private;
750 hw->dev_id = cryptodev->data->dev_id;
751 hw->virtio_dev_capabilities = virtio_capabilities;
753 VIRTIO_CRYPTO_INIT_LOG_DBG("dev %d vendorID=0x%x deviceID=0x%x",
754 cryptodev->data->dev_id, pci_dev->id.vendor_id,
755 pci_dev->id.device_id);
757 /* pci device init */
758 if (vtpci_cryptodev_init(pci_dev, hw))
761 if (virtio_crypto_init_device(cryptodev,
762 VIRTIO_CRYPTO_PMD_GUEST_FEATURES) < 0)
/*
 * Remove-time destructor (primary process only): stop/close a running
 * device, detach ops and burst hooks, release the control queue, and
 * free the device data.
 */
769 virtio_crypto_dev_uninit(struct rte_cryptodev *cryptodev)
771 struct virtio_crypto_hw *hw = cryptodev->data->dev_private;
773 PMD_INIT_FUNC_TRACE();
/* Secondary processes must not tear down shared device state. */
775 if (rte_eal_process_type() == RTE_PROC_SECONDARY)
778 if (cryptodev->data->dev_started) {
779 virtio_crypto_dev_stop(cryptodev);
780 virtio_crypto_dev_close(cryptodev);
783 cryptodev->dev_ops = NULL;
784 cryptodev->enqueue_burst = NULL;
785 cryptodev->dequeue_burst = NULL;
787 /* release control queue */
788 virtio_crypto_queue_release(hw->cvq);
790 rte_free(cryptodev->data);
791 cryptodev->data = NULL;
793 VIRTIO_CRYPTO_DRV_LOG_INFO("dev_uninit completed");
/*
 * dev_configure callback: re-run the virtio init sequence, then create
 * and start the control queue (which lives at index max_dataqueues,
 * after all data queues).
 */
799 virtio_crypto_dev_configure(struct rte_cryptodev *cryptodev,
800 struct rte_cryptodev_config *config __rte_unused)
802 struct virtio_crypto_hw *hw = cryptodev->data->dev_private;
804 PMD_INIT_FUNC_TRACE();
806 if (virtio_crypto_init_device(cryptodev,
807 VIRTIO_CRYPTO_PMD_GUEST_FEATURES) < 0)
810 /* setup control queue
811 * [0, 1, ... ,(config->max_dataqueues - 1)] are data queues
812 * config->max_dataqueues is the control queue
814 if (virtio_crypto_ctrlq_setup(cryptodev, hw->max_dataqueues) < 0) {
815 VIRTIO_CRYPTO_INIT_LOG_ERR("control queue setup error");
818 virtio_crypto_ctrlq_start(cryptodev);
/*
 * dev_stop callback: reset the device, drop any in-flight buffers, and
 * free all virtqueues. After this the device must be re-configured
 * before it can be started again.
 */
824 virtio_crypto_dev_stop(struct rte_cryptodev *dev)
826 struct virtio_crypto_hw *hw = dev->data->dev_private;
828 PMD_INIT_FUNC_TRACE();
829 VIRTIO_CRYPTO_DRV_LOG_DBG("virtio_dev_stop");
831 vtpci_cryptodev_reset(hw);
833 virtio_crypto_dev_free_mbufs(dev);
834 virtio_crypto_free_queues(dev);
836 dev->data->dev_started = 0;
/*
 * dev_start callback: start the data queues and signal DRIVER_OK to the
 * device. Idempotent — returns early if already started.
 */
840 virtio_crypto_dev_start(struct rte_cryptodev *dev)
842 struct virtio_crypto_hw *hw = dev->data->dev_private;
844 if (dev->data->dev_started)
847 /* Do final configuration before queue engine starts */
848 virtio_crypto_dataq_start(dev);
849 vtpci_cryptodev_reinit_complete(hw);
851 dev->data->dev_started = 1;
/*
 * Detach and free buffers still sitting in each data queue (used around
 * device stop). VIRTQUEUE_DUMP before/after is debug-only visibility.
 */
857 virtio_crypto_dev_free_mbufs(struct rte_cryptodev *dev)
860 struct virtio_crypto_hw *hw = dev->data->dev_private;
862 for (i = 0; i < hw->max_dataqueues; i++) {
863 VIRTIO_CRYPTO_INIT_LOG_DBG("Before freeing dataq[%d] used "
864 "and unused buf", i);
865 VIRTQUEUE_DUMP((struct virtqueue *)
866 dev->data->queue_pairs[i]);
868 VIRTIO_CRYPTO_INIT_LOG_DBG("queue_pairs[%d]=%p",
869 i, dev->data->queue_pairs[i]);
871 virtqueue_detatch_unused(dev->data->queue_pairs[i]);
873 VIRTIO_CRYPTO_INIT_LOG_DBG("After freeing dataq[%d] used and "
876 (struct virtqueue *)dev->data->queue_pairs[i]);
/*
 * sym_session_get_size callback: size of the per-session private data,
 * rounded up to a 16-byte multiple.
 */
881 virtio_crypto_sym_get_session_private_size(
882 struct rte_cryptodev *dev __rte_unused)
884 PMD_INIT_FUNC_TRACE();
886 return RTE_ALIGN_CEIL(sizeof(struct virtio_crypto_session), 16);
/*
 * Validate the chain of pointers needed for any session operation:
 * dev, dev->data, private hw state and the control virtqueue.
 * Logs and rejects on the first NULL found.
 */
890 virtio_crypto_check_sym_session_paras(
891 struct rte_cryptodev *dev)
893 struct virtio_crypto_hw *hw;
895 PMD_INIT_FUNC_TRACE();
897 if (unlikely(dev == NULL)) {
898 VIRTIO_CRYPTO_SESSION_LOG_ERR("dev is NULL");
901 if (unlikely(dev->data == NULL)) {
902 VIRTIO_CRYPTO_SESSION_LOG_ERR("dev->data is NULL");
905 hw = dev->data->dev_private;
906 if (unlikely(hw == NULL)) {
907 VIRTIO_CRYPTO_SESSION_LOG_ERR("hw is NULL");
910 if (unlikely(hw->cvq == NULL)) {
911 VIRTIO_CRYPTO_SESSION_LOG_ERR("vq is NULL");
/*
 * Validate arguments of sym_session_clear: reject a NULL session, then
 * reuse the common device-pointer checks.
 */
919 virtio_crypto_check_sym_clear_session_paras(
920 struct rte_cryptodev *dev,
921 struct rte_cryptodev_sym_session *sess)
923 PMD_INIT_FUNC_TRACE();
926 VIRTIO_CRYPTO_SESSION_LOG_ERR("sym_session is NULL");
930 return virtio_crypto_check_sym_session_paras(dev);
933 #define NUM_ENTRY_SYM_CLEAR_SESSION 2
/* A destroy-session control request uses 2 indirect descriptors:
 * ctrl req + status inhdr.
 */
/*
 * sym_session_clear callback: send a DESTROY_SESSION control request for
 * the session's device-side id, busy-wait for completion, then release
 * the session's private data back to its mempool.
 * NOTE(review): chunk is truncated — declarations of head/needed/status
 * and several braces/returns are in missing lines.
 */
936 virtio_crypto_sym_clear_session(
937 struct rte_cryptodev *dev,
938 struct rte_cryptodev_sym_session *sess)
940 struct virtio_crypto_hw *hw;
941 struct virtqueue *vq;
942 struct virtio_crypto_session *session;
943 struct virtio_crypto_op_ctrl_req *ctrl;
944 struct vring_desc *desc;
948 uint8_t *malloc_virt_addr;
949 uint64_t malloc_phys_addr;
950 uint8_t len_inhdr = sizeof(struct virtio_crypto_inhdr);
951 uint32_t len_op_ctrl_req = sizeof(struct virtio_crypto_op_ctrl_req);
952 uint32_t desc_offset = len_op_ctrl_req + len_inhdr;
954 PMD_INIT_FUNC_TRACE();
956 if (virtio_crypto_check_sym_clear_session_paras(dev, sess) < 0)
959 hw = dev->data->dev_private;
961 session = (struct virtio_crypto_session *)get_sym_session_private_data(
962 sess, cryptodev_virtio_driver_id);
963 if (session == NULL) {
964 VIRTIO_CRYPTO_SESSION_LOG_ERR("Invalid session parameter");
968 VIRTIO_CRYPTO_SESSION_LOG_INFO("vq->vq_desc_head_idx = %d, "
969 "vq = %p", vq->vq_desc_head_idx, vq);
971 if (vq->vq_free_cnt < needed) {
972 VIRTIO_CRYPTO_SESSION_LOG_ERR(
973 "vq->vq_free_cnt = %d is less than %d, "
974 "not enough", vq->vq_free_cnt, needed);
979 * malloc memory to store information of ctrl request op,
980 * returned status and desc vring
982 malloc_virt_addr = rte_malloc(NULL, len_op_ctrl_req + len_inhdr
983 + NUM_ENTRY_SYM_CLEAR_SESSION
984 * sizeof(struct vring_desc), RTE_CACHE_LINE_SIZE);
985 if (malloc_virt_addr == NULL) {
986 VIRTIO_CRYPTO_SESSION_LOG_ERR("not enough heap room");
989 malloc_phys_addr = rte_malloc_virt2iova(malloc_virt_addr);
991 /* assign ctrl request op part */
992 ctrl = (struct virtio_crypto_op_ctrl_req *)malloc_virt_addr;
993 ctrl->header.opcode = VIRTIO_CRYPTO_CIPHER_DESTROY_SESSION;
994 /* default data virtqueue is 0 */
995 ctrl->header.queue_id = 0;
996 ctrl->u.destroy_session.session_id = session->session_id;
/* Device-writable status byte, pre-set to error for detection. */
999 status = &(((struct virtio_crypto_inhdr *)
1000 ((uint8_t *)malloc_virt_addr + len_op_ctrl_req))->status);
1001 *status = VIRTIO_CRYPTO_ERR;
1003 /* indirect desc vring part */
1004 desc = (struct vring_desc *)((uint8_t *)malloc_virt_addr
1007 /* ctrl request part */
1008 desc[0].addr = malloc_phys_addr;
1009 desc[0].len = len_op_ctrl_req;
1010 desc[0].flags = VRING_DESC_F_NEXT;
/* status returned by the device, hence WRITE flag. */
1014 desc[1].addr = malloc_phys_addr + len_op_ctrl_req;
1015 desc[1].len = len_inhdr;
1016 desc[1].flags = VRING_DESC_F_WRITE;
1018 /* use only a single desc entry */
1019 head = vq->vq_desc_head_idx;
1020 vq->vq_ring.desc[head].flags = VRING_DESC_F_INDIRECT;
1021 vq->vq_ring.desc[head].addr = malloc_phys_addr + desc_offset;
1022 vq->vq_ring.desc[head].len
1023 = NUM_ENTRY_SYM_CLEAR_SESSION
1024 * sizeof(struct vring_desc);
1025 vq->vq_free_cnt -= needed;
1027 vq->vq_desc_head_idx = vq->vq_ring.desc[head].next;
1029 vq_update_avail_ring(vq, head);
1030 vq_update_avail_idx(vq);
1032 VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_queue_index = %d",
1033 vq->vq_queue_index);
1035 virtqueue_notify(vq);
/* Busy-wait for the device to consume the request. */
1038 while (vq->vq_used_cons_idx == vq->vq_ring.used->idx) {
/* Reclaim the used descriptor chain back onto the free list. */
1043 while (vq->vq_used_cons_idx != vq->vq_ring.used->idx) {
1044 uint32_t idx, desc_idx, used_idx;
1045 struct vring_used_elem *uep;
1047 used_idx = (uint32_t)(vq->vq_used_cons_idx
1048 & (vq->vq_nentries - 1));
1049 uep = &vq->vq_ring.used->ring[used_idx];
1050 idx = (uint32_t) uep->id;
1052 while (vq->vq_ring.desc[desc_idx].flags
1053 & VRING_DESC_F_NEXT) {
1054 desc_idx = vq->vq_ring.desc[desc_idx].next;
1058 vq->vq_ring.desc[desc_idx].next = vq->vq_desc_head_idx;
1059 vq->vq_desc_head_idx = idx;
1060 vq->vq_used_cons_idx++;
/* Device reported failure: keep the session, free the request buffer. */
1064 if (*status != VIRTIO_CRYPTO_OK) {
1065 VIRTIO_CRYPTO_SESSION_LOG_ERR("Close session failed "
1066 "status=%"PRIu32", session_id=%"PRIu64"",
1067 *status, session->session_id);
1068 rte_free(malloc_virt_addr);
1072 VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_free_cnt=%d\n"
1073 "vq->vq_desc_head_idx=%d",
1074 vq->vq_free_cnt, vq->vq_desc_head_idx);
1076 VIRTIO_CRYPTO_SESSION_LOG_INFO("Close session %"PRIu64" successfully ",
1077 session->session_id);
/* Scrub and return the private session object to its mempool, and
 * detach it from the generic session.
 */
1079 memset(session, 0, sizeof(struct virtio_crypto_session));
1080 struct rte_mempool *sess_mp = rte_mempool_from_obj(session);
1081 set_sym_session_private_data(sess, cryptodev_virtio_driver_id, NULL);
1082 rte_mempool_put(sess_mp, session);
1083 rte_free(malloc_virt_addr);
/*
 * Walk the xform chain and return the first CIPHER xform, or NULL if
 * none exists (return path in lines not visible here).
 */
1086 static struct rte_crypto_cipher_xform *
1087 virtio_crypto_get_cipher_xform(struct rte_crypto_sym_xform *xform)
1090 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
1091 return &xform->cipher;
1093 xform = xform->next;
/*
 * Walk the xform chain and return the first AUTH xform, or NULL if
 * none exists (return path in lines not visible here).
 */
1099 static struct rte_crypto_auth_xform *
1100 virtio_crypto_get_auth_xform(struct rte_crypto_sym_xform *xform)
1103 if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH)
1104 return &xform->auth;
1106 xform = xform->next;
1112 /** Get xform chain order */
/*
 * Classify the xform chain into one of the VIRTIO_CRYPTO_CMD_* kinds:
 * cipher-only, auth-only, hash-then-cipher, or cipher-then-hash.
 * Unrecognized chains fall through to a default (not visible here).
 */
1114 virtio_crypto_get_chain_order(struct rte_crypto_sym_xform *xform)
/* Cipher Only */
1120 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
1121 xform->next == NULL)
1122 return VIRTIO_CRYPTO_CMD_CIPHER;
1124 /* Authentication Only */
1125 if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
1126 xform->next == NULL)
1127 return VIRTIO_CRYPTO_CMD_AUTH;
1129 /* Authenticate then Cipher */
1130 if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
1131 xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
1132 return VIRTIO_CRYPTO_CMD_HASH_CIPHER;
1134 /* Cipher then Authenticate */
1135 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
1136 xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
1137 return VIRTIO_CRYPTO_CMD_CIPHER_HASH;
/*
 * Translate an rte cipher xform into the virtio cipher session
 * parameters (algorithm, key length, encrypt/decrypt direction).
 * Only AES-CBC is supported; anything else is rejected with a log.
 */
1143 virtio_crypto_sym_pad_cipher_param(
1144 struct virtio_crypto_cipher_session_para *para,
1145 struct rte_crypto_cipher_xform *cipher_xform)
1147 switch (cipher_xform->algo) {
1148 case RTE_CRYPTO_CIPHER_AES_CBC:
1149 para->algo = VIRTIO_CRYPTO_CIPHER_AES_CBC;
1152 VIRTIO_CRYPTO_SESSION_LOG_ERR("Crypto: Unsupported "
1153 "Cipher alg %u", cipher_xform->algo);
1157 para->keylen = cipher_xform->key.length;
1158 switch (cipher_xform->op) {
1159 case RTE_CRYPTO_CIPHER_OP_ENCRYPT:
1160 para->op = VIRTIO_CRYPTO_OP_ENCRYPT;
1162 case RTE_CRYPTO_CIPHER_OP_DECRYPT:
1163 para->op = VIRTIO_CRYPTO_OP_DECRYPT;
1166 VIRTIO_CRYPTO_SESSION_LOG_ERR("Unsupported cipher operation "
/*
 * Translate an rte auth xform into the virtio chain session hash/mac
 * algorithm field. The target field depends on the previously selected
 * hash_mode (PLAIN hash vs keyed AUTH/mac); only HMAC-SHA1 is accepted.
 */
1175 virtio_crypto_sym_pad_auth_param(
1176 struct virtio_crypto_op_ctrl_req *ctrl,
1177 struct rte_crypto_auth_xform *auth_xform)
1180 struct virtio_crypto_alg_chain_session_para *para =
1181 &(ctrl->u.sym_create_session.u.chain.para);
1183 switch (ctrl->u.sym_create_session.u.chain.para.hash_mode) {
1184 case VIRTIO_CRYPTO_SYM_HASH_MODE_PLAIN:
1185 algo = &(para->u.hash_param.algo);
1187 case VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH:
1188 algo = &(para->u.mac_param.algo);
1191 VIRTIO_CRYPTO_SESSION_LOG_ERR("Unsupported hash mode %u "
1193 ctrl->u.sym_create_session.u.chain.para.hash_mode);
1197 switch (auth_xform->algo) {
1198 case RTE_CRYPTO_AUTH_SHA1_HMAC:
1199 *algo = VIRTIO_CRYPTO_MAC_HMAC_SHA1;
1202 VIRTIO_CRYPTO_SESSION_LOG_ERR(
1203 "Crypto: Undefined Hash algo %u specified",
/*
 * Fill the CREATE_SESSION control request from the rte xform chain:
 * cipher parameters (and, for chained ops, auth parameters), and export
 * the raw key pointers via *cipher_key_data / *auth_key_data for the
 * caller to copy into the request buffer. Also records the cipher IV
 * offset/length in the session for the data path.
 */
1212 virtio_crypto_sym_pad_op_ctrl_req(
1213 struct virtio_crypto_op_ctrl_req *ctrl,
1214 struct rte_crypto_sym_xform *xform, bool is_chainned,
1215 uint8_t **cipher_key_data, uint8_t **auth_key_data,
1216 struct virtio_crypto_session *session)
1219 struct rte_crypto_auth_xform *auth_xform = NULL;
1220 struct rte_crypto_cipher_xform *cipher_xform = NULL;
1222 /* Get cipher xform from crypto xform chain */
1223 cipher_xform = virtio_crypto_get_cipher_xform(xform);
/* IV must fit the fixed-size field in the virtio request. */
1225 if (cipher_xform->iv.length > VIRTIO_CRYPTO_MAX_IV_SIZE) {
1226 VIRTIO_CRYPTO_SESSION_LOG_ERR(
1227 "cipher IV size cannot be longer than %u",
1228 VIRTIO_CRYPTO_MAX_IV_SIZE);
/* Chained sessions store cipher params under .chain, plain cipher
 * sessions under .cipher.
 */
1232 ret = virtio_crypto_sym_pad_cipher_param(
1233 &ctrl->u.sym_create_session.u.chain.para
1234 .cipher_param, cipher_xform);
1236 ret = virtio_crypto_sym_pad_cipher_param(
1237 &ctrl->u.sym_create_session.u.cipher.para,
1241 VIRTIO_CRYPTO_SESSION_LOG_ERR(
1242 "pad cipher parameter failed");
1246 *cipher_key_data = cipher_xform->key.data;
1248 session->iv.offset = cipher_xform->iv.offset;
1249 session->iv.length = cipher_xform->iv.length;
/* Auth half only applies to chained (cipher+hash) sessions. */
1252 /* Get auth xform from crypto xform chain */
1253 auth_xform = virtio_crypto_get_auth_xform(xform);
1255 /* FIXME: support VIRTIO_CRYPTO_SYM_HASH_MODE_NESTED */
1256 struct virtio_crypto_alg_chain_session_para *para =
1257 &(ctrl->u.sym_create_session.u.chain.para);
/* A non-empty key selects keyed MAC mode; otherwise plain hash. */
1258 if (auth_xform->key.length) {
1259 para->hash_mode = VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH;
1260 para->u.mac_param.auth_key_len =
1261 (uint32_t)auth_xform->key.length;
1262 para->u.mac_param.hash_result_len =
1263 auth_xform->digest_length;
1265 *auth_key_data = auth_xform->key.data;
1267 para->hash_mode = VIRTIO_CRYPTO_SYM_HASH_MODE_PLAIN;
1268 para->u.hash_param.hash_result_len =
1269 auth_xform->digest_length;
1272 ret = virtio_crypto_sym_pad_auth_param(ctrl, auth_xform);
1274 VIRTIO_CRYPTO_SESSION_LOG_ERR("pad auth parameter "
/*
 * Validate arguments of sym_session_configure: xform, session and
 * mempool must be non-NULL, then run the common device checks.
 */
1284 virtio_crypto_check_sym_configure_session_paras(
1285 struct rte_cryptodev *dev,
1286 struct rte_crypto_sym_xform *xform,
1287 struct rte_cryptodev_sym_session *sym_sess,
1288 struct rte_mempool *mempool)
1290 if (unlikely(xform == NULL) || unlikely(sym_sess == NULL) ||
1291 unlikely(mempool == NULL)) {
1292 VIRTIO_CRYPTO_SESSION_LOG_ERR("NULL pointer");
1296 if (virtio_crypto_check_sym_session_paras(dev) < 0)
/*
 * Create a symmetric crypto session on the virtio-crypto device.
 *
 * Builds a VIRTIO_CRYPTO_CIPHER_CREATE_SESSION control request from the
 * crypto xform chain and submits it to the device over the control
 * virtqueue via virtio_crypto_send_command().  The driver-private
 * session object is taken from the caller-supplied mempool and attached
 * to the generic rte_cryptodev session on success.
 *
 * NOTE(review): this view is elided — the `int ret;` declaration, the
 * `switch (cmd_id)` header, `break` statements, error-path labels and
 * return statements are not visible.  Code left untouched, comments only.
 */
1303 virtio_crypto_sym_configure_session(
1304 struct rte_cryptodev *dev,
1305 struct rte_crypto_sym_xform *xform,
1306 struct rte_cryptodev_sym_session *sess,
1307 struct rte_mempool *mempool)
1310 struct virtio_crypto_session crypto_sess;
/* Seeded with a stack object; replaced by a mempool object below. */
1311 void *session_private = &crypto_sess;
1312 struct virtio_crypto_session *session;
1313 struct virtio_crypto_op_ctrl_req *ctrl_req;
1314 enum virtio_crypto_cmd_id cmd_id;
1315 uint8_t *cipher_key_data = NULL;
1316 uint8_t *auth_key_data = NULL;
1317 struct virtio_crypto_hw *hw;
1318 struct virtqueue *control_vq;
1320 PMD_INIT_FUNC_TRACE();
/* Validate all pointer arguments before touching the mempool. */
1322 ret = virtio_crypto_check_sym_configure_session_paras(dev, xform,
1325 VIRTIO_CRYPTO_SESSION_LOG_ERR("Invalid parameters");
/* Carve the driver-private session out of the caller's mempool. */
1329 if (rte_mempool_get(mempool, &session_private)) {
1330 VIRTIO_CRYPTO_SESSION_LOG_ERR(
1331 "Couldn't get object from session mempool");
/* Zero the session and fill in the control request header. */
1335 session = (struct virtio_crypto_session *)session_private;
1336 memset(session, 0, sizeof(struct virtio_crypto_session));
1337 ctrl_req = &session->ctrl;
1338 ctrl_req->header.opcode = VIRTIO_CRYPTO_CIPHER_CREATE_SESSION;
1339 /* FIXME: support multiqueue */
1340 ctrl_req->header.queue_id = 0;
1342 hw = dev->data->dev_private;
1343 control_vq = hw->cvq;
/* Derive the op type (cipher-only vs chained) from the xform chain,
 * and record the chain order for the chained cases. */
1345 cmd_id = virtio_crypto_get_chain_order(xform);
1346 if (cmd_id == VIRTIO_CRYPTO_CMD_CIPHER_HASH)
1347 ctrl_req->u.sym_create_session.u.chain.para.alg_chain_order
1348 = VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_CIPHER_THEN_HASH;
1349 if (cmd_id == VIRTIO_CRYPTO_CMD_HASH_CIPHER)
1350 ctrl_req->u.sym_create_session.u.chain.para.alg_chain_order
1351 = VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_HASH_THEN_CIPHER;
/* Chained cipher+hash: pad both cipher and auth parameters, then
 * send the request with both key buffers. */
1354 case VIRTIO_CRYPTO_CMD_CIPHER_HASH:
1355 case VIRTIO_CRYPTO_CMD_HASH_CIPHER:
1356 ctrl_req->u.sym_create_session.op_type
1357 = VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING;
1359 ret = virtio_crypto_sym_pad_op_ctrl_req(ctrl_req,
1360 xform, true, &cipher_key_data, &auth_key_data, session);
1362 VIRTIO_CRYPTO_SESSION_LOG_ERR(
1363 "padding sym op ctrl req failed");
/* Submit CREATE_SESSION over the control virtqueue. */
1366 ret = virtio_crypto_send_command(control_vq, ctrl_req,
1367 cipher_key_data, auth_key_data, session);
1369 VIRTIO_CRYPTO_SESSION_LOG_ERR(
1370 "create session failed: %d", ret);
/* Cipher-only: no auth key is passed to the device (NULL below). */
1374 case VIRTIO_CRYPTO_CMD_CIPHER:
1375 ctrl_req->u.sym_create_session.op_type
1376 = VIRTIO_CRYPTO_SYM_OP_CIPHER;
1377 ret = virtio_crypto_sym_pad_op_ctrl_req(ctrl_req, xform,
1378 false, &cipher_key_data, &auth_key_data, session);
1380 VIRTIO_CRYPTO_SESSION_LOG_ERR(
1381 "padding sym op ctrl req failed");
1384 ret = virtio_crypto_send_command(control_vq, ctrl_req,
1385 cipher_key_data, NULL, session);
1387 VIRTIO_CRYPTO_SESSION_LOG_ERR(
1388 "create session failed: %d", ret);
/* Anything else (e.g. hash-only or unknown) is rejected here. */
1393 VIRTIO_CRYPTO_SESSION_LOG_ERR(
1394 "Unsupported operation chain order parameter");
/* Success: attach driver-private data to the generic session. */
1398 set_sym_session_private_data(sess, dev->driver_id,
/*
 * Report device capabilities and limits to the cryptodev framework.
 *
 * NOTE(review): the return-type line and closing brace are elided from
 * this view (an `info != NULL` guard may also be elided) — code left
 * untouched, comments only.
 */
1408 virtio_crypto_dev_info_get(struct rte_cryptodev *dev,
1409 struct rte_cryptodev_info *info)
1411 struct virtio_crypto_hw *hw = dev->data->dev_private;
1413 PMD_INIT_FUNC_TRACE();
1416 info->driver_id = cryptodev_virtio_driver_id;
1417 info->feature_flags = dev->feature_flags;
/* Queue-pair limit mirrors the number of virtio data queues. */
1418 info->max_nb_queue_pairs = hw->max_dataqueues;
1419 /* No limit of number of sessions */
1420 info->sym.max_nb_sessions = 0;
1421 info->capabilities = hw->virtio_dev_capabilities;
/*
 * PCI probe callback: instantiate a virtio cryptodev for a matched
 * PCI device.  The cryptodev name is derived from the PCI address and
 * creation is delegated to crypto_virtio_create().
 *
 * NOTE(review): the return-type line, parts of the initializer (e.g. a
 * `.name` field) and the bus byte of the debug log are elided from this
 * view — code left untouched, comments only.
 */
1426 crypto_virtio_pci_probe(
1427 struct rte_pci_driver *pci_drv __rte_unused,
1428 struct rte_pci_device *pci_dev)
/* Private data is sized for the driver's per-device hardware state. */
1430 struct rte_cryptodev_pmd_init_params init_params = {
1432 .socket_id = rte_socket_id(),
1433 .private_data_size = sizeof(struct virtio_crypto_hw)
1435 char name[RTE_CRYPTODEV_NAME_MAX_LEN];
1437 VIRTIO_CRYPTO_DRV_LOG_DBG("Found Crypto device at %02x:%02x.%x",
1439 pci_dev->addr.devid,
1440 pci_dev->addr.function);
/* Derive the cryptodev name from the PCI address (BDF form). */
1442 rte_pci_device_name(&pci_dev->addr, name, sizeof(name));
1444 return crypto_virtio_create(name, pci_dev, &init_params);
/*
 * PCI remove callback: look up the cryptodev by its PCI-derived name
 * and tear it down via virtio_crypto_dev_uninit().
 *
 * NOTE(review): pci_dev is annotated __rte_unused but is in fact
 * dereferenced below — the annotation is stale and could be dropped.
 * Return-type line and early-return bodies are elided from this view;
 * code left untouched, comments only.
 */
1448 crypto_virtio_pci_remove(
1449 struct rte_pci_device *pci_dev __rte_unused)
1451 struct rte_cryptodev *cryptodev;
1452 char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
/* Defensive NULL check despite the __rte_unused annotation. */
1454 if (pci_dev == NULL)
/* Reconstruct the name used at probe time to find the device. */
1457 rte_pci_device_name(&pci_dev->addr, cryptodev_name,
1458 sizeof(cryptodev_name));
1460 cryptodev = rte_cryptodev_pmd_get_named_dev(cryptodev_name);
1461 if (cryptodev == NULL)
1464 return virtio_crypto_dev_uninit(cryptodev);
/*
 * PCI driver descriptor: binds the virtio-crypto PCI ID table to the
 * probe/remove callbacks above.  (A `.drv_flags` initializer and the
 * closing brace appear elided from this view.)
 */
1467 static struct rte_pci_driver rte_virtio_crypto_driver = {
1468 .id_table = pci_id_virtio_crypto_map,
1470 .probe = crypto_virtio_pci_probe,
1471 .remove = crypto_virtio_pci_remove
/* Cryptodev driver handle used to obtain a unique driver id. */
1474 static struct cryptodev_driver virtio_crypto_drv;
/* Register the PCI driver with the EAL and register this PMD as a
 * crypto driver, which assigns cryptodev_virtio_driver_id. */
1476 RTE_PMD_REGISTER_PCI(CRYPTODEV_NAME_VIRTIO_PMD, rte_virtio_crypto_driver);
1477 RTE_PMD_REGISTER_CRYPTO_DRIVER(virtio_crypto_drv,
1478 rte_virtio_crypto_driver.driver,
1479 cryptodev_virtio_driver_id);
1481 RTE_INIT(virtio_crypto_init_log)
1483 virtio_crypto_logtype_init = rte_log_register("pmd.crypto.virtio.init");
1484 if (virtio_crypto_logtype_init >= 0)
1485 rte_log_set_level(virtio_crypto_logtype_init, RTE_LOG_NOTICE);
1487 virtio_crypto_logtype_session =
1488 rte_log_register("pmd.crypto.virtio.session");
1489 if (virtio_crypto_logtype_session >= 0)
1490 rte_log_set_level(virtio_crypto_logtype_session,
1493 virtio_crypto_logtype_rx = rte_log_register("pmd.crypto.virtio.rx");
1494 if (virtio_crypto_logtype_rx >= 0)
1495 rte_log_set_level(virtio_crypto_logtype_rx, RTE_LOG_NOTICE);
1497 virtio_crypto_logtype_tx = rte_log_register("pmd.crypto.virtio.tx");
1498 if (virtio_crypto_logtype_tx >= 0)
1499 rte_log_set_level(virtio_crypto_logtype_tx, RTE_LOG_NOTICE);
1501 virtio_crypto_logtype_driver =
1502 rte_log_register("pmd.crypto.virtio.driver");
1503 if (virtio_crypto_logtype_driver >= 0)
1504 rte_log_set_level(virtio_crypto_logtype_driver, RTE_LOG_NOTICE);