1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.
7 #include <rte_common.h>
10 #include <rte_bus_pci.h>
11 #include <rte_cryptodev.h>
12 #include <rte_cryptodev_pmd.h>
15 #include "virtio_cryptodev.h"
16 #include "virtqueue.h"
17 #include "virtio_crypto_algs.h"
18 #include "virtio_crypto_capabilities.h"
20 int virtio_crypto_logtype_init;
21 int virtio_crypto_logtype_session;
22 int virtio_crypto_logtype_rx;
23 int virtio_crypto_logtype_tx;
24 int virtio_crypto_logtype_driver;
26 static int virtio_crypto_dev_configure(struct rte_cryptodev *dev,
27 struct rte_cryptodev_config *config);
28 static int virtio_crypto_dev_start(struct rte_cryptodev *dev);
29 static void virtio_crypto_dev_stop(struct rte_cryptodev *dev);
30 static int virtio_crypto_dev_close(struct rte_cryptodev *dev);
31 static void virtio_crypto_dev_info_get(struct rte_cryptodev *dev,
32 struct rte_cryptodev_info *dev_info);
33 static void virtio_crypto_dev_stats_get(struct rte_cryptodev *dev,
34 struct rte_cryptodev_stats *stats);
35 static void virtio_crypto_dev_stats_reset(struct rte_cryptodev *dev);
36 static int virtio_crypto_qp_setup(struct rte_cryptodev *dev,
37 uint16_t queue_pair_id,
38 const struct rte_cryptodev_qp_conf *qp_conf,
40 static int virtio_crypto_qp_release(struct rte_cryptodev *dev,
41 uint16_t queue_pair_id);
42 static void virtio_crypto_dev_free_mbufs(struct rte_cryptodev *dev);
43 static unsigned int virtio_crypto_sym_get_session_private_size(
44 struct rte_cryptodev *dev);
45 static void virtio_crypto_sym_clear_session(struct rte_cryptodev *dev,
46 struct rte_cryptodev_sym_session *sess);
47 static int virtio_crypto_sym_configure_session(struct rte_cryptodev *dev,
48 struct rte_crypto_sym_xform *xform,
49 struct rte_cryptodev_sym_session *session,
50 struct rte_mempool *mp);
53 * The set of PCI devices this driver supports
55 static const struct rte_pci_id pci_id_virtio_crypto_map[] = {
56 { RTE_PCI_DEVICE(VIRTIO_CRYPTO_PCI_VENDORID,
57 VIRTIO_CRYPTO_PCI_DEVICEID) },
58 { .vendor_id = 0, /* sentinel */ },
61 static const struct rte_cryptodev_capabilities virtio_capabilities[] = {
62 VIRTIO_SYM_CAPABILITIES,
63 RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
66 uint8_t cryptodev_virtio_driver_id;
68 #define NUM_ENTRY_SYM_CREATE_SESSION 4
/*
 * Send a session-create control request to the device over the control
 * virtqueue and busy-wait for completion.
 * Layout: one buffer holds [ctrl req | cipher key | auth key | session
 * input | indirect desc table]; the head ring entry points at the
 * indirect table (VRING_DESC_F_INDIRECT).
 * NOTE(review): this sampled view is missing interior lines (gaps in the
 * embedded numbering) — error-return paths and closing braces are not
 * visible here; comments describe only the visible code.
 */
71 virtio_crypto_send_command(struct virtqueue *vq,
72 struct virtio_crypto_op_ctrl_req *ctrl, uint8_t *cipher_key,
73 uint8_t *auth_key, struct virtio_crypto_session *session)
78 uint32_t len_cipher_key = 0;
79 uint32_t len_auth_key = 0;
80 uint32_t len_ctrl_req = sizeof(struct virtio_crypto_op_ctrl_req);
81 uint32_t len_session_input = sizeof(struct virtio_crypto_session_input);
82 uint32_t len_total = 0;
83 uint32_t input_offset = 0;
84 void *virt_addr_started = NULL;
85 phys_addr_t phys_addr_started;
86 struct vring_desc *desc;
88 struct virtio_crypto_session_input *input;
91 PMD_INIT_FUNC_TRACE();
/* Validate mandatory arguments before touching the ring. */
93 if (session == NULL) {
94 VIRTIO_CRYPTO_SESSION_LOG_ERR("session is NULL.");
97 /* cipher only is supported, it is available if auth_key is NULL */
99 VIRTIO_CRYPTO_SESSION_LOG_ERR("cipher key is NULL.");
103 head = vq->vq_desc_head_idx;
104 VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_desc_head_idx = %d, vq = %p",
/* Bail out if the control queue has no free descriptor. */
107 if (vq->vq_free_cnt < needed) {
108 VIRTIO_CRYPTO_SESSION_LOG_ERR("Not enough entry");
112 /* calculate the length of cipher key */
114 switch (ctrl->u.sym_create_session.op_type) {
115 case VIRTIO_CRYPTO_SYM_OP_CIPHER:
117 = ctrl->u.sym_create_session.u.cipher
120 case VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING:
122 = ctrl->u.sym_create_session.u.chain
123 .para.cipher_param.keylen;
126 VIRTIO_CRYPTO_SESSION_LOG_ERR("invalid op type");
131 /* calculate the length of auth key */
134 ctrl->u.sym_create_session.u.chain.para.u.mac_param
139 * malloc memory to store indirect vring_desc entries, including
140 * ctrl request, cipher key, auth key, session input and desc vring
142 desc_offset = len_ctrl_req + len_cipher_key + len_auth_key
144 virt_addr_started = rte_malloc(NULL,
145 desc_offset + NUM_ENTRY_SYM_CREATE_SESSION
146 * sizeof(struct vring_desc), RTE_CACHE_LINE_SIZE);
147 if (virt_addr_started == NULL) {
148 VIRTIO_CRYPTO_SESSION_LOG_ERR("not enough heap memory");
/* IOVA of the buffer: the device needs physical addresses. */
151 phys_addr_started = rte_malloc_virt2iova(virt_addr_started);
153 /* address to store indirect vring desc entries */
154 desc = (struct vring_desc *)
155 ((uint8_t *)virt_addr_started + desc_offset);
/* Descriptor 0: the ctrl request itself (device-readable). */
158 memcpy(virt_addr_started, ctrl, len_ctrl_req);
159 desc[idx].addr = phys_addr_started;
160 desc[idx].len = len_ctrl_req;
161 desc[idx].flags = VRING_DESC_F_NEXT;
162 desc[idx].next = idx + 1;
164 len_total += len_ctrl_req;
165 input_offset += len_ctrl_req;
167 /* cipher key part */
168 if (len_cipher_key > 0) {
169 memcpy((uint8_t *)virt_addr_started + len_total,
170 cipher_key, len_cipher_key);
172 desc[idx].addr = phys_addr_started + len_total;
173 desc[idx].len = len_cipher_key;
174 desc[idx].flags = VRING_DESC_F_NEXT;
175 desc[idx].next = idx + 1;
177 len_total += len_cipher_key;
178 input_offset += len_cipher_key;
/* Auth key part (chaining mode only). */
182 if (len_auth_key > 0) {
183 memcpy((uint8_t *)virt_addr_started + len_total,
184 auth_key, len_auth_key);
186 desc[idx].addr = phys_addr_started + len_total;
187 desc[idx].len = len_auth_key;
188 desc[idx].flags = VRING_DESC_F_NEXT;
189 desc[idx].next = idx + 1;
191 len_total += len_auth_key;
192 input_offset += len_auth_key;
/* Session input: device-writable; pre-set to error/invalid so a
 * non-response is detectable. */
196 input = (struct virtio_crypto_session_input *)
197 ((uint8_t *)virt_addr_started + input_offset);
198 input->status = VIRTIO_CRYPTO_ERR;
199 input->session_id = ~0ULL;
200 desc[idx].addr = phys_addr_started + len_total;
201 desc[idx].len = len_session_input;
202 desc[idx].flags = VRING_DESC_F_WRITE;
205 /* use a single desc entry */
206 vq->vq_ring.desc[head].addr = phys_addr_started + desc_offset;
207 vq->vq_ring.desc[head].len = idx * sizeof(struct vring_desc);
208 vq->vq_ring.desc[head].flags = VRING_DESC_F_INDIRECT;
211 vq->vq_desc_head_idx = vq->vq_ring.desc[head].next;
213 vq_update_avail_ring(vq, head);
214 vq_update_avail_idx(vq);
216 VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_queue_index = %d",
219 virtqueue_notify(vq);
/* Busy-wait until the device marks the request used. */
222 while (vq->vq_used_cons_idx == vq->vq_ring.used->idx) {
/* Reclaim the used descriptor chain(s) back onto the free list. */
227 while (vq->vq_used_cons_idx != vq->vq_ring.used->idx) {
228 uint32_t idx, desc_idx, used_idx;
229 struct vring_used_elem *uep;
231 used_idx = (uint32_t)(vq->vq_used_cons_idx
232 & (vq->vq_nentries - 1));
233 uep = &vq->vq_ring.used->ring[used_idx];
234 idx = (uint32_t) uep->id;
237 while (vq->vq_ring.desc[desc_idx].flags & VRING_DESC_F_NEXT) {
238 desc_idx = vq->vq_ring.desc[desc_idx].next;
242 vq->vq_ring.desc[desc_idx].next = vq->vq_desc_head_idx;
243 vq->vq_desc_head_idx = idx;
245 vq->vq_used_cons_idx++;
249 VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_free_cnt=%d\n"
250 "vq->vq_desc_head_idx=%d",
251 vq->vq_free_cnt, vq->vq_desc_head_idx);
/* Check the status the device wrote back into session_input. */
254 if (input->status != VIRTIO_CRYPTO_OK) {
255 VIRTIO_CRYPTO_SESSION_LOG_ERR("Something wrong on backend! "
256 "status=%u, session_id=%" PRIu64 "",
257 input->status, input->session_id);
258 rte_free(virt_addr_started);
/* Success: record the device-assigned session id. */
261 session->session_id = input->session_id;
263 VIRTIO_CRYPTO_SESSION_LOG_INFO("Create session successfully, "
264 "session_id=%" PRIu64 "", input->session_id);
265 rte_free(virt_addr_started);
/*
 * Tear down one virtqueue: deactivate it in the device, then free its
 * ring memzone and its op-cookie mempool.
 * NOTE(review): sampled view — the NULL check on vq and the hw
 * assignment are among the elided lines.
 */
273 virtio_crypto_queue_release(struct virtqueue *vq)
275 struct virtio_crypto_hw *hw;
277 PMD_INIT_FUNC_TRACE();
281 /* Select and deactivate the queue */
282 VTPCI_OPS(hw)->del_queue(hw, vq);
284 rte_memzone_free(vq->mz);
285 rte_mempool_free(vq->mpool);
290 #define MPOOL_MAX_NAME_SZ 32
/*
 * Allocate and initialize one virtqueue (data or control) for @dev:
 * query its size from the device, allocate the virtqueue struct, a
 * per-op cookie mempool (data queues only), and a memzone for the
 * vring. On success *pvq receives the new queue.
 * NOTE(review): sampled view — several error-return lines and closing
 * braces are elided; the cleanup labels at the end correspond to the
 * goto-based unwind of mempool/cookie/memzone allocations.
 */
293 virtio_crypto_queue_setup(struct rte_cryptodev *dev,
295 uint16_t vtpci_queue_idx,
298 struct virtqueue **pvq)
300 char vq_name[VIRTQUEUE_MAX_NAME_SZ];
301 char mpool_name[MPOOL_MAX_NAME_SZ];
302 const struct rte_memzone *mz;
303 unsigned int vq_size, size;
304 struct virtio_crypto_hw *hw = dev->data->dev_private;
305 struct virtqueue *vq = NULL;
309 PMD_INIT_FUNC_TRACE();
311 VIRTIO_CRYPTO_INIT_LOG_DBG("setting up queue: %u", vtpci_queue_idx);
314 * Read the virtqueue size from the Queue Size field
315 * Always power of 2 and if 0 virtqueue does not exist
317 vq_size = VTPCI_OPS(hw)->get_queue_num(hw, vtpci_queue_idx);
319 VIRTIO_CRYPTO_INIT_LOG_ERR("virtqueue does not exist");
322 VIRTIO_CRYPTO_INIT_LOG_DBG("vq_size: %u", vq_size);
324 if (!rte_is_power_of_2(vq_size)) {
325 VIRTIO_CRYPTO_INIT_LOG_ERR("virtqueue size is not powerof 2");
/* Name the queue and its mempool after device id + queue role. */
329 if (queue_type == VTCRYPTO_DATAQ) {
330 snprintf(vq_name, sizeof(vq_name), "dev%d_dataqueue%d",
331 dev->data->dev_id, vtpci_queue_idx);
332 snprintf(mpool_name, sizeof(mpool_name),
333 "dev%d_dataqueue%d_mpool",
334 dev->data->dev_id, vtpci_queue_idx);
335 } else if (queue_type == VTCRYPTO_CTRLQ) {
336 snprintf(vq_name, sizeof(vq_name), "dev%d_controlqueue",
338 snprintf(mpool_name, sizeof(mpool_name),
339 "dev%d_controlqueue_mpool",
/* virtqueue struct + one vq_desc_extra per ring entry. */
342 size = RTE_ALIGN_CEIL(sizeof(*vq) +
343 vq_size * sizeof(struct vq_desc_extra),
344 RTE_CACHE_LINE_SIZE);
345 vq = rte_zmalloc_socket(vq_name, size, RTE_CACHE_LINE_SIZE,
348 VIRTIO_CRYPTO_INIT_LOG_ERR("Can not allocate virtqueue");
352 if (queue_type == VTCRYPTO_DATAQ) {
353 /* pre-allocate a mempool and use it in the data plane to
354 * improve performance
356 vq->mpool = rte_mempool_lookup(mpool_name);
357 if (vq->mpool == NULL)
358 vq->mpool = rte_mempool_create(mpool_name,
360 sizeof(struct virtio_crypto_op_cookie),
361 RTE_CACHE_LINE_SIZE, 0,
362 NULL, NULL, NULL, NULL, socket_id,
365 VIRTIO_CRYPTO_DRV_LOG_ERR("Virtio Crypto PMD "
366 "Cannot create mempool");
367 goto mpool_create_err;
/* One zeroed op cookie per descriptor entry. */
369 for (i = 0; i < vq_size; i++) {
370 vq->vq_descx[i].cookie =
371 rte_zmalloc("crypto PMD op cookie pointer",
372 sizeof(struct virtio_crypto_op_cookie),
373 RTE_CACHE_LINE_SIZE);
374 if (vq->vq_descx[i].cookie == NULL) {
375 VIRTIO_CRYPTO_DRV_LOG_ERR("Failed to "
376 "alloc mem for cookie");
377 goto cookie_alloc_err;
383 vq->dev_id = dev->data->dev_id;
384 vq->vq_queue_index = vtpci_queue_idx;
385 vq->vq_nentries = vq_size;
388 * Using part of the vring entries is permitted, but the maximum
/* Clamp requested descriptor count to the device's vring size. */
391 if (nb_desc == 0 || nb_desc > vq_size)
393 vq->vq_free_cnt = nb_desc;
396 * Reserve a memzone for vring elements
398 size = vring_size(vq_size, VIRTIO_PCI_VRING_ALIGN);
399 vq->vq_ring_size = RTE_ALIGN_CEIL(size, VIRTIO_PCI_VRING_ALIGN);
400 VIRTIO_CRYPTO_INIT_LOG_DBG("%s vring_size: %d, rounded_vring_size: %d",
401 (queue_type == VTCRYPTO_DATAQ) ? "dataq" : "ctrlq",
402 size, vq->vq_ring_size);
404 mz = rte_memzone_reserve_aligned(vq_name, vq->vq_ring_size,
405 socket_id, 0, VIRTIO_PCI_VRING_ALIGN);
/* A secondary process may find the memzone already reserved. */
407 if (rte_errno == EEXIST)
408 mz = rte_memzone_lookup(vq_name);
410 VIRTIO_CRYPTO_INIT_LOG_ERR("not enough memory");
416 * Virtio PCI device VIRTIO_PCI_QUEUE_PF register is 32bit,
417 * and only accepts 32 bit page frame number.
418 * Check if the allocated physical memory exceeds 16TB.
420 if ((mz->phys_addr + vq->vq_ring_size - 1)
421 >> (VIRTIO_PCI_QUEUE_ADDR_SHIFT + 32)) {
422 VIRTIO_CRYPTO_INIT_LOG_ERR("vring address shouldn't be "
/* NOTE(review): sizeof(mz->len) zeroes only a few bytes, not the
 * whole ring — looks suspicious, but the elided context prevents a
 * confident fix here; verify against upstream. */
427 memset(mz->addr, 0, sizeof(mz->len));
429 vq->vq_ring_mem = mz->phys_addr;
430 vq->vq_ring_virt_mem = mz->addr;
431 VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_ring_mem(physical): 0x%"PRIx64,
432 (uint64_t)mz->phys_addr);
433 VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_ring_virt_mem: 0x%"PRIx64,
434 (uint64_t)(uintptr_t)mz->addr);
/* Error unwind labels: free in reverse order of allocation. */
441 rte_memzone_free(mz);
444 rte_mempool_free(vq->mpool);
446 for (j = 0; j < i; j++)
447 rte_free(vq->vq_descx[j].cookie);
/*
 * Create the control virtqueue at index @queue_idx and record it in
 * hw->cvq (assignment elided in this sampled view). Refuses to run on
 * a started device.
 */
455 virtio_crypto_ctrlq_setup(struct rte_cryptodev *dev, uint16_t queue_idx)
458 struct virtqueue *vq;
459 struct virtio_crypto_hw *hw = dev->data->dev_private;
461 /* if virtio device has started, do not touch the virtqueues */
462 if (dev->data->dev_started)
465 PMD_INIT_FUNC_TRACE();
/* nb_desc = 0: let queue_setup use the full device ring size. */
467 ret = virtio_crypto_queue_setup(dev, VTCRYPTO_CTRLQ, queue_idx,
468 0, SOCKET_ID_ANY, &vq);
470 VIRTIO_CRYPTO_INIT_LOG_ERR("control vq initialization failed");
/*
 * Release the control queue and every data queue of @dev.
 */
480 virtio_crypto_free_queues(struct rte_cryptodev *dev)
483 struct virtio_crypto_hw *hw = dev->data->dev_private;
485 PMD_INIT_FUNC_TRACE();
487 /* control queue release */
488 virtio_crypto_queue_release(hw->cvq);
490 /* data queue release */
491 for (i = 0; i < hw->max_dataqueues; i++)
492 virtio_crypto_queue_release(dev->data->queue_pairs[i]);
496 virtio_crypto_dev_close(struct rte_cryptodev *dev __rte_unused)
502 * dev_ops for virtio, bare necessities for basic operation
/* Cryptodev ops table wired into every device created by this PMD. */
504 static struct rte_cryptodev_ops virtio_crypto_dev_ops = {
505 /* Device related operations */
506 .dev_configure = virtio_crypto_dev_configure,
507 .dev_start = virtio_crypto_dev_start,
508 .dev_stop = virtio_crypto_dev_stop,
509 .dev_close = virtio_crypto_dev_close,
510 .dev_infos_get = virtio_crypto_dev_info_get,
512 .stats_get = virtio_crypto_dev_stats_get,
513 .stats_reset = virtio_crypto_dev_stats_reset,
515 .queue_pair_setup = virtio_crypto_qp_setup,
516 .queue_pair_release = virtio_crypto_qp_release,
/* No per-qp count callback; framework falls back to its default. */
517 .queue_pair_count = NULL,
519 /* Crypto related operations */
520 .sym_session_get_size = virtio_crypto_sym_get_session_private_size,
521 .sym_session_configure = virtio_crypto_sym_configure_session,
522 .sym_session_clear = virtio_crypto_sym_clear_session
/*
 * Accumulate per-queue packet counters of all data queues into @stats.
 * Queues not yet set up (NULL) are skipped.
 */
526 virtio_crypto_update_stats(struct rte_cryptodev *dev,
527 struct rte_cryptodev_stats *stats)
530 struct virtio_crypto_hw *hw = dev->data->dev_private;
532 PMD_INIT_FUNC_TRACE();
/* Guard against a NULL stats pointer (check condition elided). */
535 VIRTIO_CRYPTO_DRV_LOG_ERR("invalid pointer");
539 for (i = 0; i < hw->max_dataqueues; i++) {
540 const struct virtqueue *data_queue
541 = dev->data->queue_pairs[i];
542 if (data_queue == NULL)
545 stats->enqueued_count += data_queue->packets_sent_total;
546 stats->enqueue_err_count += data_queue->packets_sent_failed;
548 stats->dequeued_count += data_queue->packets_received_total;
549 stats->dequeue_err_count
550 += data_queue->packets_received_failed;
/* stats_get op: thin wrapper over virtio_crypto_update_stats(). */
555 virtio_crypto_dev_stats_get(struct rte_cryptodev *dev,
556 struct rte_cryptodev_stats *stats)
558 PMD_INIT_FUNC_TRACE();
560 virtio_crypto_update_stats(dev, stats);
/*
 * stats_reset op: zero the packet counters of every allocated data
 * queue.
 */
564 virtio_crypto_dev_stats_reset(struct rte_cryptodev *dev)
567 struct virtio_crypto_hw *hw = dev->data->dev_private;
569 PMD_INIT_FUNC_TRACE();
571 for (i = 0; i < hw->max_dataqueues; i++) {
572 struct virtqueue *data_queue = dev->data->queue_pairs[i];
573 if (data_queue == NULL)
576 data_queue->packets_sent_total = 0;
577 data_queue->packets_sent_failed = 0;
579 data_queue->packets_received_total = 0;
580 data_queue->packets_received_failed = 0;
/*
 * queue_pair_setup op: create the data virtqueue for @queue_pair_id
 * and store it in dev->data->queue_pairs. Disallowed while the device
 * is started.
 */
585 virtio_crypto_qp_setup(struct rte_cryptodev *dev, uint16_t queue_pair_id,
586 const struct rte_cryptodev_qp_conf *qp_conf,
590 struct virtqueue *vq;
592 PMD_INIT_FUNC_TRACE();
594 /* if virtio dev is started, do not touch the virtqueues */
595 if (dev->data->dev_started)
598 ret = virtio_crypto_queue_setup(dev, VTCRYPTO_DATAQ, queue_pair_id,
599 qp_conf->nb_descriptors, socket_id, &vq);
601 VIRTIO_CRYPTO_INIT_LOG_ERR(
602 "virtio crypto data queue initialization failed\n")
606 dev->data->queue_pairs[queue_pair_id] = vq;
/*
 * queue_pair_release op: free the virtqueue behind @queue_pair_id;
 * a NULL (already freed) queue is tolerated.
 */
612 virtio_crypto_qp_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
615 = (struct virtqueue *)dev->data->queue_pairs[queue_pair_id];
617 PMD_INIT_FUNC_TRACE();
620 VIRTIO_CRYPTO_DRV_LOG_DBG("vq already freed");
624 virtio_crypto_queue_release(vq);
/*
 * Negotiate virtio features with the host: offer @req_features, read
 * the host's feature bits, keep the intersection, and require
 * VIRTIO_F_VERSION_1 plus FEATURES_OK acknowledgement from the device.
 */
629 virtio_negotiate_features(struct virtio_crypto_hw *hw, uint64_t req_features)
631 uint64_t host_features;
633 PMD_INIT_FUNC_TRACE();
635 /* Prepare guest_features: feature that driver wants to support */
636 VIRTIO_CRYPTO_INIT_LOG_DBG("guest_features before negotiate = %" PRIx64,
639 /* Read device(host) feature bits */
640 host_features = VTPCI_OPS(hw)->get_features(hw);
641 VIRTIO_CRYPTO_INIT_LOG_DBG("host_features before negotiate = %" PRIx64,
645 * Negotiate features: Subset of device feature bits are written back
646 * guest feature bits.
648 hw->guest_features = req_features;
649 hw->guest_features = vtpci_cryptodev_negotiate_features(hw,
651 VIRTIO_CRYPTO_INIT_LOG_DBG("features after negotiate = %" PRIx64,
/* This driver only supports the modern (v1.0+) virtio interface. */
655 if (!vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) {
656 VIRTIO_CRYPTO_INIT_LOG_ERR(
657 "VIRTIO_F_VERSION_1 features is not enabled.");
/* Write FEATURES_OK, then read status back to confirm the device
 * accepted the negotiated feature set. */
660 vtpci_cryptodev_set_status(hw,
661 VIRTIO_CONFIG_STATUS_FEATURES_OK);
662 if (!(vtpci_cryptodev_get_status(hw) &
663 VIRTIO_CONFIG_STATUS_FEATURES_OK)) {
664 VIRTIO_CRYPTO_INIT_LOG_ERR("failed to set FEATURES_OK "
670 hw->req_guest_features = req_features;
675 /* reset device and renegotiate features if needed */
677 virtio_crypto_init_device(struct rte_cryptodev *cryptodev,
678 uint64_t req_features)
680 struct virtio_crypto_hw *hw = cryptodev->data->dev_private;
681 struct virtio_crypto_config local_config;
682 struct virtio_crypto_config *config = &local_config;
684 PMD_INIT_FUNC_TRACE();
686 /* Reset the device although not necessary at startup */
687 vtpci_cryptodev_reset(hw);
689 /* Tell the host we've noticed this device. */
690 vtpci_cryptodev_set_status(hw, VIRTIO_CONFIG_STATUS_ACK);
692 /* Tell the host we've known how to drive the device. */
693 vtpci_cryptodev_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER);
694 if (virtio_negotiate_features(hw, req_features) < 0)
697 /* Get status of the device */
698 vtpci_read_cryptodev_config(hw,
699 offsetof(struct virtio_crypto_config, status),
700 &config->status, sizeof(config->status));
/* The accelerator must report hardware-ready before use. */
701 if (config->status != VIRTIO_CRYPTO_S_HW_READY) {
702 VIRTIO_CRYPTO_DRV_LOG_ERR("accelerator hardware is "
707 /* Get number of data queues */
708 vtpci_read_cryptodev_config(hw,
709 offsetof(struct virtio_crypto_config, max_dataqueues),
710 &config->max_dataqueues,
711 sizeof(config->max_dataqueues));
712 hw->max_dataqueues = config->max_dataqueues;
714 VIRTIO_CRYPTO_INIT_LOG_DBG("hw->max_dataqueues=%d",
721 * This function is based on probe() function
722 * It returns 0 on success.
725 crypto_virtio_create(const char *name, struct rte_pci_device *pci_dev,
726 struct rte_cryptodev_pmd_init_params *init_params)
728 struct rte_cryptodev *cryptodev;
729 struct virtio_crypto_hw *hw;
731 PMD_INIT_FUNC_TRACE();
/* Allocate the rte_cryptodev and its private data area. */
733 cryptodev = rte_cryptodev_pmd_create(name, &pci_dev->device,
735 if (cryptodev == NULL)
738 cryptodev->driver_id = cryptodev_virtio_driver_id;
739 cryptodev->dev_ops = &virtio_crypto_dev_ops;
/* Fast-path burst entry points. */
741 cryptodev->enqueue_burst = virtio_crypto_pkt_tx_burst;
742 cryptodev->dequeue_burst = virtio_crypto_pkt_rx_burst;
744 cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
745 RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING;
747 hw = cryptodev->data->dev_private;
748 hw->dev_id = cryptodev->data->dev_id;
749 hw->virtio_dev_capabilities = virtio_capabilities;
751 VIRTIO_CRYPTO_INIT_LOG_DBG("dev %d vendorID=0x%x deviceID=0x%x",
752 cryptodev->data->dev_id, pci_dev->id.vendor_id,
753 pci_dev->id.device_id);
755 /* pci device init */
756 if (vtpci_cryptodev_init(pci_dev, hw))
/* Reset + feature negotiation + config read. */
759 if (virtio_crypto_init_device(cryptodev,
760 VIRTIO_CRYPTO_PMD_GUEST_FEATURES) < 0)
/*
 * Undo crypto_virtio_create(): stop/close a running device, clear the
 * ops and burst pointers, release the control queue, and free the
 * device data. Only valid in the primary process.
 */
767 virtio_crypto_dev_uninit(struct rte_cryptodev *cryptodev)
769 struct virtio_crypto_hw *hw = cryptodev->data->dev_private;
771 PMD_INIT_FUNC_TRACE();
773 if (rte_eal_process_type() == RTE_PROC_SECONDARY)
776 if (cryptodev->data->dev_started) {
777 virtio_crypto_dev_stop(cryptodev);
778 virtio_crypto_dev_close(cryptodev);
781 cryptodev->dev_ops = NULL;
782 cryptodev->enqueue_burst = NULL;
783 cryptodev->dequeue_burst = NULL;
785 /* release control queue */
786 virtio_crypto_queue_release(hw->cvq);
788 rte_free(cryptodev->data);
789 cryptodev->data = NULL;
791 VIRTIO_CRYPTO_DRV_LOG_INFO("dev_uninit completed");
/*
 * dev_configure op: re-init the device (reset + renegotiate features)
 * and set up + start the control queue. The @config argument is
 * ignored.
 */
797 virtio_crypto_dev_configure(struct rte_cryptodev *cryptodev,
798 struct rte_cryptodev_config *config __rte_unused)
800 struct virtio_crypto_hw *hw = cryptodev->data->dev_private;
802 PMD_INIT_FUNC_TRACE();
804 if (virtio_crypto_init_device(cryptodev,
805 VIRTIO_CRYPTO_PMD_GUEST_FEATURES) < 0)
808 /* setup control queue
809 * [0, 1, ... ,(config->max_dataqueues - 1)] are data queues
810 * config->max_dataqueues is the control queue
812 if (virtio_crypto_ctrlq_setup(cryptodev, hw->max_dataqueues) < 0) {
813 VIRTIO_CRYPTO_INIT_LOG_ERR("control queue setup error");
816 virtio_crypto_ctrlq_start(cryptodev);
/*
 * dev_stop op: reset the device, drain in-flight buffers, free all
 * virtqueues, and mark the device stopped.
 */
822 virtio_crypto_dev_stop(struct rte_cryptodev *dev)
824 struct virtio_crypto_hw *hw = dev->data->dev_private;
826 PMD_INIT_FUNC_TRACE();
827 VIRTIO_CRYPTO_DRV_LOG_DBG("virtio_dev_stop");
829 vtpci_cryptodev_reset(hw);
831 virtio_crypto_dev_free_mbufs(dev);
832 virtio_crypto_free_queues(dev);
834 dev->data->dev_started = 0;
/*
 * dev_start op: start the data queues, signal DRIVER_OK to the device,
 * and mark the device started. Idempotent if already started.
 */
838 virtio_crypto_dev_start(struct rte_cryptodev *dev)
840 struct virtio_crypto_hw *hw = dev->data->dev_private;
842 if (dev->data->dev_started)
845 /* Do final configuration before queue engine starts */
846 virtio_crypto_dataq_start(dev);
847 vtpci_cryptodev_reinit_complete(hw);
849 dev->data->dev_started = 1;
/*
 * Detach and free buffers still sitting unused in every data queue,
 * dumping each queue before and after for debugging.
 */
855 virtio_crypto_dev_free_mbufs(struct rte_cryptodev *dev)
858 struct virtio_crypto_hw *hw = dev->data->dev_private;
860 for (i = 0; i < hw->max_dataqueues; i++) {
861 VIRTIO_CRYPTO_INIT_LOG_DBG("Before freeing dataq[%d] used "
862 "and unused buf", i);
863 VIRTQUEUE_DUMP((struct virtqueue *)
864 dev->data->queue_pairs[i]);
866 VIRTIO_CRYPTO_INIT_LOG_DBG("queue_pairs[%d]=%p",
867 i, dev->data->queue_pairs[i]);
869 virtqueue_detatch_unused(dev->data->queue_pairs[i]);
871 VIRTIO_CRYPTO_INIT_LOG_DBG("After freeing dataq[%d] used and "
874 (struct virtqueue *)dev->data->queue_pairs[i]);
/*
 * sym_session_get_size op: size of this PMD's per-session private
 * data, rounded up to a multiple of 16 bytes.
 */
879 virtio_crypto_sym_get_session_private_size(
880 struct rte_cryptodev *dev __rte_unused)
882 PMD_INIT_FUNC_TRACE();
884 return RTE_ALIGN_CEIL(sizeof(struct virtio_crypto_session), 16);
/*
 * Sanity-check the device pointer chain used by session operations:
 * dev, dev->data, private data, and the control virtqueue must all be
 * non-NULL.
 */
888 virtio_crypto_check_sym_session_paras(
889 struct rte_cryptodev *dev)
891 struct virtio_crypto_hw *hw;
893 PMD_INIT_FUNC_TRACE();
895 if (unlikely(dev == NULL)) {
896 VIRTIO_CRYPTO_SESSION_LOG_ERR("dev is NULL");
899 if (unlikely(dev->data == NULL)) {
900 VIRTIO_CRYPTO_SESSION_LOG_ERR("dev->data is NULL");
903 hw = dev->data->dev_private;
904 if (unlikely(hw == NULL)) {
905 VIRTIO_CRYPTO_SESSION_LOG_ERR("hw is NULL");
908 if (unlikely(hw->cvq == NULL)) {
909 VIRTIO_CRYPTO_SESSION_LOG_ERR("vq is NULL");
/*
 * Validate arguments for session clear: session must be non-NULL
 * (check condition elided in this sampled view), then defer to the
 * generic device check.
 */
917 virtio_crypto_check_sym_clear_session_paras(
918 struct rte_cryptodev *dev,
919 struct rte_cryptodev_sym_session *sess)
921 PMD_INIT_FUNC_TRACE();
924 VIRTIO_CRYPTO_SESSION_LOG_ERR("sym_session is NULL");
928 return virtio_crypto_check_sym_session_paras(dev);
931 #define NUM_ENTRY_SYM_CLEAR_SESSION 2
/*
 * sym_session_clear op: send a DESTROY_SESSION control request for the
 * session's device-side id, busy-wait for completion, then release the
 * session object back to its mempool.
 * Uses a 2-entry indirect chain: [ctrl request | device-writable
 * status inhdr].
 * NOTE(review): sampled view — some interior lines (returns, closing
 * braces, the vq assignment from hw->cvq) are elided.
 */
934 virtio_crypto_sym_clear_session(
935 struct rte_cryptodev *dev,
936 struct rte_cryptodev_sym_session *sess)
938 struct virtio_crypto_hw *hw;
939 struct virtqueue *vq;
940 struct virtio_crypto_session *session;
941 struct virtio_crypto_op_ctrl_req *ctrl;
942 struct vring_desc *desc;
946 uint8_t *malloc_virt_addr;
947 uint64_t malloc_phys_addr;
948 uint8_t len_inhdr = sizeof(struct virtio_crypto_inhdr);
949 uint32_t len_op_ctrl_req = sizeof(struct virtio_crypto_op_ctrl_req);
950 uint32_t desc_offset = len_op_ctrl_req + len_inhdr;
952 PMD_INIT_FUNC_TRACE();
954 if (virtio_crypto_check_sym_clear_session_paras(dev, sess) < 0)
957 hw = dev->data->dev_private;
/* Retrieve this PMD's private session data attached to sess. */
959 session = (struct virtio_crypto_session *)get_sym_session_private_data(
960 sess, cryptodev_virtio_driver_id);
961 if (session == NULL) {
962 VIRTIO_CRYPTO_SESSION_LOG_ERR("Invalid session parameter");
966 VIRTIO_CRYPTO_SESSION_LOG_INFO("vq->vq_desc_head_idx = %d, "
967 "vq = %p", vq->vq_desc_head_idx, vq);
969 if (vq->vq_free_cnt < needed) {
970 VIRTIO_CRYPTO_SESSION_LOG_ERR(
971 "vq->vq_free_cnt = %d is less than %d, "
972 "not enough", vq->vq_free_cnt, needed);
977 * malloc memory to store information of ctrl request op,
978 * returned status and desc vring
980 malloc_virt_addr = rte_malloc(NULL, len_op_ctrl_req + len_inhdr
981 + NUM_ENTRY_SYM_CLEAR_SESSION
982 * sizeof(struct vring_desc), RTE_CACHE_LINE_SIZE);
983 if (malloc_virt_addr == NULL) {
984 VIRTIO_CRYPTO_SESSION_LOG_ERR("not enough heap room");
987 malloc_phys_addr = rte_malloc_virt2iova(malloc_virt_addr);
989 /* assign ctrl request op part */
990 ctrl = (struct virtio_crypto_op_ctrl_req *)malloc_virt_addr;
991 ctrl->header.opcode = VIRTIO_CRYPTO_CIPHER_DESTROY_SESSION;
992 /* default data virtqueue is 0 */
993 ctrl->header.queue_id = 0;
994 ctrl->u.destroy_session.session_id = session->session_id;
/* Status byte the device writes back; pre-set to error. */
997 status = &(((struct virtio_crypto_inhdr *)
998 ((uint8_t *)malloc_virt_addr + len_op_ctrl_req))->status);
999 *status = VIRTIO_CRYPTO_ERR;
1001 /* indirect desc vring part */
1002 desc = (struct vring_desc *)((uint8_t *)malloc_virt_addr
1005 /* ctrl request part */
1006 desc[0].addr = malloc_phys_addr;
1007 desc[0].len = len_op_ctrl_req;
1008 desc[0].flags = VRING_DESC_F_NEXT;
/* Status inhdr part (device-writable). */
1012 desc[1].addr = malloc_phys_addr + len_op_ctrl_req;
1013 desc[1].len = len_inhdr;
1014 desc[1].flags = VRING_DESC_F_WRITE;
1016 /* use only a single desc entry */
1017 head = vq->vq_desc_head_idx;
1018 vq->vq_ring.desc[head].flags = VRING_DESC_F_INDIRECT;
1019 vq->vq_ring.desc[head].addr = malloc_phys_addr + desc_offset;
1020 vq->vq_ring.desc[head].len
1021 = NUM_ENTRY_SYM_CLEAR_SESSION
1022 * sizeof(struct vring_desc);
1023 vq->vq_free_cnt -= needed;
1025 vq->vq_desc_head_idx = vq->vq_ring.desc[head].next;
1027 vq_update_avail_ring(vq, head);
1028 vq_update_avail_idx(vq);
1030 VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_queue_index = %d",
1031 vq->vq_queue_index);
1033 virtqueue_notify(vq);
/* Busy-wait for the device to consume the request. */
1036 while (vq->vq_used_cons_idx == vq->vq_ring.used->idx) {
/* Reclaim used descriptor chains back to the free list. */
1041 while (vq->vq_used_cons_idx != vq->vq_ring.used->idx) {
1042 uint32_t idx, desc_idx, used_idx;
1043 struct vring_used_elem *uep;
1045 used_idx = (uint32_t)(vq->vq_used_cons_idx
1046 & (vq->vq_nentries - 1));
1047 uep = &vq->vq_ring.used->ring[used_idx];
1048 idx = (uint32_t) uep->id;
1050 while (vq->vq_ring.desc[desc_idx].flags
1051 & VRING_DESC_F_NEXT) {
1052 desc_idx = vq->vq_ring.desc[desc_idx].next;
1056 vq->vq_ring.desc[desc_idx].next = vq->vq_desc_head_idx;
1057 vq->vq_desc_head_idx = idx;
1058 vq->vq_used_cons_idx++;
/* Device reported failure: log and free the request buffer. */
1062 if (*status != VIRTIO_CRYPTO_OK) {
1063 VIRTIO_CRYPTO_SESSION_LOG_ERR("Close session failed "
1064 "status=%"PRIu32", session_id=%"PRIu64"",
1065 *status, session->session_id);
1066 rte_free(malloc_virt_addr);
1070 VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_free_cnt=%d\n"
1071 "vq->vq_desc_head_idx=%d",
1072 vq->vq_free_cnt, vq->vq_desc_head_idx);
1074 VIRTIO_CRYPTO_SESSION_LOG_INFO("Close session %"PRIu64" successfully ",
1075 session->session_id);
/* Scrub and return the session object to its mempool, and detach
 * it from the generic session handle. */
1077 memset(session, 0, sizeof(struct virtio_crypto_session));
1078 struct rte_mempool *sess_mp = rte_mempool_from_obj(session);
1079 set_sym_session_private_data(sess, cryptodev_virtio_driver_id, NULL);
1080 rte_mempool_put(sess_mp, session);
1081 rte_free(malloc_virt_addr);
/*
 * Walk the xform chain and return the first cipher xform, or fall
 * through (NULL return elided in this sampled view) if none exists.
 */
1084 static struct rte_crypto_cipher_xform *
1085 virtio_crypto_get_cipher_xform(struct rte_crypto_sym_xform *xform)
1088 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
1089 return &xform->cipher;
1091 xform = xform->next;
/*
 * Walk the xform chain and return the first auth xform, or fall
 * through (NULL return elided in this sampled view) if none exists.
 */
1097 static struct rte_crypto_auth_xform *
1098 virtio_crypto_get_auth_xform(struct rte_crypto_sym_xform *xform)
1101 if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH)
1102 return &xform->auth;
1104 xform = xform->next;
1110 /** Get xform chain order */
/*
 * Classify the xform chain: cipher-only, auth-only, auth-then-cipher,
 * or cipher-then-auth. Unrecognized chains fall through to the elided
 * default return.
 */
1112 virtio_crypto_get_chain_order(struct rte_crypto_sym_xform *xform)
/* Cipher Only */
1118 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
1119 xform->next == NULL)
1120 return VIRTIO_CRYPTO_CMD_CIPHER;
1122 /* Authentication Only */
1123 if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
1124 xform->next == NULL)
1125 return VIRTIO_CRYPTO_CMD_AUTH;
1127 /* Authenticate then Cipher */
1128 if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
1129 xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
1130 return VIRTIO_CRYPTO_CMD_HASH_CIPHER;
1132 /* Cipher then Authenticate */
1133 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
1134 xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
1135 return VIRTIO_CRYPTO_CMD_CIPHER_HASH;
/*
 * Translate an rte cipher xform into virtio cipher session params.
 * Only AES-CBC is supported; key length and encrypt/decrypt direction
 * are copied through.
 */
1141 virtio_crypto_sym_pad_cipher_param(
1142 struct virtio_crypto_cipher_session_para *para,
1143 struct rte_crypto_cipher_xform *cipher_xform)
1145 switch (cipher_xform->algo) {
1146 case RTE_CRYPTO_CIPHER_AES_CBC:
1147 para->algo = VIRTIO_CRYPTO_CIPHER_AES_CBC;
1150 VIRTIO_CRYPTO_SESSION_LOG_ERR("Crypto: Unsupported "
1151 "Cipher alg %u", cipher_xform->algo);
1155 para->keylen = cipher_xform->key.length;
1156 switch (cipher_xform->op) {
1157 case RTE_CRYPTO_CIPHER_OP_ENCRYPT:
1158 para->op = VIRTIO_CRYPTO_OP_ENCRYPT;
1160 case RTE_CRYPTO_CIPHER_OP_DECRYPT:
1161 para->op = VIRTIO_CRYPTO_OP_DECRYPT;
1164 VIRTIO_CRYPTO_SESSION_LOG_ERR("Unsupported cipher operation "
/*
 * Translate an rte auth xform into the virtio chain-session hash/mac
 * algorithm field. The destination field depends on whether the chain
 * uses plain-hash or keyed-MAC mode; only HMAC-SHA1 is supported.
 */
1173 virtio_crypto_sym_pad_auth_param(
1174 struct virtio_crypto_op_ctrl_req *ctrl,
1175 struct rte_crypto_auth_xform *auth_xform)
1178 struct virtio_crypto_alg_chain_session_para *para =
1179 &(ctrl->u.sym_create_session.u.chain.para);
/* Pick the algo field matching the previously-set hash mode. */
1181 switch (ctrl->u.sym_create_session.u.chain.para.hash_mode) {
1182 case VIRTIO_CRYPTO_SYM_HASH_MODE_PLAIN:
1183 algo = &(para->u.hash_param.algo);
1185 case VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH:
1186 algo = &(para->u.mac_param.algo);
1189 VIRTIO_CRYPTO_SESSION_LOG_ERR("Unsupported hash mode %u "
1191 ctrl->u.sym_create_session.u.chain.para.hash_mode);
1195 switch (auth_xform->algo) {
1196 case RTE_CRYPTO_AUTH_SHA1_HMAC:
1197 *algo = VIRTIO_CRYPTO_MAC_HMAC_SHA1;
1200 VIRTIO_CRYPTO_SESSION_LOG_ERR(
1201 "Crypto: Undefined Hash algo %u specified",
/*
 * Fill a session-create ctrl request from the rte xform chain: cipher
 * parameters (and IV bookkeeping in @session), plus hash/MAC
 * parameters when @is_chainned. Key bytes are copied into the
 * caller-provided cipher_key_data/auth_key_data buffers.
 * NOTE(review): sampled view — some error returns and braces elided.
 */
1210 virtio_crypto_sym_pad_op_ctrl_req(
1211 struct virtio_crypto_op_ctrl_req *ctrl,
1212 struct rte_crypto_sym_xform *xform, bool is_chainned,
1213 uint8_t *cipher_key_data, uint8_t *auth_key_data,
1214 struct virtio_crypto_session *session)
1217 struct rte_crypto_auth_xform *auth_xform = NULL;
1218 struct rte_crypto_cipher_xform *cipher_xform = NULL;
1220 /* Get cipher xform from crypto xform chain */
1221 cipher_xform = virtio_crypto_get_cipher_xform(xform);
/* Bound-check key and IV against the PMD's fixed buffers. */
1223 if (cipher_xform->key.length > VIRTIO_CRYPTO_MAX_KEY_SIZE) {
1224 VIRTIO_CRYPTO_SESSION_LOG_ERR(
1225 "cipher key size cannot be longer than %u",
1226 VIRTIO_CRYPTO_MAX_KEY_SIZE);
1229 if (cipher_xform->iv.length > VIRTIO_CRYPTO_MAX_IV_SIZE) {
1230 VIRTIO_CRYPTO_SESSION_LOG_ERR(
1231 "cipher IV size cannot be longer than %u",
1232 VIRTIO_CRYPTO_MAX_IV_SIZE);
/* Chained sessions store cipher params under .chain, plain cipher
 * sessions under .cipher. */
1236 ret = virtio_crypto_sym_pad_cipher_param(
1237 &ctrl->u.sym_create_session.u.chain.para
1238 .cipher_param, cipher_xform);
1240 ret = virtio_crypto_sym_pad_cipher_param(
1241 &ctrl->u.sym_create_session.u.cipher.para,
1245 VIRTIO_CRYPTO_SESSION_LOG_ERR(
1246 "pad cipher parameter failed");
1250 memcpy(cipher_key_data, cipher_xform->key.data,
1251 cipher_xform->key.length);
/* Remember where the IV lives in each op for the data path. */
1253 session->iv.offset = cipher_xform->iv.offset;
1254 session->iv.length = cipher_xform->iv.length;
1257 /* Get auth xform from crypto xform chain */
1258 auth_xform = virtio_crypto_get_auth_xform(xform);
1260 /* FIXME: support VIRTIO_CRYPTO_SYM_HASH_MODE_NESTED */
1261 struct virtio_crypto_alg_chain_session_para *para =
1262 &(ctrl->u.sym_create_session.u.chain.para);
/* Keyed auth -> MAC mode; keyless -> plain hash mode. */
1263 if (auth_xform->key.length) {
1264 if (auth_xform->key.length >
1265 VIRTIO_CRYPTO_MAX_KEY_SIZE) {
1266 VIRTIO_CRYPTO_SESSION_LOG_ERR(
1267 "auth key size cannot be longer than %u",
1268 VIRTIO_CRYPTO_MAX_KEY_SIZE);
1271 para->hash_mode = VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH;
1272 para->u.mac_param.auth_key_len =
1273 (uint32_t)auth_xform->key.length;
1274 para->u.mac_param.hash_result_len =
1275 auth_xform->digest_length;
1276 memcpy(auth_key_data, auth_xform->key.data,
1277 auth_xform->key.length);
1279 para->hash_mode = VIRTIO_CRYPTO_SYM_HASH_MODE_PLAIN;
1280 para->u.hash_param.hash_result_len =
1281 auth_xform->digest_length;
1284 ret = virtio_crypto_sym_pad_auth_param(ctrl, auth_xform);
1286 VIRTIO_CRYPTO_SESSION_LOG_ERR("pad auth parameter "
/*
 * Validate session-configure arguments: xform, session handle, and
 * mempool must be non-NULL, then defer to the generic device check.
 */
1296 virtio_crypto_check_sym_configure_session_paras(
1297 struct rte_cryptodev *dev,
1298 struct rte_crypto_sym_xform *xform,
1299 struct rte_cryptodev_sym_session *sym_sess,
1300 struct rte_mempool *mempool)
1302 if (unlikely(xform == NULL) || unlikely(sym_sess == NULL) ||
1303 unlikely(mempool == NULL)) {
1304 VIRTIO_CRYPTO_SESSION_LOG_ERR("NULL pointer");
1308 if (virtio_crypto_check_sym_session_paras(dev) < 0)
/*
 * Create a symmetric crypto session on the virtio-crypto device.
 *
 * Validates the parameters, allocates the private session object from the
 * caller's mempool, builds a VIRTIO_CRYPTO_CIPHER_CREATE_SESSION control
 * request according to the xform chain (cipher-only or cipher+hash
 * chaining), and sends it to the backend over the control virtqueue.
 *
 * NOTE(review): this chunk is a gapped extraction — the `switch` header,
 * error-handling branches, `break` statements and cleanup paths are not
 * visible; comments below describe only the statements that are shown.
 */
1315 virtio_crypto_sym_configure_session(
1316 struct rte_cryptodev *dev,
1317 struct rte_crypto_sym_xform *xform,
1318 struct rte_cryptodev_sym_session *sess,
1319 struct rte_mempool *mempool)
1322 struct virtio_crypto_session crypto_sess;
1323 void *session_private = &crypto_sess;
1324 struct virtio_crypto_session *session;
1325 struct virtio_crypto_op_ctrl_req *ctrl_req;
1326 enum virtio_crypto_cmd_id cmd_id;
/* Key material is staged in zeroed local buffers before being sent. */
1327 uint8_t cipher_key_data[VIRTIO_CRYPTO_MAX_KEY_SIZE] = {0};
1328 uint8_t auth_key_data[VIRTIO_CRYPTO_MAX_KEY_SIZE] = {0};
1329 struct virtio_crypto_hw *hw;
1330 struct virtqueue *control_vq;
1332 PMD_INIT_FUNC_TRACE();
/* Reject NULL/invalid arguments before touching the device. */
1334 ret = virtio_crypto_check_sym_configure_session_paras(dev, xform,
1337 VIRTIO_CRYPTO_SESSION_LOG_ERR("Invalid parameters");
/* Private session object comes from the caller-provided mempool. */
1341 if (rte_mempool_get(mempool, &session_private)) {
1342 VIRTIO_CRYPTO_SESSION_LOG_ERR(
1343 "Couldn't get object from session mempool");
1347 session = (struct virtio_crypto_session *)session_private;
1348 memset(session, 0, sizeof(struct virtio_crypto_session));
/* Begin building the control request embedded in the session object. */
1349 ctrl_req = &session->ctrl;
1350 ctrl_req->header.opcode = VIRTIO_CRYPTO_CIPHER_CREATE_SESSION;
1351 /* FIXME: support multiqueue */
1352 ctrl_req->header.queue_id = 0;
1354 hw = dev->data->dev_private;
1355 control_vq = hw->cvq;
/* Derive chaining order from the xform list and record it for the device. */
1357 cmd_id = virtio_crypto_get_chain_order(xform);
1358 if (cmd_id == VIRTIO_CRYPTO_CMD_CIPHER_HASH)
1359 ctrl_req->u.sym_create_session.u.chain.para.alg_chain_order
1360 = VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_CIPHER_THEN_HASH;
1361 if (cmd_id == VIRTIO_CRYPTO_CMD_HASH_CIPHER)
1362 ctrl_req->u.sym_create_session.u.chain.para.alg_chain_order
1363 = VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_HASH_THEN_CIPHER;
/* Chained cipher+hash: pad both cipher and auth params, then send. */
1366 case VIRTIO_CRYPTO_CMD_CIPHER_HASH:
1367 case VIRTIO_CRYPTO_CMD_HASH_CIPHER:
1368 ctrl_req->u.sym_create_session.op_type
1369 = VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING;
1371 ret = virtio_crypto_sym_pad_op_ctrl_req(ctrl_req,
1372 xform, true, cipher_key_data, auth_key_data, session);
1374 VIRTIO_CRYPTO_SESSION_LOG_ERR(
1375 "padding sym op ctrl req failed");
/* Issue CREATE_SESSION on the control virtqueue with both keys. */
1378 ret = virtio_crypto_send_command(control_vq, ctrl_req,
1379 cipher_key_data, auth_key_data, session);
1381 VIRTIO_CRYPTO_SESSION_LOG_ERR(
1382 "create session failed: %d", ret);
/* Cipher-only session: auth key is not sent (NULL below). */
1386 case VIRTIO_CRYPTO_CMD_CIPHER:
1387 ctrl_req->u.sym_create_session.op_type
1388 = VIRTIO_CRYPTO_SYM_OP_CIPHER;
1389 ret = virtio_crypto_sym_pad_op_ctrl_req(ctrl_req, xform,
1390 false, cipher_key_data, auth_key_data, session);
1392 VIRTIO_CRYPTO_SESSION_LOG_ERR(
1393 "padding sym op ctrl req failed");
1396 ret = virtio_crypto_send_command(control_vq, ctrl_req,
1397 cipher_key_data, NULL, session);
1399 VIRTIO_CRYPTO_SESSION_LOG_ERR(
1400 "create session failed: %d", ret);
/* Presumably the default: branch — unsupported chain order. */
1405 VIRTIO_CRYPTO_SESSION_LOG_ERR(
1406 "Unsupported operation chain order parameter");
/* Attach the driver-private session data to the generic session handle. */
1410 set_sym_session_private_data(sess, dev->driver_id,
/*
 * cryptodev dev_infos_get callback: report driver id, feature flags,
 * queue-pair limit and capability table for this virtio-crypto device.
 */
1420 virtio_crypto_dev_info_get(struct rte_cryptodev *dev,
1421 struct rte_cryptodev_info *info)
1423 struct virtio_crypto_hw *hw = dev->data->dev_private;
1425 PMD_INIT_FUNC_TRACE();
/* NOTE(review): a NULL check on `info` is presumably in the elided lines. */
1428 info->driver_id = cryptodev_virtio_driver_id;
1429 info->feature_flags = dev->feature_flags;
/* Data queues discovered at init bound the number of queue pairs. */
1430 info->max_nb_queue_pairs = hw->max_dataqueues;
1431 /* No limit of number of sessions */
1432 info->sym.max_nb_sessions = 0;
1433 info->capabilities = hw->virtio_dev_capabilities;
/*
 * PCI probe callback: derive the cryptodev name from the PCI address and
 * create the virtio-crypto device with NUMA-local private data.
 */
1438 crypto_virtio_pci_probe(
1439 struct rte_pci_driver *pci_drv __rte_unused,
1440 struct rte_pci_device *pci_dev)
1442 struct rte_cryptodev_pmd_init_params init_params = {
/* Allocate device private data on the device's NUMA node. */
1444 .socket_id = pci_dev->device.numa_node,
1445 .private_data_size = sizeof(struct virtio_crypto_hw)
1447 char name[RTE_CRYPTODEV_NAME_MAX_LEN];
1449 VIRTIO_CRYPTO_DRV_LOG_DBG("Found Crypto device at %02x:%02x.%x",
1451 pci_dev->addr.devid,
1452 pci_dev->addr.function);
/* Canonical PCI-address string becomes the cryptodev name. */
1454 rte_pci_device_name(&pci_dev->addr, name, sizeof(name));
1456 return crypto_virtio_create(name, pci_dev, &init_params);
/*
 * PCI remove callback: look up the cryptodev by its PCI-address-derived
 * name and tear it down via virtio_crypto_dev_uninit().
 * NOTE(review): the elided lines presumably return an error code when
 * pci_dev or the named cryptodev is NULL.
 */
1460 crypto_virtio_pci_remove(
1461 struct rte_pci_device *pci_dev __rte_unused)
1463 struct rte_cryptodev *cryptodev;
1464 char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
1466 if (pci_dev == NULL)
/* Same naming scheme as probe, so the lookup finds the probed device. */
1469 rte_pci_device_name(&pci_dev->addr, cryptodev_name,
1470 sizeof(cryptodev_name));
1472 cryptodev = rte_cryptodev_pmd_get_named_dev(cryptodev_name);
1473 if (cryptodev == NULL)
1476 return virtio_crypto_dev_uninit(cryptodev);
/*
 * PCI driver descriptor and registration: binds the probe/remove callbacks
 * to the virtio-crypto vendor/device ID table, then registers the PMD with
 * both the PCI bus and the cryptodev framework under a shared driver id.
 */
1479 static struct rte_pci_driver rte_virtio_crypto_driver = {
1480 .id_table = pci_id_virtio_crypto_map,
1482 .probe = crypto_virtio_pci_probe,
1483 .remove = crypto_virtio_pci_remove
1486 static struct cryptodev_driver virtio_crypto_drv;
1488 RTE_PMD_REGISTER_PCI(CRYPTODEV_NAME_VIRTIO_PMD, rte_virtio_crypto_driver);
1489 RTE_PMD_REGISTER_CRYPTO_DRIVER(virtio_crypto_drv,
1490 rte_virtio_crypto_driver.driver,
1491 cryptodev_virtio_driver_id);
/*
 * Constructor: register one dynamic log type per PMD subsystem
 * (init/session/rx/tx/driver) and default each to NOTICE level.
 * A negative id from rte_log_register() means registration failed,
 * so the level is only set on success.
 */
1493 RTE_INIT(virtio_crypto_init_log)
1495 virtio_crypto_logtype_init = rte_log_register("pmd.crypto.virtio.init");
1496 if (virtio_crypto_logtype_init >= 0)
1497 rte_log_set_level(virtio_crypto_logtype_init, RTE_LOG_NOTICE);
1499 virtio_crypto_logtype_session =
1500 rte_log_register("pmd.crypto.virtio.session");
1501 if (virtio_crypto_logtype_session >= 0)
1502 rte_log_set_level(virtio_crypto_logtype_session,
1505 virtio_crypto_logtype_rx = rte_log_register("pmd.crypto.virtio.rx");
1506 if (virtio_crypto_logtype_rx >= 0)
1507 rte_log_set_level(virtio_crypto_logtype_rx, RTE_LOG_NOTICE);
1509 virtio_crypto_logtype_tx = rte_log_register("pmd.crypto.virtio.tx");
1510 if (virtio_crypto_logtype_tx >= 0)
1511 rte_log_set_level(virtio_crypto_logtype_tx, RTE_LOG_NOTICE);
1513 virtio_crypto_logtype_driver =
1514 rte_log_register("pmd.crypto.virtio.driver");
1515 if (virtio_crypto_logtype_driver >= 0)
1516 rte_log_set_level(virtio_crypto_logtype_driver, RTE_LOG_NOTICE);