1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.
7 #include <rte_common.h>
10 #include <rte_bus_pci.h>
11 #include <rte_cryptodev.h>
12 #include <rte_cryptodev_pmd.h>
15 #include "virtio_cryptodev.h"
16 #include "virtqueue.h"
17 #include "virtio_crypto_algs.h"
/* Dynamic log type ids, one per PMD subsystem (init/session/rx/tx/driver). */
int virtio_crypto_logtype_init;
int virtio_crypto_logtype_session;
int virtio_crypto_logtype_rx;
int virtio_crypto_logtype_tx;
int virtio_crypto_logtype_driver;
/* Forward declarations for the rte_cryptodev_ops callbacks implemented below. */
static int virtio_crypto_dev_configure(struct rte_cryptodev *dev,
        struct rte_cryptodev_config *config);
static int virtio_crypto_dev_start(struct rte_cryptodev *dev);
static void virtio_crypto_dev_stop(struct rte_cryptodev *dev);
static int virtio_crypto_dev_close(struct rte_cryptodev *dev);
static void virtio_crypto_dev_info_get(struct rte_cryptodev *dev,
        struct rte_cryptodev_info *dev_info);
static void virtio_crypto_dev_stats_get(struct rte_cryptodev *dev,
        struct rte_cryptodev_stats *stats);
static void virtio_crypto_dev_stats_reset(struct rte_cryptodev *dev);
static int virtio_crypto_qp_setup(struct rte_cryptodev *dev,
        uint16_t queue_pair_id,
        const struct rte_cryptodev_qp_conf *qp_conf,
        struct rte_mempool *session_pool);
static int virtio_crypto_qp_release(struct rte_cryptodev *dev,
        uint16_t queue_pair_id);
static void virtio_crypto_dev_free_mbufs(struct rte_cryptodev *dev);
static unsigned int virtio_crypto_sym_get_session_private_size(
        struct rte_cryptodev *dev);
static void virtio_crypto_sym_clear_session(struct rte_cryptodev *dev,
        struct rte_cryptodev_sym_session *sess);
static int virtio_crypto_sym_configure_session(struct rte_cryptodev *dev,
        struct rte_crypto_sym_xform *xform,
        struct rte_cryptodev_sym_session *session,
        struct rte_mempool *mp);
/*
 * The set of PCI devices this driver supports
 * (vendor/device id pair plus the all-zero sentinel entry).
 */
static const struct rte_pci_id pci_id_virtio_crypto_map[] = {
    { RTE_PCI_DEVICE(VIRTIO_CRYPTO_PCI_VENDORID,
            VIRTIO_CRYPTO_PCI_DEVICEID) },
    { .vendor_id = 0, /* sentinel */ },
/* Driver id assigned by the cryptodev layer when the PMD registers. */
uint8_t cryptodev_virtio_driver_id;

/*
 * Number of indirect descriptors used by one create-session request:
 * ctrl request, cipher key, auth key and session input.
 */
#define NUM_ENTRY_SYM_CREATE_SESSION 4
/*
 * Build a session-create control request as an indirect descriptor chain,
 * post it on the control virtqueue, busy-wait for the device's answer and
 * copy the returned session id into @session.
 *
 * The request payload (ctrl req, cipher key, auth key, session input) and
 * the NUM_ENTRY_SYM_CREATE_SESSION indirect descriptors live in a single
 * rte_malloc'd buffer, freed before returning on both paths.
 *
 * NOTE(review): this excerpt elides several lines (return statements,
 * closing braces, some assignments); comments only describe what the
 * visible code establishes.
 */
virtio_crypto_send_command(struct virtqueue *vq,
        struct virtio_crypto_op_ctrl_req *ctrl, uint8_t *cipher_key,
        uint8_t *auth_key, struct virtio_crypto_session *session)
    uint32_t len_cipher_key = 0;
    uint32_t len_auth_key = 0;
    uint32_t len_ctrl_req = sizeof(struct virtio_crypto_op_ctrl_req);
    uint32_t len_session_input = sizeof(struct virtio_crypto_session_input);
    uint32_t len_total = 0;
    uint32_t input_offset = 0;
    void *virt_addr_started = NULL;
    phys_addr_t phys_addr_started;
    struct vring_desc *desc;
    struct virtio_crypto_session_input *input;

    PMD_INIT_FUNC_TRACE();

    /* Argument sanity checks. */
    if (session == NULL) {
        VIRTIO_CRYPTO_SESSION_LOG_ERR("session is NULL.");
    /* cipher only is supported, it is available if auth_key is NULL */
        VIRTIO_CRYPTO_SESSION_LOG_ERR("cipher key is NULL.");

    head = vq->vq_desc_head_idx;
    VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_desc_head_idx = %d, vq = %p",
    /* One free ring slot is needed for the indirect chain. */
    if (vq->vq_free_cnt < needed) {
        VIRTIO_CRYPTO_SESSION_LOG_ERR("Not enough entry");

    /* calculate the length of cipher key */
    switch (ctrl->u.sym_create_session.op_type) {
    case VIRTIO_CRYPTO_SYM_OP_CIPHER:
            = ctrl->u.sym_create_session.u.cipher
    case VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING:
            = ctrl->u.sym_create_session.u.chain
                .para.cipher_param.keylen;
        VIRTIO_CRYPTO_SESSION_LOG_ERR("invalid op type");

    /* calculate the length of auth key */
        ctrl->u.sym_create_session.u.chain.para.u.mac_param

    /*
     * malloc memory to store indirect vring_desc entries, including
     * ctrl request, cipher key, auth key, session input and desc vring
     */
    desc_offset = len_ctrl_req + len_cipher_key + len_auth_key
    virt_addr_started = rte_malloc(NULL,
        desc_offset + NUM_ENTRY_SYM_CREATE_SESSION
            * sizeof(struct vring_desc), RTE_CACHE_LINE_SIZE);
    if (virt_addr_started == NULL) {
        VIRTIO_CRYPTO_SESSION_LOG_ERR("not enough heap memory");
    /* Physical (IOVA) base matching virt_addr_started, for the device. */
    phys_addr_started = rte_malloc_virt2iova(virt_addr_started);

    /* address to store indirect vring desc entries */
    desc = (struct vring_desc *)
        ((uint8_t *)virt_addr_started + desc_offset);

    /* ctrl request part: first indirect descriptor, device-readable */
    memcpy(virt_addr_started, ctrl, len_ctrl_req);
    desc[idx].addr = phys_addr_started;
    desc[idx].len = len_ctrl_req;
    desc[idx].flags = VRING_DESC_F_NEXT;
    desc[idx].next = idx + 1;
    len_total += len_ctrl_req;
    input_offset += len_ctrl_req;

    /* cipher key part */
    if (len_cipher_key > 0) {
        memcpy((uint8_t *)virt_addr_started + len_total,
                cipher_key, len_cipher_key);

        desc[idx].addr = phys_addr_started + len_total;
        desc[idx].len = len_cipher_key;
        desc[idx].flags = VRING_DESC_F_NEXT;
        desc[idx].next = idx + 1;
        len_total += len_cipher_key;
        input_offset += len_cipher_key;

    /* auth key part (only present for algorithm chaining) */
    if (len_auth_key > 0) {
        memcpy((uint8_t *)virt_addr_started + len_total,
                auth_key, len_auth_key);

        desc[idx].addr = phys_addr_started + len_total;
        desc[idx].len = len_auth_key;
        desc[idx].flags = VRING_DESC_F_NEXT;
        desc[idx].next = idx + 1;
        len_total += len_auth_key;
        input_offset += len_auth_key;

    /* session input part: device-writable status + session id */
    input = (struct virtio_crypto_session_input *)
        ((uint8_t *)virt_addr_started + input_offset);
    /* Preset to failure values; the device overwrites them on success. */
    input->status = VIRTIO_CRYPTO_ERR;
    input->session_id = ~0ULL;
    desc[idx].addr = phys_addr_started + len_total;
    desc[idx].len = len_session_input;
    desc[idx].flags = VRING_DESC_F_WRITE;

    /* use a single desc entry */
    vq->vq_ring.desc[head].addr = phys_addr_started + desc_offset;
    vq->vq_ring.desc[head].len = idx * sizeof(struct vring_desc);
    vq->vq_ring.desc[head].flags = VRING_DESC_F_INDIRECT;

    vq->vq_desc_head_idx = vq->vq_ring.desc[head].next;

    vq_update_avail_ring(vq, head);
    vq_update_avail_idx(vq);

    VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_queue_index = %d",

    virtqueue_notify(vq);

    /* Busy-wait until the device marks the request used. */
    while (vq->vq_used_cons_idx == vq->vq_ring.used->idx) {

    /* Drain the used ring and recycle the descriptor chain(s). */
    while (vq->vq_used_cons_idx != vq->vq_ring.used->idx) {
        uint32_t idx, desc_idx, used_idx;
        struct vring_used_elem *uep;

        used_idx = (uint32_t)(vq->vq_used_cons_idx
                & (vq->vq_nentries - 1));
        uep = &vq->vq_ring.used->ring[used_idx];
        idx = (uint32_t) uep->id;

        /* Walk to the tail of the chain. */
        while (vq->vq_ring.desc[desc_idx].flags & VRING_DESC_F_NEXT) {
            desc_idx = vq->vq_ring.desc[desc_idx].next;

        /* Splice the freed chain back onto the free list. */
        vq->vq_ring.desc[desc_idx].next = vq->vq_desc_head_idx;
        vq->vq_desc_head_idx = idx;

        vq->vq_used_cons_idx++;

    VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_free_cnt=%d\n"
            "vq->vq_desc_head_idx=%d",
            vq->vq_free_cnt, vq->vq_desc_head_idx);

    /* Check the status the device wrote back. */
    if (input->status != VIRTIO_CRYPTO_OK) {
        VIRTIO_CRYPTO_SESSION_LOG_ERR("Something wrong on backend! "
                "status=%u, session_id=%" PRIu64 "",
                input->status, input->session_id);
        rte_free(virt_addr_started);

    session->session_id = input->session_id;

    VIRTIO_CRYPTO_SESSION_LOG_INFO("Create session successfully, "
            "session_id=%" PRIu64 "", input->session_id);
    rte_free(virt_addr_started);
/*
 * Tear down a virtqueue: deselect it at the PCI layer, then free its
 * ring memzone and (for data queues) its cookie mempool.
 * NOTE(review): the NULL-check / hw assignment lines are elided in this
 * excerpt.
 */
virtio_crypto_queue_release(struct virtqueue *vq)
    struct virtio_crypto_hw *hw;

    PMD_INIT_FUNC_TRACE();

    /* Select and deactivate the queue */
    VTPCI_OPS(hw)->del_queue(hw, vq);

    rte_memzone_free(vq->mz);
    rte_mempool_free(vq->mpool);
/* Maximum length of a per-queue mempool name built below. */
#define MPOOL_MAX_NAME_SZ 32

/*
 * Allocate and initialize one virtqueue (control or data) for @dev:
 * read the ring size from the device, allocate the virtqueue structure,
 * the op-cookie mempool (data queues only) and the ring memzone, then
 * return the queue through @pvq.
 *
 * NOTE(review): several lines (queue_type/nb_desc parameters, return
 * statements, error-path labels) are elided in this excerpt.
 */
virtio_crypto_queue_setup(struct rte_cryptodev *dev,
        uint16_t vtpci_queue_idx,
        struct virtqueue **pvq)
    char vq_name[VIRTQUEUE_MAX_NAME_SZ];
    char mpool_name[MPOOL_MAX_NAME_SZ];
    const struct rte_memzone *mz;
    unsigned int vq_size, size;
    struct virtio_crypto_hw *hw = dev->data->dev_private;
    struct virtqueue *vq = NULL;

    PMD_INIT_FUNC_TRACE();

    VIRTIO_CRYPTO_INIT_LOG_DBG("setting up queue: %u", vtpci_queue_idx);

    /*
     * Read the virtqueue size from the Queue Size field
     * Always power of 2 and if 0 virtqueue does not exist
     */
    vq_size = VTPCI_OPS(hw)->get_queue_num(hw, vtpci_queue_idx);
        VIRTIO_CRYPTO_INIT_LOG_ERR("virtqueue does not exist");
    VIRTIO_CRYPTO_INIT_LOG_DBG("vq_size: %u", vq_size);

    if (!rte_is_power_of_2(vq_size)) {
        VIRTIO_CRYPTO_INIT_LOG_ERR("virtqueue size is not powerof 2");

    /* Build queue/mempool names that are unique per device and queue. */
    if (queue_type == VTCRYPTO_DATAQ) {
        snprintf(vq_name, sizeof(vq_name), "dev%d_dataqueue%d",
                dev->data->dev_id, vtpci_queue_idx);
        snprintf(mpool_name, sizeof(mpool_name),
                "dev%d_dataqueue%d_mpool",
                dev->data->dev_id, vtpci_queue_idx);
    } else if (queue_type == VTCRYPTO_CTRLQ) {
        snprintf(vq_name, sizeof(vq_name), "dev%d_controlqueue",
        snprintf(mpool_name, sizeof(mpool_name),
                "dev%d_controlqueue_mpool",

    /* Virtqueue struct plus one vq_desc_extra slot per ring entry. */
    size = RTE_ALIGN_CEIL(sizeof(*vq) +
            vq_size * sizeof(struct vq_desc_extra),
            RTE_CACHE_LINE_SIZE);
    vq = rte_zmalloc_socket(vq_name, size, RTE_CACHE_LINE_SIZE,
        VIRTIO_CRYPTO_INIT_LOG_ERR("Can not allocate virtqueue");

    if (queue_type == VTCRYPTO_DATAQ) {
        /* pre-allocate a mempool and use it in the data plane to
         * improve performance
         */
        vq->mpool = rte_mempool_lookup(mpool_name);
        if (vq->mpool == NULL)
            vq->mpool = rte_mempool_create(mpool_name,
                    sizeof(struct virtio_crypto_op_cookie),
                    RTE_CACHE_LINE_SIZE, 0,
                    NULL, NULL, NULL, NULL, socket_id,
            VIRTIO_CRYPTO_DRV_LOG_ERR("Virtio Crypto PMD "
                    "Cannot create mempool");
            goto mpool_create_err;
        /* One op cookie per descriptor slot. */
        for (i = 0; i < vq_size; i++) {
            vq->vq_descx[i].cookie =
                rte_zmalloc("crypto PMD op cookie pointer",
                    sizeof(struct virtio_crypto_op_cookie),
                    RTE_CACHE_LINE_SIZE);
            if (vq->vq_descx[i].cookie == NULL) {
                VIRTIO_CRYPTO_DRV_LOG_ERR("Failed to "
                        "alloc mem for cookie");
                goto cookie_alloc_err;

    vq->dev_id = dev->data->dev_id;
    vq->vq_queue_index = vtpci_queue_idx;
    vq->vq_nentries = vq_size;

    /*
     * Using part of the vring entries is permitted, but the maximum
     * is vq_size; nb_desc == 0 means "use the whole ring".
     */
    if (nb_desc == 0 || nb_desc > vq_size)
    vq->vq_free_cnt = nb_desc;

    /*
     * Reserve a memzone for vring elements
     */
    size = vring_size(vq_size, VIRTIO_PCI_VRING_ALIGN);
    vq->vq_ring_size = RTE_ALIGN_CEIL(size, VIRTIO_PCI_VRING_ALIGN);
    VIRTIO_CRYPTO_INIT_LOG_DBG("%s vring_size: %d, rounded_vring_size: %d",
            (queue_type == VTCRYPTO_DATAQ) ? "dataq" : "ctrlq",
            size, vq->vq_ring_size);

    mz = rte_memzone_reserve_aligned(vq_name, vq->vq_ring_size,
            socket_id, 0, VIRTIO_PCI_VRING_ALIGN);
        /* Reuse an existing zone after e.g. a secondary-process retry. */
        if (rte_errno == EEXIST)
            mz = rte_memzone_lookup(vq_name);
        VIRTIO_CRYPTO_INIT_LOG_ERR("not enough memory");

    /*
     * Virtio PCI device VIRTIO_PCI_QUEUE_PF register is 32bit,
     * and only accepts 32 bit page frame number.
     * Check if the allocated physical memory exceeds 16TB.
     */
    if ((mz->phys_addr + vq->vq_ring_size - 1)
            >> (VIRTIO_PCI_QUEUE_ADDR_SHIFT + 32)) {
        VIRTIO_CRYPTO_INIT_LOG_ERR("vring address shouldn't be "

    /*
     * NOTE(review): sizeof(mz->len) is the size of the len *field*
     * (a few bytes), so this only zeroes the start of the ring; it
     * looks like the intent was memset(mz->addr, 0, mz->len) --
     * confirm against upstream DPDK, which fixed exactly this.
     */
    memset(mz->addr, 0, sizeof(mz->len));
    vq->vq_ring_mem = mz->phys_addr;
    vq->vq_ring_virt_mem = mz->addr;
    VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_ring_mem(physical): 0x%"PRIx64,
            (uint64_t)mz->phys_addr);
    VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_ring_virt_mem: 0x%"PRIx64,
            (uint64_t)(uintptr_t)mz->addr);

    /* Error unwind: release resources in reverse acquisition order. */
    rte_memzone_free(mz);
    rte_mempool_free(vq->mpool);
    for (j = 0; j < i; j++)
        rte_free(vq->vq_descx[j].cookie);
/*
 * Create the control virtqueue at index @queue_idx.
 * nb_desc = 0 makes queue_setup use the full ring size; the control
 * queue has no NUMA affinity requirement (SOCKET_ID_ANY).
 */
virtio_crypto_ctrlq_setup(struct rte_cryptodev *dev, uint16_t queue_idx)
    struct virtqueue *vq;
    struct virtio_crypto_hw *hw = dev->data->dev_private;

    /* if virtio device has started, do not touch the virtqueues */
    if (dev->data->dev_started)

    PMD_INIT_FUNC_TRACE();

    ret = virtio_crypto_queue_setup(dev, VTCRYPTO_CTRLQ, queue_idx,
            0, SOCKET_ID_ANY, &vq);
        VIRTIO_CRYPTO_INIT_LOG_ERR("control vq initialization failed");
/* Release the control virtqueue and every data virtqueue of @dev. */
virtio_crypto_free_queues(struct rte_cryptodev *dev)
    struct virtio_crypto_hw *hw = dev->data->dev_private;

    PMD_INIT_FUNC_TRACE();

    /* control queue release */
    virtio_crypto_queue_release(hw->cvq);

    /* data queue release */
    for (i = 0; i < hw->max_dataqueues; i++)
        virtio_crypto_queue_release(dev->data->queue_pairs[i]);
491 virtio_crypto_dev_close(struct rte_cryptodev *dev __rte_unused)
/*
 * dev_ops for virtio, bare necessities for basic operation.
 * Unsupported optional callbacks are left NULL.
 */
static struct rte_cryptodev_ops virtio_crypto_dev_ops = {
    /* Device related operations */
    .dev_configure = virtio_crypto_dev_configure,
    .dev_start = virtio_crypto_dev_start,
    .dev_stop = virtio_crypto_dev_stop,
    .dev_close = virtio_crypto_dev_close,
    .dev_infos_get = virtio_crypto_dev_info_get,

    .stats_get = virtio_crypto_dev_stats_get,
    .stats_reset = virtio_crypto_dev_stats_reset,

    .queue_pair_setup = virtio_crypto_qp_setup,
    .queue_pair_release = virtio_crypto_qp_release,
    .queue_pair_start = NULL,
    .queue_pair_stop = NULL,
    .queue_pair_count = NULL,

    /* Crypto related operations */
    .session_get_size = virtio_crypto_sym_get_session_private_size,
    .session_configure = virtio_crypto_sym_configure_session,
    .session_clear = virtio_crypto_sym_clear_session,
    .qp_attach_session = NULL,
    .qp_detach_session = NULL
/*
 * Accumulate per-data-queue packet counters into @stats.
 * Queues that were never set up (NULL slots) are skipped.
 */
virtio_crypto_update_stats(struct rte_cryptodev *dev,
        struct rte_cryptodev_stats *stats)
    struct virtio_crypto_hw *hw = dev->data->dev_private;

    PMD_INIT_FUNC_TRACE();

        VIRTIO_CRYPTO_DRV_LOG_ERR("invalid pointer");

    for (i = 0; i < hw->max_dataqueues; i++) {
        const struct virtqueue *data_queue
            = dev->data->queue_pairs[i];
        if (data_queue == NULL)

        stats->enqueued_count += data_queue->packets_sent_total;
        stats->enqueue_err_count += data_queue->packets_sent_failed;

        stats->dequeued_count += data_queue->packets_received_total;
        stats->dequeue_err_count
            += data_queue->packets_received_failed;
/* stats_get callback: thin wrapper around virtio_crypto_update_stats(). */
virtio_crypto_dev_stats_get(struct rte_cryptodev *dev,
        struct rte_cryptodev_stats *stats)
    PMD_INIT_FUNC_TRACE();

    virtio_crypto_update_stats(dev, stats);
/* stats_reset callback: zero the counters of every existing data queue. */
virtio_crypto_dev_stats_reset(struct rte_cryptodev *dev)
    struct virtio_crypto_hw *hw = dev->data->dev_private;

    PMD_INIT_FUNC_TRACE();

    for (i = 0; i < hw->max_dataqueues; i++) {
        struct virtqueue *data_queue = dev->data->queue_pairs[i];
        if (data_queue == NULL)

        data_queue->packets_sent_total = 0;
        data_queue->packets_sent_failed = 0;

        data_queue->packets_received_total = 0;
        data_queue->packets_received_failed = 0;
/*
 * queue_pair_setup callback: create data virtqueue @queue_pair_id with
 * qp_conf->nb_descriptors entries and register it in dev->data.
 * The session_pool argument is unused by this PMD.
 */
virtio_crypto_qp_setup(struct rte_cryptodev *dev, uint16_t queue_pair_id,
        const struct rte_cryptodev_qp_conf *qp_conf,
        struct rte_mempool *session_pool __rte_unused)
    struct virtqueue *vq;

    PMD_INIT_FUNC_TRACE();

    /* if virtio dev is started, do not touch the virtqueues */
    if (dev->data->dev_started)

    ret = virtio_crypto_queue_setup(dev, VTCRYPTO_DATAQ, queue_pair_id,
            qp_conf->nb_descriptors, socket_id, &vq);
        VIRTIO_CRYPTO_INIT_LOG_ERR(
            "virtio crypto data queue initialization failed\n");

    dev->data->queue_pairs[queue_pair_id] = vq;
/*
 * queue_pair_release callback: free the data virtqueue at
 * @queue_pair_id; a NULL slot means it was already released.
 */
virtio_crypto_qp_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
        = (struct virtqueue *)dev->data->queue_pairs[queue_pair_id];

    PMD_INIT_FUNC_TRACE();

        VIRTIO_CRYPTO_DRV_LOG_DBG("vq already freed");

    virtio_crypto_queue_release(vq);
/*
 * Negotiate feature bits with the device: offer @req_features, accept
 * the subset the host supports, and require VIRTIO_F_VERSION_1 plus a
 * successful FEATURES_OK handshake (modern-device flow).
 */
virtio_negotiate_features(struct virtio_crypto_hw *hw, uint64_t req_features)
    uint64_t host_features;

    PMD_INIT_FUNC_TRACE();

    /* Prepare guest_features: feature that driver wants to support */
    VIRTIO_CRYPTO_INIT_LOG_DBG("guest_features before negotiate = %" PRIx64,

    /* Read device(host) feature bits */
    host_features = VTPCI_OPS(hw)->get_features(hw);
    VIRTIO_CRYPTO_INIT_LOG_DBG("host_features before negotiate = %" PRIx64,

    /*
     * Negotiate features: Subset of device feature bits are written back
     * guest feature bits.
     */
    hw->guest_features = req_features;
    hw->guest_features = vtpci_cryptodev_negotiate_features(hw,
    VIRTIO_CRYPTO_INIT_LOG_DBG("features after negotiate = %" PRIx64,

    /* The legacy interface is not supported; VERSION_1 is mandatory. */
    if (!vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) {
        VIRTIO_CRYPTO_INIT_LOG_ERR(
            "VIRTIO_F_VERSION_1 features is not enabled.");
    vtpci_cryptodev_set_status(hw,
            VIRTIO_CONFIG_STATUS_FEATURES_OK);
    /* The device must acknowledge FEATURES_OK, else negotiation failed. */
    if (!(vtpci_cryptodev_get_status(hw) &
            VIRTIO_CONFIG_STATUS_FEATURES_OK)) {
        VIRTIO_CRYPTO_INIT_LOG_ERR("failed to set FEATURES_OK "

    hw->req_guest_features = req_features;
/* reset device and renegotiate features if needed */
virtio_crypto_init_device(struct rte_cryptodev *cryptodev,
        uint64_t req_features)
    struct virtio_crypto_hw *hw = cryptodev->data->dev_private;
    struct virtio_crypto_config local_config;
    struct virtio_crypto_config *config = &local_config;

    PMD_INIT_FUNC_TRACE();

    /* Reset the device although not necessary at startup */
    vtpci_cryptodev_reset(hw);

    /* Tell the host we've noticed this device. */
    vtpci_cryptodev_set_status(hw, VIRTIO_CONFIG_STATUS_ACK);

    /* Tell the host we've known how to drive the device. */
    vtpci_cryptodev_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER);
    if (virtio_negotiate_features(hw, req_features) < 0)

    /* Get status of the device */
    vtpci_read_cryptodev_config(hw,
        offsetof(struct virtio_crypto_config, status),
        &config->status, sizeof(config->status));
    if (config->status != VIRTIO_CRYPTO_S_HW_READY) {
        VIRTIO_CRYPTO_DRV_LOG_ERR("accelerator hardware is "

    /* Get number of data queues */
    vtpci_read_cryptodev_config(hw,
        offsetof(struct virtio_crypto_config, max_dataqueues),
        &config->max_dataqueues,
        sizeof(config->max_dataqueues));
    hw->max_dataqueues = config->max_dataqueues;

    VIRTIO_CRYPTO_INIT_LOG_DBG("hw->max_dataqueues=%d",
/*
 * This function is based on probe() function.
 * It returns 0 on success.
 * Allocates the cryptodev, wires up ops and burst functions, then
 * performs PCI and virtio device initialization.
 */
crypto_virtio_create(const char *name, struct rte_pci_device *pci_dev,
        struct rte_cryptodev_pmd_init_params *init_params)
    struct rte_cryptodev *cryptodev;
    struct virtio_crypto_hw *hw;

    PMD_INIT_FUNC_TRACE();

    cryptodev = rte_cryptodev_pmd_create(name, &pci_dev->device,
    if (cryptodev == NULL)

    cryptodev->driver_id = cryptodev_virtio_driver_id;
    cryptodev->dev_ops = &virtio_crypto_dev_ops;

    /* Fast-path entry points used by the application's burst calls. */
    cryptodev->enqueue_burst = virtio_crypto_pkt_tx_burst;
    cryptodev->dequeue_burst = virtio_crypto_pkt_rx_burst;

    cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
        RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING;

    hw = cryptodev->data->dev_private;
    hw->dev_id = cryptodev->data->dev_id;

    VIRTIO_CRYPTO_INIT_LOG_DBG("dev %d vendorID=0x%x deviceID=0x%x",
            cryptodev->data->dev_id, pci_dev->id.vendor_id,
            pci_dev->id.device_id);

    /* pci device init */
    if (vtpci_cryptodev_init(pci_dev, hw))

    if (virtio_crypto_init_device(cryptodev,
            VIRTIO_CRYPTO_PMD_GUEST_FEATURES) < 0)
/*
 * Undo crypto_virtio_create(): stop/close a running device, detach the
 * ops and burst pointers, release the control queue and free dev data.
 * Secondary processes must not tear down shared state.
 */
virtio_crypto_dev_uninit(struct rte_cryptodev *cryptodev)
    struct virtio_crypto_hw *hw = cryptodev->data->dev_private;

    PMD_INIT_FUNC_TRACE();

    if (rte_eal_process_type() == RTE_PROC_SECONDARY)

    if (cryptodev->data->dev_started) {
        virtio_crypto_dev_stop(cryptodev);
        virtio_crypto_dev_close(cryptodev);

    cryptodev->dev_ops = NULL;
    cryptodev->enqueue_burst = NULL;
    cryptodev->dequeue_burst = NULL;

    /* release control queue */
    virtio_crypto_queue_release(hw->cvq);

    rte_free(cryptodev->data);
    cryptodev->data = NULL;

    VIRTIO_CRYPTO_DRV_LOG_INFO("dev_uninit completed");
/*
 * dev_configure callback: (re)initialize the device with the PMD's
 * guest features and set up + start the control queue. The generic
 * config argument is unused.
 */
virtio_crypto_dev_configure(struct rte_cryptodev *cryptodev,
        struct rte_cryptodev_config *config __rte_unused)
    struct virtio_crypto_hw *hw = cryptodev->data->dev_private;

    PMD_INIT_FUNC_TRACE();

    if (virtio_crypto_init_device(cryptodev,
            VIRTIO_CRYPTO_PMD_GUEST_FEATURES) < 0)

    /* setup control queue
     * [0, 1, ... ,(config->max_dataqueues - 1)] are data queues
     * config->max_dataqueues is the control queue
     */
    if (virtio_crypto_ctrlq_setup(cryptodev, hw->max_dataqueues) < 0) {
        VIRTIO_CRYPTO_INIT_LOG_ERR("control queue setup error");
    virtio_crypto_ctrlq_start(cryptodev);
/*
 * dev_stop callback: reset the device, free in-flight mbufs and all
 * virtqueues, then mark the device stopped.
 */
virtio_crypto_dev_stop(struct rte_cryptodev *dev)
    struct virtio_crypto_hw *hw = dev->data->dev_private;

    PMD_INIT_FUNC_TRACE();
    VIRTIO_CRYPTO_DRV_LOG_DBG("virtio_dev_stop");

    vtpci_cryptodev_reset(hw);

    virtio_crypto_dev_free_mbufs(dev);
    virtio_crypto_free_queues(dev);

    dev->data->dev_started = 0;
/*
 * dev_start callback: start the data queues, signal DRIVER_OK to the
 * device and mark the device started. Idempotent if already started.
 */
virtio_crypto_dev_start(struct rte_cryptodev *dev)
    struct virtio_crypto_hw *hw = dev->data->dev_private;

    if (dev->data->dev_started)

    /* Do final configuration before queue engine starts */
    virtio_crypto_dataq_start(dev);
    vtpci_cryptodev_reinit_complete(hw);

    dev->data->dev_started = 1;
/*
 * Detach and free any buffers still sitting in the data virtqueues,
 * dumping each queue before and after for debugging.
 */
virtio_crypto_dev_free_mbufs(struct rte_cryptodev *dev)
    struct virtio_crypto_hw *hw = dev->data->dev_private;

    for (i = 0; i < hw->max_dataqueues; i++) {
        VIRTIO_CRYPTO_INIT_LOG_DBG("Before freeing dataq[%d] used "
            "and unused buf", i);
        VIRTQUEUE_DUMP((struct virtqueue *)
                dev->data->queue_pairs[i]);

        VIRTIO_CRYPTO_INIT_LOG_DBG("queue_pairs[%d]=%p",
                i, dev->data->queue_pairs[i]);

        virtqueue_detatch_unused(dev->data->queue_pairs[i]);

        VIRTIO_CRYPTO_INIT_LOG_DBG("After freeing dataq[%d] used and "
            (struct virtqueue *)dev->data->queue_pairs[i]);
/*
 * session_get_size callback: private session size, rounded up to a
 * 16-byte multiple.
 */
virtio_crypto_sym_get_session_private_size(
        struct rte_cryptodev *dev __rte_unused)
    PMD_INIT_FUNC_TRACE();

    return RTE_ALIGN_CEIL(sizeof(struct virtio_crypto_session), 16);
/*
 * Validate the device pointer chain used by session operations:
 * dev, dev->data, the private hw state and the control virtqueue
 * must all be non-NULL.
 */
virtio_crypto_check_sym_session_paras(
        struct rte_cryptodev *dev)
    struct virtio_crypto_hw *hw;

    PMD_INIT_FUNC_TRACE();

    if (unlikely(dev == NULL)) {
        VIRTIO_CRYPTO_SESSION_LOG_ERR("dev is NULL");
    if (unlikely(dev->data == NULL)) {
        VIRTIO_CRYPTO_SESSION_LOG_ERR("dev->data is NULL");
    hw = dev->data->dev_private;
    if (unlikely(hw == NULL)) {
        VIRTIO_CRYPTO_SESSION_LOG_ERR("hw is NULL");
    if (unlikely(hw->cvq == NULL)) {
        VIRTIO_CRYPTO_SESSION_LOG_ERR("vq is NULL");
/*
 * Validate the arguments of session_clear: a non-NULL session plus the
 * generic device checks.
 */
virtio_crypto_check_sym_clear_session_paras(
        struct rte_cryptodev *dev,
        struct rte_cryptodev_sym_session *sess)
    PMD_INIT_FUNC_TRACE();

        VIRTIO_CRYPTO_SESSION_LOG_ERR("sym_session is NULL");

    return virtio_crypto_check_sym_session_paras(dev);
/*
 * Number of indirect descriptors in a destroy-session request:
 * ctrl request + device-written inhdr status.
 */
#define NUM_ENTRY_SYM_CLEAR_SESSION 2

/*
 * session_clear callback: send a VIRTIO_CRYPTO_CIPHER_DESTROY_SESSION
 * control request for the session's id, busy-wait for completion,
 * then wipe the private session data.
 *
 * NOTE(review): this excerpt elides several lines (returns, braces,
 * vq/hw assignments); comments only reflect the visible code.
 */
virtio_crypto_sym_clear_session(
        struct rte_cryptodev *dev,
        struct rte_cryptodev_sym_session *sess)
    struct virtio_crypto_hw *hw;
    struct virtqueue *vq;
    struct virtio_crypto_session *session;
    struct virtio_crypto_op_ctrl_req *ctrl;
    struct vring_desc *desc;
    uint8_t *malloc_virt_addr;
    uint64_t malloc_phys_addr;
    uint8_t len_inhdr = sizeof(struct virtio_crypto_inhdr);
    uint32_t len_op_ctrl_req = sizeof(struct virtio_crypto_op_ctrl_req);
    uint32_t desc_offset = len_op_ctrl_req + len_inhdr;

    PMD_INIT_FUNC_TRACE();

    if (virtio_crypto_check_sym_clear_session_paras(dev, sess) < 0)

    hw = dev->data->dev_private;

    /* Recover the PMD-private session previously stored at configure. */
    session = (struct virtio_crypto_session *)get_session_private_data(
            sess, cryptodev_virtio_driver_id);
    if (session == NULL) {
        VIRTIO_CRYPTO_SESSION_LOG_ERR("Invalid session parameter");

    VIRTIO_CRYPTO_SESSION_LOG_INFO("vq->vq_desc_head_idx = %d, "
            "vq = %p", vq->vq_desc_head_idx, vq);

    if (vq->vq_free_cnt < needed) {
        VIRTIO_CRYPTO_SESSION_LOG_ERR(
                "vq->vq_free_cnt = %d is less than %d, "
                "not enough", vq->vq_free_cnt, needed);

    /*
     * malloc memory to store information of ctrl request op,
     * returned status and desc vring
     */
    malloc_virt_addr = rte_malloc(NULL, len_op_ctrl_req + len_inhdr
        + NUM_ENTRY_SYM_CLEAR_SESSION
        * sizeof(struct vring_desc), RTE_CACHE_LINE_SIZE);
    if (malloc_virt_addr == NULL) {
        VIRTIO_CRYPTO_SESSION_LOG_ERR("not enough heap room");
    malloc_phys_addr = rte_malloc_virt2iova(malloc_virt_addr);

    /* assign ctrl request op part */
    ctrl = (struct virtio_crypto_op_ctrl_req *)malloc_virt_addr;
    ctrl->header.opcode = VIRTIO_CRYPTO_CIPHER_DESTROY_SESSION;
    /* default data virtqueue is 0 */
    ctrl->header.queue_id = 0;
    ctrl->u.destroy_session.session_id = session->session_id;

    /* Device-writable status, preset to error. */
    status = &(((struct virtio_crypto_inhdr *)
        ((uint8_t *)malloc_virt_addr + len_op_ctrl_req))->status);
    *status = VIRTIO_CRYPTO_ERR;

    /* indirect desc vring part */
    desc = (struct vring_desc *)((uint8_t *)malloc_virt_addr

    /* ctrl request part */
    desc[0].addr = malloc_phys_addr;
    desc[0].len = len_op_ctrl_req;
    desc[0].flags = VRING_DESC_F_NEXT;

    /* status part, written back by the device */
    desc[1].addr = malloc_phys_addr + len_op_ctrl_req;
    desc[1].len = len_inhdr;
    desc[1].flags = VRING_DESC_F_WRITE;

    /* use only a single desc entry */
    head = vq->vq_desc_head_idx;
    vq->vq_ring.desc[head].flags = VRING_DESC_F_INDIRECT;
    vq->vq_ring.desc[head].addr = malloc_phys_addr + desc_offset;
    vq->vq_ring.desc[head].len
        = NUM_ENTRY_SYM_CLEAR_SESSION
        * sizeof(struct vring_desc);
    vq->vq_free_cnt -= needed;

    vq->vq_desc_head_idx = vq->vq_ring.desc[head].next;

    vq_update_avail_ring(vq, head);
    vq_update_avail_idx(vq);

    VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_queue_index = %d",
            vq->vq_queue_index);

    virtqueue_notify(vq);

    /* Busy-wait until the device consumes the request. */
    while (vq->vq_used_cons_idx == vq->vq_ring.used->idx) {

    /* Drain the used ring and recycle the descriptor chain(s). */
    while (vq->vq_used_cons_idx != vq->vq_ring.used->idx) {
        uint32_t idx, desc_idx, used_idx;
        struct vring_used_elem *uep;

        used_idx = (uint32_t)(vq->vq_used_cons_idx
                & (vq->vq_nentries - 1));
        uep = &vq->vq_ring.used->ring[used_idx];
        idx = (uint32_t) uep->id;

        while (vq->vq_ring.desc[desc_idx].flags
                & VRING_DESC_F_NEXT) {
            desc_idx = vq->vq_ring.desc[desc_idx].next;

        /* Splice the freed chain back onto the free list. */
        vq->vq_ring.desc[desc_idx].next = vq->vq_desc_head_idx;
        vq->vq_desc_head_idx = idx;
        vq->vq_used_cons_idx++;

    /* Check the status written back by the device. */
    if (*status != VIRTIO_CRYPTO_OK) {
        VIRTIO_CRYPTO_SESSION_LOG_ERR("Close session failed "
                "status=%"PRIu32", session_id=%"PRIu64"",
                *status, session->session_id);
        rte_free(malloc_virt_addr);

    VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_free_cnt=%d\n"
            "vq->vq_desc_head_idx=%d",
            vq->vq_free_cnt, vq->vq_desc_head_idx);

    VIRTIO_CRYPTO_SESSION_LOG_INFO("Close session %"PRIu64" successfully ",
            session->session_id);

    /* Wipe the private session data before returning it to the pool. */
    memset(sess, 0, sizeof(struct virtio_crypto_session));
    rte_free(malloc_virt_addr);
/*
 * Walk the xform chain and return the first cipher xform, or NULL when
 * the chain has none (loop/return lines partly elided in this excerpt).
 */
static struct rte_crypto_cipher_xform *
virtio_crypto_get_cipher_xform(struct rte_crypto_sym_xform *xform)
    if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
        return &xform->cipher;

    xform = xform->next;
/*
 * Walk the xform chain and return the first auth xform, or NULL when
 * the chain has none (loop/return lines partly elided in this excerpt).
 */
static struct rte_crypto_auth_xform *
virtio_crypto_get_auth_xform(struct rte_crypto_sym_xform *xform)
    if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH)
        return &xform->auth;

    xform = xform->next;
/** Get xform chain order: cipher-only, auth-only, auth-then-cipher or
 *  cipher-then-auth, mapped to the matching VIRTIO_CRYPTO_CMD_* id.
 */
virtio_crypto_get_chain_order(struct rte_crypto_sym_xform *xform)
    /* Cipher Only */
    if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
            xform->next == NULL)
        return VIRTIO_CRYPTO_CMD_CIPHER;

    /* Authentication Only */
    if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
            xform->next == NULL)
        return VIRTIO_CRYPTO_CMD_AUTH;

    /* Authenticate then Cipher */
    if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
            xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
        return VIRTIO_CRYPTO_CMD_HASH_CIPHER;

    /* Cipher then Authenticate */
    if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
            xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
        return VIRTIO_CRYPTO_CMD_CIPHER_HASH;
/*
 * Fill @para from a DPDK cipher xform: algorithm (supported-algo cases
 * elided in this excerpt), key length and encrypt/decrypt direction.
 */
virtio_crypto_sym_pad_cipher_param(
        struct virtio_crypto_cipher_session_para *para,
        struct rte_crypto_cipher_xform *cipher_xform)
    switch (cipher_xform->algo) {
        VIRTIO_CRYPTO_SESSION_LOG_ERR("Crypto: Unsupported "
                "Cipher alg %u", cipher_xform->algo);

    para->keylen = cipher_xform->key.length;
    switch (cipher_xform->op) {
    case RTE_CRYPTO_CIPHER_OP_ENCRYPT:
        para->op = VIRTIO_CRYPTO_OP_ENCRYPT;
    case RTE_CRYPTO_CIPHER_OP_DECRYPT:
        para->op = VIRTIO_CRYPTO_OP_DECRYPT;
        VIRTIO_CRYPTO_SESSION_LOG_ERR("Unsupported cipher operation "
/*
 * Fill the chain-session hash/mac algorithm field from a DPDK auth
 * xform. The target field depends on hash_mode: plain hash vs. keyed
 * MAC. Unknown algos fall through to VIRTIO_CRYPTO_NO_MAC.
 */
virtio_crypto_sym_pad_auth_param(
        struct virtio_crypto_op_ctrl_req *ctrl,
        struct rte_crypto_auth_xform *auth_xform)
    struct virtio_crypto_alg_chain_session_para *para =
        &(ctrl->u.sym_create_session.u.chain.para);

    /* Pick which union member receives the algorithm id. */
    switch (ctrl->u.sym_create_session.u.chain.para.hash_mode) {
    case VIRTIO_CRYPTO_SYM_HASH_MODE_PLAIN:
        algo = &(para->u.hash_param.algo);
    case VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH:
        algo = &(para->u.mac_param.algo);
        VIRTIO_CRYPTO_SESSION_LOG_ERR("Unsupported hash mode %u "
            ctrl->u.sym_create_session.u.chain.para.hash_mode);

    /* Map the DPDK auth algo to a virtio id (cases elided here). */
    switch (auth_xform->algo) {
        VIRTIO_CRYPTO_SESSION_LOG_ERR(
            "Crypto: Undefined Hash algo %u specified",
        *algo = VIRTIO_CRYPTO_NO_MAC;
/*
 * Populate the create-session ctrl request from the xform chain:
 * cipher parameters (chained or cipher-only layout depending on
 * @is_chainned), key data pointers returned through the out params,
 * IV info stored in @session, and auth parameters for chained mode.
 */
virtio_crypto_sym_pad_op_ctrl_req(
        struct virtio_crypto_op_ctrl_req *ctrl,
        struct rte_crypto_sym_xform *xform, bool is_chainned,
        uint8_t **cipher_key_data, uint8_t **auth_key_data,
        struct virtio_crypto_session *session)
    struct rte_crypto_auth_xform *auth_xform = NULL;
    struct rte_crypto_cipher_xform *cipher_xform = NULL;

    /* Get cipher xform from crypto xform chain */
    cipher_xform = virtio_crypto_get_cipher_xform(xform);
        /* Chained sessions store cipher params inside the chain union. */
        ret = virtio_crypto_sym_pad_cipher_param(
            &ctrl->u.sym_create_session.u.chain.para
                .cipher_param, cipher_xform);
        ret = virtio_crypto_sym_pad_cipher_param(
            &ctrl->u.sym_create_session.u.cipher.para,

        VIRTIO_CRYPTO_SESSION_LOG_ERR(
            "pad cipher parameter failed");

    *cipher_key_data = cipher_xform->key.data;

    /* Remember where the IV lives in each crypto op. */
    session->iv.offset = cipher_xform->iv.offset;
    session->iv.length = cipher_xform->iv.length;

    /* Get auth xform from crypto xform chain */
    auth_xform = virtio_crypto_get_auth_xform(xform);
        /* FIXME: support VIRTIO_CRYPTO_SYM_HASH_MODE_NESTED */
        struct virtio_crypto_alg_chain_session_para *para =
            &(ctrl->u.sym_create_session.u.chain.para);
        /* A non-empty key selects keyed-MAC mode, else plain hash. */
        if (auth_xform->key.length) {
            para->hash_mode = VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH;
            para->u.mac_param.auth_key_len =
                (uint32_t)auth_xform->key.length;
            para->u.mac_param.hash_result_len =
                auth_xform->digest_length;

            *auth_key_data = auth_xform->key.data;
            para->hash_mode = VIRTIO_CRYPTO_SYM_HASH_MODE_PLAIN;
            para->u.hash_param.hash_result_len =
                auth_xform->digest_length;

        ret = virtio_crypto_sym_pad_auth_param(ctrl, auth_xform);
            VIRTIO_CRYPTO_SESSION_LOG_ERR("pad auth parameter "
/*
 * Validate session_configure arguments: xform, session and mempool
 * must be non-NULL, plus the generic device checks.
 */
virtio_crypto_check_sym_configure_session_paras(
        struct rte_cryptodev *dev,
        struct rte_crypto_sym_xform *xform,
        struct rte_cryptodev_sym_session *sym_sess,
        struct rte_mempool *mempool)
    if (unlikely(xform == NULL) || unlikely(sym_sess == NULL) ||
        unlikely(mempool == NULL)) {
        VIRTIO_CRYPTO_SESSION_LOG_ERR("NULL pointer");

    if (virtio_crypto_check_sym_session_paras(dev) < 0)
/*
 * session_configure callback: allocate private session data from
 * @mempool, build the create-session ctrl request according to the
 * xform chain order, send it on the control queue and attach the
 * resulting private data to @sess.
 *
 * NOTE(review): several lines (returns, error labels, break
 * statements) are elided in this excerpt.
 */
virtio_crypto_sym_configure_session(
        struct rte_cryptodev *dev,
        struct rte_crypto_sym_xform *xform,
        struct rte_cryptodev_sym_session *sess,
        struct rte_mempool *mempool)
    struct virtio_crypto_session crypto_sess;
    void *session_private = &crypto_sess;
    struct virtio_crypto_session *session;
    struct virtio_crypto_op_ctrl_req *ctrl_req;
    enum virtio_crypto_cmd_id cmd_id;
    uint8_t *cipher_key_data = NULL;
    uint8_t *auth_key_data = NULL;
    struct virtio_crypto_hw *hw;
    struct virtqueue *control_vq;

    PMD_INIT_FUNC_TRACE();

    ret = virtio_crypto_check_sym_configure_session_paras(dev, xform,
        VIRTIO_CRYPTO_SESSION_LOG_ERR("Invalid parameters");

    if (rte_mempool_get(mempool, &session_private)) {
        VIRTIO_CRYPTO_SESSION_LOG_ERR(
            "Couldn't get object from session mempool");

    session = (struct virtio_crypto_session *)session_private;
    memset(session, 0, sizeof(struct virtio_crypto_session));
    ctrl_req = &session->ctrl;
    ctrl_req->header.opcode = VIRTIO_CRYPTO_CIPHER_CREATE_SESSION;
    /* FIXME: support multiqueue */
    ctrl_req->header.queue_id = 0;

    hw = dev->data->dev_private;
    control_vq = hw->cvq;

    /* Record the chain order for chained sessions. */
    cmd_id = virtio_crypto_get_chain_order(xform);
    if (cmd_id == VIRTIO_CRYPTO_CMD_CIPHER_HASH)
        ctrl_req->u.sym_create_session.u.chain.para.alg_chain_order
            = VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_CIPHER_THEN_HASH;
    if (cmd_id == VIRTIO_CRYPTO_CMD_HASH_CIPHER)
        ctrl_req->u.sym_create_session.u.chain.para.alg_chain_order
            = VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_HASH_THEN_CIPHER;

    case VIRTIO_CRYPTO_CMD_CIPHER_HASH:
    case VIRTIO_CRYPTO_CMD_HASH_CIPHER:
        ctrl_req->u.sym_create_session.op_type
            = VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING;

        ret = virtio_crypto_sym_pad_op_ctrl_req(ctrl_req,
            xform, true, &cipher_key_data, &auth_key_data, session);
            VIRTIO_CRYPTO_SESSION_LOG_ERR(
                "padding sym op ctrl req failed");
        ret = virtio_crypto_send_command(control_vq, ctrl_req,
            cipher_key_data, auth_key_data, session);
            VIRTIO_CRYPTO_SESSION_LOG_ERR(
                "create session failed: %d", ret);
    case VIRTIO_CRYPTO_CMD_CIPHER:
        ctrl_req->u.sym_create_session.op_type
            = VIRTIO_CRYPTO_SYM_OP_CIPHER;
        ret = virtio_crypto_sym_pad_op_ctrl_req(ctrl_req, xform,
            false, &cipher_key_data, &auth_key_data, session);
            VIRTIO_CRYPTO_SESSION_LOG_ERR(
                "padding sym op ctrl req failed");
        /* Cipher-only: no auth key to pass along. */
        ret = virtio_crypto_send_command(control_vq, ctrl_req,
            cipher_key_data, NULL, session);
            VIRTIO_CRYPTO_SESSION_LOG_ERR(
                "create session failed: %d", ret);
        VIRTIO_CRYPTO_SESSION_LOG_ERR(
            "Unsupported operation chain order parameter");

    /* Publish the private data so later ops can find the session. */
    set_session_private_data(sess, dev->driver_id,
/*
 * cryptodev op: report device identity, features and limits to the
 * application via the rte_cryptodev_info structure.
 *
 * NOTE(review): intermediate lines (e.g. a NULL guard on "info" and a
 * capabilities assignment) appear to be elided from this excerpt --
 * confirm against the complete file.
 */
1391 virtio_crypto_dev_info_get(struct rte_cryptodev *dev,
1392 struct rte_cryptodev_info *info)
1394 struct virtio_crypto_hw *hw = dev->data->dev_private;
1396 PMD_INIT_FUNC_TRACE();
1399 info->driver_id = cryptodev_virtio_driver_id;
1400 info->pci_dev = RTE_DEV_TO_PCI(dev->device);
1401 info->feature_flags = dev->feature_flags;
/* One queue pair per virtio data queue exposed by the device. */
1402 info->max_nb_queue_pairs = hw->max_dataqueues;
1403 info->sym.max_nb_sessions =
1404 RTE_VIRTIO_CRYPTO_PMD_MAX_NB_SESSIONS;
/*
 * PCI probe callback: create a virtio cryptodev instance for a matched
 * PCI device, named after its canonical PCI address, on the probing
 * core's NUMA socket.
 *
 * NOTE(review): the debug log below uses three format specifiers but
 * only two arguments are visible -- the bus-number argument (original
 * line 1422) appears elided from this excerpt.
 */
1409 crypto_virtio_pci_probe(
1410 struct rte_pci_driver *pci_drv __rte_unused,
1411 struct rte_pci_device *pci_dev)
1413 struct rte_cryptodev_pmd_init_params init_params = {
1415 .socket_id = rte_socket_id(),
1416 .private_data_size = sizeof(struct virtio_crypto_hw),
1417 .max_nb_sessions = RTE_VIRTIO_CRYPTO_PMD_MAX_NB_SESSIONS
1419 char name[RTE_CRYPTODEV_NAME_MAX_LEN];
1421 VIRTIO_CRYPTO_DRV_LOG_DBG("Found Crypto device at %02x:%02x.%x",
1423 pci_dev->addr.devid,
1424 pci_dev->addr.function);
/* Canonical DPDK device name derived from the PCI address. */
1426 rte_pci_device_name(&pci_dev->addr, name, sizeof(name));
1428 return crypto_virtio_create(name, pci_dev, &init_params);
/*
 * PCI remove callback: look the cryptodev up by its PCI-address-derived
 * name and tear it down via virtio_crypto_dev_uninit().
 *
 * NOTE(review): "pci_dev" is annotated __rte_unused yet is dereferenced
 * below -- the annotation looks stale; worth cleaning up upstream.
 * The early-return statements after both NULL checks are elided from
 * this excerpt.
 */
1432 crypto_virtio_pci_remove(
1433 struct rte_pci_device *pci_dev __rte_unused)
1435 struct rte_cryptodev *cryptodev;
1436 char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
1438 if (pci_dev == NULL)
/* Rebuild the name used at probe time to find the matching cryptodev. */
1441 rte_pci_device_name(&pci_dev->addr, cryptodev_name,
1442 sizeof(cryptodev_name));
1444 cryptodev = rte_cryptodev_pmd_get_named_dev(cryptodev_name);
1445 if (cryptodev == NULL)
1448 return virtio_crypto_dev_uninit(cryptodev);
/*
 * PCI driver definition: ID match table plus probe/remove hooks.
 * NOTE(review): the .drv_flags initializer and the closing "};" are
 * elided from this excerpt.
 */
1451 static struct rte_pci_driver rte_virtio_crypto_driver = {
1452 .id_table = pci_id_virtio_crypto_map,
1454 .probe = crypto_virtio_pci_probe,
1455 .remove = crypto_virtio_pci_remove
1458 static struct cryptodev_driver virtio_crypto_drv;
/*
 * Register the driver with the PCI bus, then with the cryptodev
 * framework, binding it to its allocated cryptodev driver id.
 */
1460 RTE_PMD_REGISTER_PCI(CRYPTODEV_NAME_VIRTIO_PMD, rte_virtio_crypto_driver);
1461 RTE_PMD_REGISTER_CRYPTO_DRIVER(virtio_crypto_drv,
1462 rte_virtio_crypto_driver.driver,
1463 cryptodev_virtio_driver_id);
1465 RTE_INIT(virtio_crypto_init_log);
1467 virtio_crypto_init_log(void)
1469 virtio_crypto_logtype_init = rte_log_register("pmd.crypto.virtio.init");
1470 if (virtio_crypto_logtype_init >= 0)
1471 rte_log_set_level(virtio_crypto_logtype_init, RTE_LOG_NOTICE);
1473 virtio_crypto_logtype_session =
1474 rte_log_register("pmd.crypto.virtio.session");
1475 if (virtio_crypto_logtype_session >= 0)
1476 rte_log_set_level(virtio_crypto_logtype_session,
1479 virtio_crypto_logtype_rx = rte_log_register("pmd.crypto.virtio.rx");
1480 if (virtio_crypto_logtype_rx >= 0)
1481 rte_log_set_level(virtio_crypto_logtype_rx, RTE_LOG_NOTICE);
1483 virtio_crypto_logtype_tx = rte_log_register("pmd.crypto.virtio.tx");
1484 if (virtio_crypto_logtype_tx >= 0)
1485 rte_log_set_level(virtio_crypto_logtype_tx, RTE_LOG_NOTICE);
1487 virtio_crypto_logtype_driver =
1488 rte_log_register("pmd.crypto.virtio.driver");
1489 if (virtio_crypto_logtype_driver >= 0)
1490 rte_log_set_level(virtio_crypto_logtype_driver, RTE_LOG_NOTICE);