/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2015-2017 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/types.h>
#include <sys/queue.h>
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <errno.h>
#include <stdint.h>
#include <inttypes.h>
#include <netinet/in.h>

#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_dev.h>
#include <rte_interrupts.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>
#include <rte_vdev.h>

#include "rte_crypto.h"
#include "rte_cryptodev.h"
#include "rte_cryptodev_pmd.h"
struct rte_cryptodev rte_crypto_devices[RTE_CRYPTO_MAX_DEVS];

struct rte_cryptodev *rte_cryptodevs = &rte_crypto_devices[0];

static struct rte_cryptodev_global cryptodev_globals = {
		.devs			= &rte_crypto_devices[0],
		.data			= { NULL },
		.nb_devs		= 0,
		.max_devs		= RTE_CRYPTO_MAX_DEVS
};

struct rte_cryptodev_global *rte_cryptodev_globals = &cryptodev_globals;
/* spinlock for crypto device callbacks */
static rte_spinlock_t rte_cryptodev_cb_lock = RTE_SPINLOCK_INITIALIZER;
/**
 * The user application callback description.
 *
 * It contains the callback address registered by the user application,
 * a pointer to the parameters passed to that callback, and the event
 * type it is bound to.
 */
struct rte_cryptodev_callback {
	TAILQ_ENTRY(rte_cryptodev_callback) next; /**< Callbacks list */
	rte_cryptodev_cb_fn cb_fn;		/**< Callback address */
	void *cb_arg;				/**< Parameter for callback */
	enum rte_cryptodev_event_type event;	/**< Interrupt event type */
	uint32_t active;			/**< Callback is executing */
};
/**
 * The crypto cipher algorithm string identifiers.
 * They can be used on the application command line.
 */
const char *
rte_crypto_cipher_algorithm_strings[] = {
	[RTE_CRYPTO_CIPHER_3DES_CBC]	= "3des-cbc",
	[RTE_CRYPTO_CIPHER_3DES_ECB]	= "3des-ecb",
	[RTE_CRYPTO_CIPHER_3DES_CTR]	= "3des-ctr",

	[RTE_CRYPTO_CIPHER_AES_CBC]	= "aes-cbc",
	[RTE_CRYPTO_CIPHER_AES_CTR]	= "aes-ctr",
	[RTE_CRYPTO_CIPHER_AES_DOCSISBPI]	= "aes-docsisbpi",
	[RTE_CRYPTO_CIPHER_AES_ECB]	= "aes-ecb",
	[RTE_CRYPTO_CIPHER_AES_F8]	= "aes-f8",
	[RTE_CRYPTO_CIPHER_AES_XTS]	= "aes-xts",

	[RTE_CRYPTO_CIPHER_ARC4]	= "arc4",

	[RTE_CRYPTO_CIPHER_DES_CBC]	= "des-cbc",
	[RTE_CRYPTO_CIPHER_DES_DOCSISBPI]	= "des-docsisbpi",

	[RTE_CRYPTO_CIPHER_NULL]	= "null",

	[RTE_CRYPTO_CIPHER_KASUMI_F8]	= "kasumi-f8",
	[RTE_CRYPTO_CIPHER_SNOW3G_UEA2]	= "snow3g-uea2",
	[RTE_CRYPTO_CIPHER_ZUC_EEA3]	= "zuc-eea3"
};
/**
 * The crypto cipher operation string identifiers.
 * They can be used on the application command line.
 */
const char *
rte_crypto_cipher_operation_strings[] = {
		[RTE_CRYPTO_CIPHER_OP_ENCRYPT]	= "encrypt",
		[RTE_CRYPTO_CIPHER_OP_DECRYPT]	= "decrypt"
};
/**
 * The crypto auth algorithm string identifiers.
 * They can be used on the application command line.
 */
const char *
rte_crypto_auth_algorithm_strings[] = {
	[RTE_CRYPTO_AUTH_AES_CBC_MAC]	= "aes-cbc-mac",
	[RTE_CRYPTO_AUTH_AES_CMAC]	= "aes-cmac",
	[RTE_CRYPTO_AUTH_AES_GMAC]	= "aes-gmac",
	[RTE_CRYPTO_AUTH_AES_XCBC_MAC]	= "aes-xcbc-mac",

	[RTE_CRYPTO_AUTH_MD5]		= "md5",
	[RTE_CRYPTO_AUTH_MD5_HMAC]	= "md5-hmac",

	[RTE_CRYPTO_AUTH_NULL]		= "null",

	[RTE_CRYPTO_AUTH_SHA1]		= "sha1",
	[RTE_CRYPTO_AUTH_SHA1_HMAC]	= "sha1-hmac",

	[RTE_CRYPTO_AUTH_SHA224]	= "sha2-224",
	[RTE_CRYPTO_AUTH_SHA224_HMAC]	= "sha2-224-hmac",
	[RTE_CRYPTO_AUTH_SHA256]	= "sha2-256",
	[RTE_CRYPTO_AUTH_SHA256_HMAC]	= "sha2-256-hmac",
	[RTE_CRYPTO_AUTH_SHA384]	= "sha2-384",
	[RTE_CRYPTO_AUTH_SHA384_HMAC]	= "sha2-384-hmac",
	[RTE_CRYPTO_AUTH_SHA512]	= "sha2-512",
	[RTE_CRYPTO_AUTH_SHA512_HMAC]	= "sha2-512-hmac",

	[RTE_CRYPTO_AUTH_KASUMI_F9]	= "kasumi-f9",
	[RTE_CRYPTO_AUTH_SNOW3G_UIA2]	= "snow3g-uia2",
	[RTE_CRYPTO_AUTH_ZUC_EIA3]	= "zuc-eia3"
};
/**
 * The crypto AEAD algorithm string identifiers.
 * They can be used on the application command line.
 */
const char *
rte_crypto_aead_algorithm_strings[] = {
	[RTE_CRYPTO_AEAD_AES_CCM]	= "aes-ccm",
	[RTE_CRYPTO_AEAD_AES_GCM]	= "aes-gcm",
};
/**
 * The crypto AEAD operation string identifiers.
 * They can be used on the application command line.
 */
const char *
rte_crypto_aead_operation_strings[] = {
	[RTE_CRYPTO_AEAD_OP_ENCRYPT]	= "encrypt",
	[RTE_CRYPTO_AEAD_OP_DECRYPT]	= "decrypt"
};
int
rte_cryptodev_get_cipher_algo_enum(enum rte_crypto_cipher_algorithm *algo_enum,
		const char *algo_string)
{
	unsigned int i;

	for (i = 1; i < RTE_DIM(rte_crypto_cipher_algorithm_strings); i++) {
		if (strcmp(algo_string,
				rte_crypto_cipher_algorithm_strings[i]) == 0) {
			*algo_enum = (enum rte_crypto_cipher_algorithm) i;
			return 0;
		}
	}

	/* Invalid string */
	return -1;
}
int
rte_cryptodev_get_auth_algo_enum(enum rte_crypto_auth_algorithm *algo_enum,
		const char *algo_string)
{
	unsigned int i;

	for (i = 1; i < RTE_DIM(rte_crypto_auth_algorithm_strings); i++) {
		if (strcmp(algo_string,
				rte_crypto_auth_algorithm_strings[i]) == 0) {
			*algo_enum = (enum rte_crypto_auth_algorithm) i;
			return 0;
		}
	}

	/* Invalid string */
	return -1;
}
int
rte_cryptodev_get_aead_algo_enum(enum rte_crypto_aead_algorithm *algo_enum,
		const char *algo_string)
{
	unsigned int i;

	for (i = 1; i < RTE_DIM(rte_crypto_aead_algorithm_strings); i++) {
		if (strcmp(algo_string,
				rte_crypto_aead_algorithm_strings[i]) == 0) {
			*algo_enum = (enum rte_crypto_aead_algorithm) i;
			return 0;
		}
	}

	/* Invalid string */
	return -1;
}
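/*
 * Usage sketch (illustrative only, not part of this library): converting
 * a user-supplied algorithm name, e.g. taken from an application command
 * line, into its enum value. The "aes-cbc" input below is an example.
 *
 *	enum rte_crypto_cipher_algorithm algo;
 *
 *	if (rte_cryptodev_get_cipher_algo_enum(&algo, "aes-cbc") < 0)
 *		printf("unknown cipher algorithm\n");
 *	else
 *		printf("parsed algo enum %d\n", (int)algo);
 *	// on success algo == RTE_CRYPTO_CIPHER_AES_CBC
 */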
/**
 * The crypto auth operation string identifiers.
 * They can be used on the application command line.
 */
const char *
rte_crypto_auth_operation_strings[] = {
		[RTE_CRYPTO_AUTH_OP_VERIFY]	= "verify",
		[RTE_CRYPTO_AUTH_OP_GENERATE]	= "generate"
};
const struct rte_cryptodev_symmetric_capability *
rte_cryptodev_sym_capability_get(uint8_t dev_id,
		const struct rte_cryptodev_sym_capability_idx *idx)
{
	const struct rte_cryptodev_capabilities *capability;
	struct rte_cryptodev_info dev_info;
	int i = 0;

	rte_cryptodev_info_get(dev_id, &dev_info);

	while ((capability = &dev_info.capabilities[i++])->op !=
			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
			continue;

		if (capability->sym.xform_type != idx->type)
			continue;

		if (idx->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
			capability->sym.auth.algo == idx->algo.auth)
			return &capability->sym;

		if (idx->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
			capability->sym.cipher.algo == idx->algo.cipher)
			return &capability->sym;

		if (idx->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
			capability->sym.aead.algo == idx->algo.aead)
			return &capability->sym;
	}

	return NULL;
}
#define param_range_check(x, y) \
	(((x < y.min) || (x > y.max)) || \
	(y.increment != 0 && (x % y.increment) != 0))
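/*
 * Worked example (illustrative): with a range of {.min = 16, .max = 32,
 * .increment = 8}, param_range_check() accepts 16, 24 and 32 and rejects
 * everything else. An increment of 0 disables the modulus check, so only
 * the min/max bounds apply; PMDs typically report increment 0 together
 * with min == max when a single size is supported.
 */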
int
rte_cryptodev_sym_capability_check_cipher(
		const struct rte_cryptodev_symmetric_capability *capability,
		uint16_t key_size, uint16_t iv_size)
{
	if (param_range_check(key_size, capability->cipher.key_size))
		return -1;

	if (param_range_check(iv_size, capability->cipher.iv_size))
		return -1;

	return 0;
}
int
rte_cryptodev_sym_capability_check_auth(
		const struct rte_cryptodev_symmetric_capability *capability,
		uint16_t key_size, uint16_t digest_size, uint16_t aad_size,
		uint16_t iv_size)
{
	if (param_range_check(key_size, capability->auth.key_size))
		return -1;

	if (param_range_check(digest_size, capability->auth.digest_size))
		return -1;

	if (param_range_check(aad_size, capability->auth.aad_size))
		return -1;

	if (param_range_check(iv_size, capability->auth.iv_size))
		return -1;

	return 0;
}
int
rte_cryptodev_sym_capability_check_aead(
		const struct rte_cryptodev_symmetric_capability *capability,
		uint16_t key_size, uint16_t digest_size, uint16_t aad_size,
		uint16_t iv_size)
{
	if (param_range_check(key_size, capability->aead.key_size))
		return -1;

	if (param_range_check(digest_size, capability->aead.digest_size))
		return -1;

	if (param_range_check(aad_size, capability->aead.aad_size))
		return -1;

	if (param_range_check(iv_size, capability->aead.iv_size))
		return -1;

	return 0;
}
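/*
 * Usage sketch (illustrative only): checking whether a device supports
 * AES-CBC with a 16 byte key and 16 byte IV before creating a session.
 * dev_id is assumed to be a valid, configured device id.
 *
 *	struct rte_cryptodev_sym_capability_idx idx = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.algo.cipher = RTE_CRYPTO_CIPHER_AES_CBC,
 *	};
 *	const struct rte_cryptodev_symmetric_capability *cap;
 *
 *	cap = rte_cryptodev_sym_capability_get(dev_id, &idx);
 *	if (cap == NULL ||
 *	    rte_cryptodev_sym_capability_check_cipher(cap, 16, 16) < 0)
 *		printf("AES-CBC/16/16 not supported on dev %u\n", dev_id);
 */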
const char *
rte_cryptodev_get_feature_name(uint64_t flag)
{
	switch (flag) {
	case RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO:
		return "SYMMETRIC_CRYPTO";
	case RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO:
		return "ASYMMETRIC_CRYPTO";
	case RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING:
		return "SYM_OPERATION_CHAINING";
	case RTE_CRYPTODEV_FF_CPU_SSE:
		return "CPU_SSE";
	case RTE_CRYPTODEV_FF_CPU_AVX:
		return "CPU_AVX";
	case RTE_CRYPTODEV_FF_CPU_AVX2:
		return "CPU_AVX2";
	case RTE_CRYPTODEV_FF_CPU_AESNI:
		return "CPU_AESNI";
	case RTE_CRYPTODEV_FF_HW_ACCELERATED:
		return "HW_ACCELERATED";
	case RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER:
		return "MBUF_SCATTER_GATHER";
	case RTE_CRYPTODEV_FF_CPU_NEON:
		return "CPU_NEON";
	case RTE_CRYPTODEV_FF_CPU_ARM_CE:
		return "CPU_ARM_CE";
	default:
		return NULL;
	}
}
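/*
 * Usage sketch (illustrative only): printing every feature flag a device
 * reports in rte_cryptodev_info.feature_flags, one name per line.
 * dev_info is assumed to have been filled by rte_cryptodev_info_get().
 *
 *	uint64_t flag;
 *
 *	for (flag = 1; flag != 0; flag <<= 1)
 *		if ((dev_info.feature_flags & flag) &&
 *		    rte_cryptodev_get_feature_name(flag) != NULL)
 *			printf("%s\n", rte_cryptodev_get_feature_name(flag));
 */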
int
rte_cryptodev_create_vdev(const char *name, const char *args)
{
	return rte_vdev_init(name, args);
}
struct rte_cryptodev *
rte_cryptodev_pmd_get_dev(uint8_t dev_id)
{
	return &rte_cryptodev_globals->devs[dev_id];
}
struct rte_cryptodev *
rte_cryptodev_pmd_get_named_dev(const char *name)
{
	struct rte_cryptodev *dev;
	unsigned int i;

	if (name == NULL)
		return NULL;

	for (i = 0; i < rte_cryptodev_globals->max_devs; i++) {
		dev = &rte_cryptodev_globals->devs[i];

		if ((dev->attached == RTE_CRYPTODEV_ATTACHED) &&
				(strcmp(dev->data->name, name) == 0))
			return dev;
	}

	return NULL;
}
unsigned int
rte_cryptodev_pmd_is_valid_dev(uint8_t dev_id)
{
	struct rte_cryptodev *dev = NULL;

	if (dev_id >= rte_cryptodev_globals->nb_devs)
		return 0;

	dev = rte_cryptodev_pmd_get_dev(dev_id);
	if (dev->attached != RTE_CRYPTODEV_ATTACHED)
		return 0;
	else
		return 1;
}
int
rte_cryptodev_get_dev_id(const char *name)
{
	unsigned int i;

	if (name == NULL)
		return -1;

	for (i = 0; i < rte_cryptodev_globals->nb_devs; i++)
		if ((strcmp(rte_cryptodev_globals->devs[i].data->name, name)
				== 0) &&
				(rte_cryptodev_globals->devs[i].attached ==
						RTE_CRYPTODEV_ATTACHED))
			return i;

	return -1;
}
uint8_t
rte_cryptodev_count(void)
{
	return rte_cryptodev_globals->nb_devs;
}
uint8_t
rte_cryptodev_count_devtype(enum rte_cryptodev_type type)
{
	uint8_t i, dev_count = 0;

	for (i = 0; i < rte_cryptodev_globals->max_devs; i++)
		if (rte_cryptodev_globals->devs[i].dev_type == type &&
			rte_cryptodev_globals->devs[i].attached ==
					RTE_CRYPTODEV_ATTACHED)
			dev_count++;

	return dev_count;
}
uint8_t
rte_cryptodev_devices_get(const char *driver_name, uint8_t *devices,
	uint8_t nb_devices)
{
	uint8_t i, count = 0;
	struct rte_cryptodev *devs = rte_cryptodev_globals->devs;
	uint8_t max_devs = rte_cryptodev_globals->max_devs;

	for (i = 0; i < max_devs && count < nb_devices; i++) {

		if (devs[i].attached == RTE_CRYPTODEV_ATTACHED) {
			int cmp;

			cmp = strncmp(devs[i].device->driver->name,
					driver_name,
					strlen(driver_name));

			if (cmp == 0)
				devices[count++] = devs[i].data->dev_id;
		}
	}

	return count;
}
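/*
 * Usage sketch (illustrative only): collecting the ids of every attached
 * device bound to one driver. "crypto_aesni_mb" is an example driver
 * name, not a requirement of this API.
 *
 *	uint8_t ids[RTE_CRYPTO_MAX_DEVS];
 *	uint8_t n;
 *
 *	n = rte_cryptodev_devices_get("crypto_aesni_mb", ids, RTE_DIM(ids));
 *	// ids[0..n-1] now hold the matching device ids
 */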
int
rte_cryptodev_socket_id(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id))
		return -1;

	dev = rte_cryptodev_pmd_get_dev(dev_id);

	return dev->data->socket_id;
}
static int
rte_cryptodev_data_alloc(uint8_t dev_id, struct rte_cryptodev_data **data,
		int socket_id)
{
	char mz_name[RTE_CRYPTODEV_NAME_MAX_LEN];
	const struct rte_memzone *mz;
	int n;

	/* generate memzone name */
	n = snprintf(mz_name, sizeof(mz_name), "rte_cryptodev_data_%u", dev_id);
	if (n >= (int)sizeof(mz_name))
		return -EINVAL;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		mz = rte_memzone_reserve(mz_name,
				sizeof(struct rte_cryptodev_data),
				socket_id, 0);
	} else
		mz = rte_memzone_lookup(mz_name);

	if (mz == NULL)
		return -ENOMEM;

	*data = mz->addr;
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		memset(*data, 0, sizeof(struct rte_cryptodev_data));

	return 0;
}
static uint8_t
rte_cryptodev_find_free_device_index(void)
{
	uint8_t dev_id;

	for (dev_id = 0; dev_id < RTE_CRYPTO_MAX_DEVS; dev_id++) {
		if (rte_crypto_devices[dev_id].attached ==
				RTE_CRYPTODEV_DETACHED)
			return dev_id;
	}
	return RTE_CRYPTO_MAX_DEVS;
}
struct rte_cryptodev *
rte_cryptodev_pmd_allocate(const char *name, int socket_id)
{
	struct rte_cryptodev *cryptodev;
	uint8_t dev_id;

	if (rte_cryptodev_pmd_get_named_dev(name) != NULL) {
		CDEV_LOG_ERR("Crypto device with name %s already "
				"allocated!", name);
		return NULL;
	}

	dev_id = rte_cryptodev_find_free_device_index();
	if (dev_id == RTE_CRYPTO_MAX_DEVS) {
		CDEV_LOG_ERR("Reached maximum number of crypto devices");
		return NULL;
	}

	cryptodev = rte_cryptodev_pmd_get_dev(dev_id);

	if (cryptodev->data == NULL) {
		struct rte_cryptodev_data *cryptodev_data =
				cryptodev_globals.data[dev_id];

		int retval = rte_cryptodev_data_alloc(dev_id, &cryptodev_data,
				socket_id);

		if (retval < 0 || cryptodev_data == NULL)
			return NULL;

		cryptodev->data = cryptodev_data;

		snprintf(cryptodev->data->name, RTE_CRYPTODEV_NAME_MAX_LEN,
				"%s", name);

		cryptodev->data->dev_id = dev_id;
		cryptodev->data->socket_id = socket_id;
		cryptodev->data->dev_started = 0;

		cryptodev->attached = RTE_CRYPTODEV_ATTACHED;

		cryptodev_globals.nb_devs++;
	}

	return cryptodev;
}
int
rte_cryptodev_pmd_release_device(struct rte_cryptodev *cryptodev)
{
	int ret;

	if (cryptodev == NULL)
		return -EINVAL;

	/* Close device only if device operations have been set */
	if (cryptodev->dev_ops) {
		ret = rte_cryptodev_close(cryptodev->data->dev_id);
		if (ret < 0)
			return ret;
	}

	cryptodev->attached = RTE_CRYPTODEV_DETACHED;
	cryptodev_globals.nb_devs--;
	return 0;
}
uint16_t
rte_cryptodev_queue_pair_count(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	dev = &rte_crypto_devices[dev_id];
	return dev->data->nb_queue_pairs;
}
static int
rte_cryptodev_queue_pairs_config(struct rte_cryptodev *dev, uint16_t nb_qpairs,
		int socket_id)
{
	struct rte_cryptodev_info dev_info;
	void **qp;
	unsigned int i;

	if ((dev == NULL) || (nb_qpairs < 1)) {
		CDEV_LOG_ERR("invalid param: dev %p, nb_queues %u",
							dev, nb_qpairs);
		return -EINVAL;
	}

	CDEV_LOG_DEBUG("Setup %d queue pairs on device %u",
			nb_qpairs, dev->data->dev_id);

	memset(&dev_info, 0, sizeof(struct rte_cryptodev_info));

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	(*dev->dev_ops->dev_infos_get)(dev, &dev_info);

	if (nb_qpairs > (dev_info.max_nb_queue_pairs)) {
		CDEV_LOG_ERR("Invalid num queue_pairs (%u) for dev %u",
				nb_qpairs, dev->data->dev_id);
		return -EINVAL;
	}

	if (dev->data->queue_pairs == NULL) { /* first time configuration */
		dev->data->queue_pairs = rte_zmalloc_socket(
				"cryptodev->queue_pairs",
				sizeof(dev->data->queue_pairs[0]) * nb_qpairs,
				RTE_CACHE_LINE_SIZE, socket_id);

		if (dev->data->queue_pairs == NULL) {
			dev->data->nb_queue_pairs = 0;
			CDEV_LOG_ERR("failed to get memory for qp meta data, "
					"nb_queues %u", nb_qpairs);
			return -(ENOMEM);
		}
	} else { /* re-configure */
		int ret;
		uint16_t old_nb_queues = dev->data->nb_queue_pairs;

		qp = dev->data->queue_pairs;

		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_release,
				-ENOTSUP);

		for (i = nb_qpairs; i < old_nb_queues; i++) {
			ret = (*dev->dev_ops->queue_pair_release)(dev, i);
			if (ret < 0)
				return ret;
		}

		qp = rte_realloc(qp, sizeof(qp[0]) * nb_qpairs,
				RTE_CACHE_LINE_SIZE);
		if (qp == NULL) {
			CDEV_LOG_ERR("failed to realloc qp meta data,"
					" nb_queues %u", nb_qpairs);
			return -(ENOMEM);
		}

		if (nb_qpairs > old_nb_queues) {
			uint16_t new_qs = nb_qpairs - old_nb_queues;

			memset(qp + old_nb_queues, 0,
				sizeof(qp[0]) * new_qs);
		}

		dev->data->queue_pairs = qp;
	}

	dev->data->nb_queue_pairs = nb_qpairs;
	return 0;
}
int
rte_cryptodev_queue_pair_start(uint8_t dev_id, uint16_t queue_pair_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	if (queue_pair_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", queue_pair_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_start, -ENOTSUP);

	return dev->dev_ops->queue_pair_start(dev, queue_pair_id);
}
int
rte_cryptodev_queue_pair_stop(uint8_t dev_id, uint16_t queue_pair_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	if (queue_pair_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", queue_pair_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_stop, -ENOTSUP);

	return dev->dev_ops->queue_pair_stop(dev, queue_pair_id);
}
static int
rte_cryptodev_sym_session_pool_create(struct rte_cryptodev *dev,
		unsigned int nb_objs, unsigned int obj_cache_size,
		int socket_id);
int
rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config)
{
	struct rte_cryptodev *dev;
	int diag;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];

	if (dev->data->dev_started) {
		CDEV_LOG_ERR(
		    "device %d must be stopped to allow configuration", dev_id);
		return -EBUSY;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

	/* Setup new number of queue pairs and reconfigure device. */
	diag = rte_cryptodev_queue_pairs_config(dev, config->nb_queue_pairs,
			config->socket_id);
	if (diag != 0) {
		CDEV_LOG_ERR("dev%d rte_crypto_dev_queue_pairs_config = %d",
				dev_id, diag);
		return diag;
	}

	/* Setup Session mempool for device */
	diag = rte_cryptodev_sym_session_pool_create(dev,
			config->session_mp.nb_objs,
			config->session_mp.cache_size,
			config->socket_id);
	if (diag != 0)
		return diag;

	return (*dev->dev_ops->dev_configure)(dev, config);
}
int
rte_cryptodev_start(uint8_t dev_id)
{
	struct rte_cryptodev *dev;
	int diag;

	CDEV_LOG_DEBUG("Start dev_id=%" PRIu8, dev_id);

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);

	if (dev->data->dev_started != 0) {
		CDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already started",
			dev_id);
		return 0;
	}

	diag = (*dev->dev_ops->dev_start)(dev);
	if (diag == 0)
		dev->data->dev_started = 1;
	else
		return diag;

	return 0;
}
void
rte_cryptodev_stop(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return;
	}

	dev = &rte_crypto_devices[dev_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);

	if (dev->data->dev_started == 0) {
		CDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already stopped",
			dev_id);
		return;
	}

	(*dev->dev_ops->dev_stop)(dev);
	dev->data->dev_started = 0;
}
int
rte_cryptodev_close(uint8_t dev_id)
{
	struct rte_cryptodev *dev;
	int retval;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -1;
	}

	dev = &rte_crypto_devices[dev_id];

	/* Device must be stopped before it can be closed */
	if (dev->data->dev_started == 1) {
		CDEV_LOG_ERR("Device %u must be stopped before closing",
				dev_id);
		return -EBUSY;
	}

	/* We can't close the device if there are outstanding sessions in use */
	if (dev->data->session_pool != NULL) {
		if (!rte_mempool_full(dev->data->session_pool)) {
			CDEV_LOG_ERR("dev_id=%u close failed, session mempool "
					"has sessions still in use, free "
					"all sessions before calling close",
					(unsigned)dev_id);
			return -EBUSY;
		}
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
	retval = (*dev->dev_ops->dev_close)(dev);

	if (retval < 0)
		return retval;

	return 0;
}
int
rte_cryptodev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
		const struct rte_cryptodev_qp_conf *qp_conf, int socket_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	if (queue_pair_id >= dev->data->nb_queue_pairs) {
		CDEV_LOG_ERR("Invalid queue_pair_id=%d", queue_pair_id);
		return -EINVAL;
	}

	if (dev->data->dev_started) {
		CDEV_LOG_ERR(
		    "device %d must be stopped to allow configuration", dev_id);
		return -EBUSY;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_setup, -ENOTSUP);

	return (*dev->dev_ops->queue_pair_setup)(dev, queue_pair_id, qp_conf,
			socket_id);
}
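/*
 * Usage sketch (illustrative only): minimal bring-up of one device with a
 * single queue pair. All pool and descriptor sizes are example values.
 *
 *	struct rte_cryptodev_config conf = {
 *		.socket_id = rte_socket_id(),
 *		.nb_queue_pairs = 1,
 *		.session_mp = { .nb_objs = 2048, .cache_size = 64 },
 *	};
 *	struct rte_cryptodev_qp_conf qp_conf = { .nb_descriptors = 2048 };
 *
 *	if (rte_cryptodev_configure(dev_id, &conf) < 0 ||
 *	    rte_cryptodev_queue_pair_setup(dev_id, 0, &qp_conf,
 *			rte_socket_id()) < 0 ||
 *	    rte_cryptodev_start(dev_id) < 0)
 *		printf("bring-up of dev %u failed\n", dev_id);
 */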
int
rte_cryptodev_stats_get(uint8_t dev_id, struct rte_cryptodev_stats *stats)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		return -ENODEV;
	}

	if (stats == NULL) {
		CDEV_LOG_ERR("Invalid stats ptr");
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	memset(stats, 0, sizeof(*stats));

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
	(*dev->dev_ops->stats_get)(dev, stats);
	return 0;
}
void
rte_cryptodev_stats_reset(uint8_t dev_id)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return;
	}

	dev = &rte_crypto_devices[dev_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->stats_reset);
	(*dev->dev_ops->stats_reset)(dev);
}
void
rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info)
{
	struct rte_cryptodev *dev;

	if (dev_id >= cryptodev_globals.nb_devs) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		return;
	}

	dev = &rte_crypto_devices[dev_id];

	memset(dev_info, 0, sizeof(struct rte_cryptodev_info));

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
	(*dev->dev_ops->dev_infos_get)(dev, dev_info);

	dev_info->driver_name = dev->device->driver->name;
}
int
rte_cryptodev_callback_register(uint8_t dev_id,
			enum rte_cryptodev_event_type event,
			rte_cryptodev_cb_fn cb_fn, void *cb_arg)
{
	struct rte_cryptodev *dev;
	struct rte_cryptodev_callback *user_cb;

	if (!cb_fn)
		return -EINVAL;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	rte_spinlock_lock(&rte_cryptodev_cb_lock);

	TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
		if (user_cb->cb_fn == cb_fn &&
			user_cb->cb_arg == cb_arg &&
			user_cb->event == event) {
			break;
		}
	}

	/* create a new callback. */
	if (user_cb == NULL) {
		user_cb = rte_zmalloc("INTR_USER_CALLBACK",
				sizeof(struct rte_cryptodev_callback), 0);
		if (user_cb != NULL) {
			user_cb->cb_fn = cb_fn;
			user_cb->cb_arg = cb_arg;
			user_cb->event = event;
			TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), user_cb, next);
		}
	}

	rte_spinlock_unlock(&rte_cryptodev_cb_lock);
	return (user_cb == NULL) ? -ENOMEM : 0;
}
int
rte_cryptodev_callback_unregister(uint8_t dev_id,
			enum rte_cryptodev_event_type event,
			rte_cryptodev_cb_fn cb_fn, void *cb_arg)
{
	int ret;
	struct rte_cryptodev *dev;
	struct rte_cryptodev_callback *cb, *next;

	if (!cb_fn)
		return -EINVAL;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[dev_id];
	rte_spinlock_lock(&rte_cryptodev_cb_lock);

	ret = 0;
	for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; cb = next) {

		next = TAILQ_NEXT(cb, next);

		if (cb->cb_fn != cb_fn || cb->event != event ||
				(cb->cb_arg != (void *)-1 &&
				cb->cb_arg != cb_arg))
			continue;

		/*
		 * if this callback is not executing right now,
		 * then remove it.
		 */
		if (cb->active == 0) {
			TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
			rte_free(cb);
		} else {
			ret = -EAGAIN;
		}
	}

	rte_spinlock_unlock(&rte_cryptodev_cb_lock);
	return ret;
}
void
rte_cryptodev_pmd_callback_process(struct rte_cryptodev *dev,
	enum rte_cryptodev_event_type event)
{
	struct rte_cryptodev_callback *cb_lst;
	struct rte_cryptodev_callback dev_cb;

	rte_spinlock_lock(&rte_cryptodev_cb_lock);
	TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
		if (cb_lst->cb_fn == NULL || cb_lst->event != event)
			continue;
		dev_cb = *cb_lst;
		cb_lst->active = 1;
		rte_spinlock_unlock(&rte_cryptodev_cb_lock);
		dev_cb.cb_fn(dev->data->dev_id, dev_cb.event,
						dev_cb.cb_arg);
		rte_spinlock_lock(&rte_cryptodev_cb_lock);
		cb_lst->active = 0;
	}
	rte_spinlock_unlock(&rte_cryptodev_cb_lock);
}
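/*
 * Usage sketch (illustrative only): registering an application handler for
 * device error events. my_event_cb is a hypothetical name; the signature
 * follows rte_cryptodev_cb_fn.
 *
 *	static void
 *	my_event_cb(uint8_t dev_id, enum rte_cryptodev_event_type event,
 *			void *cb_arg)
 *	{
 *		// react to the event, e.g. mark the device unusable
 *	}
 *
 *	rte_cryptodev_callback_register(dev_id,
 *			RTE_CRYPTODEV_EVENT_ERROR, my_event_cb, NULL);
 */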
static void
rte_cryptodev_sym_session_init(struct rte_mempool *mp,
		void *opaque_arg,
		void *_sess,
		__rte_unused unsigned int i)
{
	struct rte_cryptodev_sym_session *sess = _sess;
	struct rte_cryptodev *dev = opaque_arg;

	memset(sess, 0, mp->elt_size);

	sess->dev_id = dev->data->dev_id;
	sess->dev_type = dev->dev_type;
	sess->mp = mp;

	if (dev->dev_ops->session_initialize)
		(*dev->dev_ops->session_initialize)(mp, sess);
}
static int
rte_cryptodev_sym_session_pool_create(struct rte_cryptodev *dev,
		unsigned int nb_objs, unsigned int obj_cache_size,
		int socket_id)
{
	char mp_name[RTE_CRYPTODEV_NAME_MAX_LEN];
	unsigned int priv_sess_size;

	unsigned int n = snprintf(mp_name, sizeof(mp_name), "cdev_%d_sess_mp",
			dev->data->dev_id);
	if (n >= sizeof(mp_name)) {
		CDEV_LOG_ERR("Unable to create unique name for session mempool");
		return -ENOMEM;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->session_get_size, -ENOTSUP);
	priv_sess_size = (*dev->dev_ops->session_get_size)(dev);
	if (priv_sess_size == 0) {
		CDEV_LOG_ERR("%s returned an invalid private session size",
						dev->data->name);
		return -ENOMEM;
	}

	unsigned int elt_size = sizeof(struct rte_cryptodev_sym_session) +
			priv_sess_size;

	dev->data->session_pool = rte_mempool_lookup(mp_name);
	if (dev->data->session_pool != NULL) {
		if ((dev->data->session_pool->elt_size != elt_size) ||
				(dev->data->session_pool->cache_size <
				obj_cache_size) ||
				(dev->data->session_pool->size < nb_objs)) {

			CDEV_LOG_ERR("%s mempool already exists with different"
					" initialization parameters", mp_name);
			dev->data->session_pool = NULL;
			return -ENOMEM;
		}
	} else {
		dev->data->session_pool = rte_mempool_create(
				mp_name, /* mempool name */
				nb_objs, /* number of elements */
				elt_size, /* element size */
				obj_cache_size, /* cache size */
				0, /* private data size */
				NULL, /* obj initialization constructor */
				NULL, /* obj initialization constructor arg */
				rte_cryptodev_sym_session_init,
				/* obj constructor */
				dev, /* obj constructor arg */
				socket_id, /* socket id */
				0); /* flags */

		if (dev->data->session_pool == NULL) {
			CDEV_LOG_ERR("%s mempool allocation failed", mp_name);
			return -ENOMEM;
		}
	}

	CDEV_LOG_DEBUG("%s mempool created!", mp_name);
	return 0;
}
struct rte_cryptodev_sym_session *
rte_cryptodev_sym_session_create(uint8_t dev_id,
		struct rte_crypto_sym_xform *xform)
{
	struct rte_cryptodev *dev;
	struct rte_cryptodev_sym_session *sess;
	void *_sess;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		return NULL;
	}

	dev = &rte_crypto_devices[dev_id];

	/* Allocate a session structure from the session pool */
	if (rte_mempool_get(dev->data->session_pool, &_sess)) {
		CDEV_LOG_ERR("Couldn't get object from session mempool");
		return NULL;
	}

	sess = _sess;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->session_configure, NULL);
	if (dev->dev_ops->session_configure(dev, xform, sess->_private) ==
			NULL) {
		CDEV_LOG_ERR("dev_id %d failed to configure session details",
				dev_id);

		/* Return session to mempool */
		rte_mempool_put(sess->mp, _sess);
		return NULL;
	}

	return sess;
}
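/*
 * Usage sketch (illustrative only): creating a symmetric session for
 * AES-CBC encryption. The key buffer, the 16 byte lengths and IV_OFFSET
 * (the IV location within the crypto op) are hypothetical example values.
 *
 *	struct rte_crypto_sym_xform cipher_xform = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.cipher = {
 *			.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
 *			.algo = RTE_CRYPTO_CIPHER_AES_CBC,
 *			.key = { .data = key, .length = 16 },
 *			.iv = { .offset = IV_OFFSET, .length = 16 },
 *		},
 *	};
 *	struct rte_cryptodev_sym_session *sess =
 *		rte_cryptodev_sym_session_create(dev_id, &cipher_xform);
 */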
int
rte_cryptodev_queue_pair_attach_sym_session(uint16_t qp_id,
		struct rte_cryptodev_sym_session *sess)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(sess->dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", sess->dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[sess->dev_id];

	/* The API is optional, no error is returned if the driver does not
	 * support it.
	 */
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->qp_attach_session, 0);
	if (dev->dev_ops->qp_attach_session(dev, qp_id, sess->_private)) {
		CDEV_LOG_ERR("dev_id %d failed to attach qp: %d with session",
				sess->dev_id, qp_id);
		return -EPERM;
	}

	return 0;
}
int
rte_cryptodev_queue_pair_detach_sym_session(uint16_t qp_id,
		struct rte_cryptodev_sym_session *sess)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(sess->dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", sess->dev_id);
		return -EINVAL;
	}

	dev = &rte_crypto_devices[sess->dev_id];

	/* The API is optional, no error is returned if the driver does not
	 * support it.
	 */
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->qp_detach_session, 0);
	if (dev->dev_ops->qp_detach_session(dev, qp_id, sess->_private)) {
		CDEV_LOG_ERR("dev_id %d failed to detach qp: %d from session",
				sess->dev_id, qp_id);
		return -EPERM;
	}

	return 0;
}
struct rte_cryptodev_sym_session *
rte_cryptodev_sym_session_free(uint8_t dev_id,
		struct rte_cryptodev_sym_session *sess)
{
	struct rte_cryptodev *dev;

	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
		return sess;
	}

	dev = &rte_crypto_devices[dev_id];

	/* Check the session belongs to this device type */
	if (sess->dev_type != dev->dev_type)
		return sess;

	/* Let device implementation clear session material */
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->session_clear, sess);
	dev->dev_ops->session_clear(dev, (void *)sess->_private);

	/* Return session to mempool */
	rte_mempool_put(sess->mp, (void *)sess);

	return NULL;
}
/** Initialise rte_crypto_op mempool element */
static void
rte_crypto_op_init(struct rte_mempool *mempool,
		void *opaque_arg,
		void *_op_data,
		__rte_unused unsigned int i)
{
	struct rte_crypto_op *op = _op_data;
	enum rte_crypto_op_type type = *(enum rte_crypto_op_type *)opaque_arg;

	memset(_op_data, 0, mempool->elt_size);

	__rte_crypto_op_reset(op, type);

	op->phys_addr = rte_mem_virt2phy(_op_data);
	op->mempool = mempool;
}
struct rte_mempool *
rte_crypto_op_pool_create(const char *name, enum rte_crypto_op_type type,
		unsigned int nb_elts, unsigned int cache_size,
		uint16_t priv_size, int socket_id)
{
	struct rte_crypto_op_pool_private *priv;

	unsigned int elt_size = sizeof(struct rte_crypto_op) +
			sizeof(struct rte_crypto_sym_op) +
			priv_size;

	/* lookup mempool in case already allocated */
	struct rte_mempool *mp = rte_mempool_lookup(name);

	if (mp != NULL) {
		priv = (struct rte_crypto_op_pool_private *)
				rte_mempool_get_priv(mp);

		if (mp->elt_size != elt_size ||
				mp->cache_size < cache_size ||
				mp->size < nb_elts ||
				priv->priv_size < priv_size) {
			mp = NULL;
			CDEV_LOG_ERR("Mempool %s already exists but with "
					"incompatible parameters", name);
			return NULL;
		}
		return mp;
	}

	mp = rte_mempool_create(
			name,
			nb_elts,
			elt_size,
			cache_size,
			sizeof(struct rte_crypto_op_pool_private),
			NULL,
			NULL,
			rte_crypto_op_init,
			&type,
			socket_id,
			0);

	if (mp == NULL) {
		CDEV_LOG_ERR("Failed to create mempool %s", name);
		return NULL;
	}

	priv = (struct rte_crypto_op_pool_private *)
			rte_mempool_get_priv(mp);

	priv->priv_size = priv_size;
	priv->type = type;

	return mp;
}
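/*
 * Usage sketch (illustrative only): creating a pool of symmetric crypto
 * operations and drawing one op from it. The pool name and sizes are
 * example values; rte_crypto_op_alloc() is the allocation helper from
 * rte_crypto.h.
 *
 *	struct rte_mempool *op_pool = rte_crypto_op_pool_create(
 *			"crypto_op_pool", RTE_CRYPTO_OP_TYPE_SYMMETRIC,
 *			8192, 128, 0, rte_socket_id());
 *	struct rte_crypto_op *op = rte_crypto_op_alloc(op_pool,
 *			RTE_CRYPTO_OP_TYPE_SYMMETRIC);
 */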
int
rte_cryptodev_pmd_create_dev_name(char *name, const char *dev_name_prefix)
{
	struct rte_cryptodev *dev = NULL;
	uint32_t i = 0;

	if (name == NULL)
		return -EINVAL;

	for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
		int ret = snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN,
				"%s_%u", dev_name_prefix, i);

		if (ret < 0)
			return ret;

		dev = rte_cryptodev_pmd_get_named_dev(name);
		if (!dev)
			return 0;
	}

	return -1;
}