1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2015-2017 Intel Corporation
7 #include <rte_common.h>
8 #include <rte_malloc.h>
9 #include <rte_cryptodev_pmd.h>
11 #include "rte_aesni_mb_pmd_private.h"
/*
 * Capability table advertised via aesni_mb_pmd_info_get().
 * NOTE(review): this view of the table is elided — only the .op, .xform_type
 * and .algo fields of each entry are visible; the key/digest/IV size ranges
 * that normally complete each capability presumably follow each .algo line
 * in the full file — confirm there.
 */
14 static const struct rte_cryptodev_capabilities aesni_mb_pmd_capabilities[] = {
/* MD5 HMAC auth */
16 .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
18 .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
20 .algo = RTE_CRYPTO_AUTH_MD5_HMAC,
/* SHA1 HMAC auth */
37 .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
39 .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
41 .algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
/* SHA224 HMAC auth */
58 .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
60 .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
62 .algo = RTE_CRYPTO_AUTH_SHA224_HMAC,
/* SHA256 HMAC auth */
79 .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
81 .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
83 .algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
/* SHA384 HMAC auth */
100 .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
102 .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
104 .algo = RTE_CRYPTO_AUTH_SHA384_HMAC,
/* SHA512 HMAC auth */
121 .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
123 .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
125 .algo = RTE_CRYPTO_AUTH_SHA512_HMAC,
141 { /* AES XCBC HMAC */
142 .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
144 .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
146 .algo = RTE_CRYPTO_AUTH_AES_XCBC_MAC,
/* AES CBC cipher */
163 .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
165 .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
167 .algo = RTE_CRYPTO_CIPHER_AES_CBC,
/* AES CTR cipher */
183 .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
185 .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
187 .algo = RTE_CRYPTO_CIPHER_AES_CTR,
202 { /* AES DOCSIS BPI */
203 .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
205 .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
207 .algo = RTE_CRYPTO_CIPHER_AES_DOCSISBPI,
/* DES CBC cipher */
223 .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
225 .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
227 .algo = RTE_CRYPTO_CIPHER_DES_CBC,
242 { /* DES DOCSIS BPI */
243 .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
245 .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
247 .algo = RTE_CRYPTO_CIPHER_DES_DOCSISBPI,
/* AES CCM AEAD (combined cipher + auth) */
263 .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
265 .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
267 .algo = RTE_CRYPTO_AEAD_AES_CCM,
/* AES CMAC auth */
293 .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
295 .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
297 .algo = RTE_CRYPTO_AUTH_AES_CMAC,
/* mandatory end-of-list sentinel */
313 RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
317 /** Configure device */
/*
 * dev_configure hook. Both parameters are marked unused, so this is a
 * no-op; the return statement (presumably "return 0;") is elided from
 * this view — confirm against the full file.
 */
319 aesni_mb_pmd_config(__rte_unused struct rte_cryptodev *dev,
320 __rte_unused struct rte_cryptodev_config *config)
/** Start device — parameter is unused, so a no-op hook; body elided in this view. */
327 aesni_mb_pmd_start(__rte_unused struct rte_cryptodev *dev)
/** Stop device — parameter is unused, so a no-op hook; body elided in this view. */
334 aesni_mb_pmd_stop(__rte_unused struct rte_cryptodev *dev)
/** Close device — parameter is unused, so a no-op hook; body elided in this view. */
340 aesni_mb_pmd_close(__rte_unused struct rte_cryptodev *dev)
346 /** Get device statistics */
/*
 * Aggregates every queue pair's counters into *stats. The qp_id
 * declaration, braces, and any NULL check on qp are elided from this
 * view; stats is presumably zeroed by the caller before aggregation —
 * confirm against the full file.
 */
348 aesni_mb_pmd_stats_get(struct rte_cryptodev *dev,
349 struct rte_cryptodev_stats *stats)
353 for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
354 struct aesni_mb_qp *qp = dev->data->queue_pairs[qp_id];
/* sum the successful enqueue/dequeue counts... */
356 stats->enqueued_count += qp->stats.enqueued_count;
357 stats->dequeued_count += qp->stats.dequeued_count;
/* ...and the corresponding error counts */
359 stats->enqueue_err_count += qp->stats.enqueue_err_count;
360 stats->dequeue_err_count += qp->stats.dequeue_err_count;
364 /** Reset device statistics */
/*
 * Zeroes each queue pair's stats block in place. The qp_id declaration
 * and closing braces are elided from this view.
 */
366 aesni_mb_pmd_stats_reset(struct rte_cryptodev *dev)
370 for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
371 struct aesni_mb_qp *qp = dev->data->queue_pairs[qp_id];
373 memset(&qp->stats, 0, sizeof(qp->stats));
378 /** Get device info */
/*
 * Fills the caller-supplied dev_info structure from the device and the
 * driver's private data; tolerates a NULL dev_info (no-op in that case).
 */
380 aesni_mb_pmd_info_get(struct rte_cryptodev *dev,
381 struct rte_cryptodev_info *dev_info)
383 struct aesni_mb_private *internals = dev->data->dev_private;
385 if (dev_info != NULL) {
386 dev_info->driver_id = dev->driver_id;
387 dev_info->feature_flags = dev->feature_flags;
/* advertise the static capability table defined at the top of this file */
388 dev_info->capabilities = aesni_mb_pmd_capabilities;
/* limits come from the per-device private configuration */
389 dev_info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
390 dev_info->sym.max_nb_sessions = internals->max_nb_sessions;
394 /** Release queue pair */
/*
 * Tears down queue pair qp_id. NOTE(review): the NULL check on qp, the
 * freeing of the looked-up ring and the rte_free of qp itself are elided
 * from this view — confirm against the full file.
 */
396 aesni_mb_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
398 struct aesni_mb_qp *qp = dev->data->queue_pairs[qp_id];
399 struct rte_ring *r = NULL;
/* find the qp's processed-ops ring by its unique name (presumably freed below) */
402 r = rte_ring_lookup(qp->name);
/* clear the slot so a later qp_setup can re-create this queue pair */
406 dev->data->queue_pairs[qp_id] = NULL;
411 /** Set a unique name for the queue pair based on its name, dev_id and qp_id */
/*
 * Writes "aesni_mb_pmd_<dev_id>_qp_<qp_id>" into qp->name. The return
 * statements (success/failure) are elided from this view.
 */
413 aesni_mb_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
414 struct aesni_mb_qp *qp)
416 unsigned n = snprintf(qp->name, sizeof(qp->name),
417 "aesni_mb_pmd_%u_qp_%u",
418 dev->data->dev_id, qp->id);
/* snprintf returns the would-be length: n >= size means truncation */
420 if (n >= sizeof(qp->name))
426 /** Create a ring to place processed operations on */
/*
 * Returns an SP/SC rte_ring of at least ring_size entries on socket_id,
 * reusing an existing ring of the same name when it is large enough.
 * NOTE(review): the snprintf format string (presumably combining qp->name
 * and str), the truncation/NULL returns and several braces are elided
 * from this view — confirm against the full file.
 */
427 static struct rte_ring *
428 aesni_mb_pmd_qp_create_processed_ops_ring(struct aesni_mb_qp *qp,
429 const char *str, unsigned int ring_size, int socket_id)
432 char ring_name[RTE_CRYPTODEV_NAME_MAX_LEN];
434 unsigned int n = snprintf(ring_name, sizeof(ring_name),
/* truncated ring name: fail rather than risk a name collision */
438 if (n >= sizeof(ring_name))
/* a ring with this name may already exist (e.g. after a restart) */
441 r = rte_ring_lookup(ring_name);
443 if (rte_ring_get_size(r) >= ring_size) {
444 MB_LOG_INFO("Reusing existing ring %s for processed ops",
/* an existing ring that is too small cannot be reused */
449 MB_LOG_ERR("Unable to reuse existing ring %s for processed ops",
/* no existing ring: create a fresh single-producer/single-consumer one */
454 return rte_ring_create(ring_name, ring_size, socket_id,
455 RING_F_SP_ENQ | RING_F_SC_DEQ);
458 /** Setup a queue pair */
/*
 * Allocates and initialises queue pair qp_id on the given NUMA socket:
 * names it, binds the job ops for the device's vector mode, creates the
 * ingress ring, records the session pool and initialises the multi-buffer
 * manager. NOTE(review): the allocation-failure check after
 * rte_zmalloc_socket, the qp_setup_cleanup label/body, the digest mempool
 * creation using mp_name and the return statements are all elided from
 * this view — confirm against the full file.
 */
460 aesni_mb_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
461 const struct rte_cryptodev_qp_conf *qp_conf,
462 int socket_id, struct rte_mempool *session_pool)
464 struct aesni_mb_qp *qp = NULL;
465 struct aesni_mb_private *internals = dev->data->dev_private;
467 /* Free memory prior to re-allocation if needed. */
468 if (dev->data->queue_pairs[qp_id] != NULL)
469 aesni_mb_pmd_qp_release(dev, qp_id);
471 /* Allocate the queue pair data structure. */
472 qp = rte_zmalloc_socket("AES-NI PMD Queue Pair", sizeof(*qp),
473 RTE_CACHE_LINE_SIZE, socket_id);
478 dev->data->queue_pairs[qp_id] = qp;
480 if (aesni_mb_pmd_qp_set_unique_name(dev, qp))
481 goto qp_setup_cleanup;
/* select the job ops matching the device's vector mode (presumably SSE/AVX/...) */
484 qp->op_fns = &job_ops[internals->vector_mode];
486 qp->ingress_queue = aesni_mb_pmd_qp_create_processed_ops_ring(qp,
487 "ingress", qp_conf->nb_descriptors, socket_id);
488 if (qp->ingress_queue == NULL)
489 goto qp_setup_cleanup;
491 qp->sess_mp = session_pool;
493 memset(&qp->stats, 0, sizeof(qp->stats));
/* per-queue-pair digest mempool name; its use is elided from this view */
495 char mp_name[RTE_MEMPOOL_NAMESIZE];
497 snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
498 "digest_mp_%u_%u", dev->data->dev_id, qp_id);
500 /* Initialise multi-buffer manager */
501 (*qp->op_fns->job.init_mgr)(&qp->mb_mgr);
511 /** Start queue pair */
/* Both parameters are unused, so a no-op hook; the return statement
 * (presumably -ENOTSUP) is elided from this view. */
513 aesni_mb_pmd_qp_start(__rte_unused struct rte_cryptodev *dev,
514 __rte_unused uint16_t queue_pair_id)
519 /** Stop queue pair */
/* Both parameters are unused, so a no-op hook; the return statement
 * (presumably -ENOTSUP) is elided from this view. */
521 aesni_mb_pmd_qp_stop(__rte_unused struct rte_cryptodev *dev,
522 __rte_unused uint16_t queue_pair_id)
527 /** Return the number of allocated queue pairs */
529 aesni_mb_pmd_qp_count(struct rte_cryptodev *dev)
/* simply reports the count maintained in the generic device data */
531 return dev->data->nb_queue_pairs;
534 /** Returns the size of the aesni multi-buffer session structure */
/* Used by the framework to size the per-driver session mempool elements. */
536 aesni_mb_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
538 return sizeof(struct aesni_mb_session);
541 /** Configure an aesni multi-buffer session from a crypto xform chain */
/*
 * Draws a private session object from mempool, parses the xform chain into
 * it, and attaches it to the generic session under this driver's id.
 * NOTE(review): the ret declaration, the error-return statements and the
 * second argument of set_session_private_data are elided from this view —
 * confirm against the full file.
 */
543 aesni_mb_pmd_session_configure(struct rte_cryptodev *dev,
544 struct rte_crypto_sym_xform *xform,
545 struct rte_cryptodev_sym_session *sess,
546 struct rte_mempool *mempool)
548 void *sess_private_data;
549 struct aesni_mb_private *internals = dev->data->dev_private;
/* guard against a missing session container */
552 if (unlikely(sess == NULL)) {
553 MB_LOG_ERR("invalid session struct");
/* rte_mempool_get returns non-zero when the pool is exhausted */
557 if (rte_mempool_get(mempool, &sess_private_data)) {
559 "Couldn't get object from session mempool");
/* parse the xform chain using the job ops for this device's vector mode */
563 ret = aesni_mb_set_session_parameters(&job_ops[internals->vector_mode],
564 sess_private_data, xform);
566 MB_LOG_ERR("failed configure session parameters");
568 /* Return session to mempool */
569 rte_mempool_put(mempool, sess_private_data);
/* success: attach the private data under this driver's id */
573 set_session_private_data(sess, dev->driver_id,
579 /** Clear the memory of session so it doesn't leave key material behind */
/*
 * Wipes the driver-private session data and returns it to its mempool.
 * NOTE(review): the NULL check on sess_priv and the enclosing braces are
 * elided from this view — confirm against the full file.
 */
581 aesni_mb_pmd_session_clear(struct rte_cryptodev *dev,
582 struct rte_cryptodev_sym_session *sess)
584 uint8_t index = dev->driver_id;
585 void *sess_priv = get_session_private_data(sess, index);
587 /* Zero out the whole structure */
589 memset(sess_priv, 0, sizeof(struct aesni_mb_session))
590 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
/* detach from the generic session, then hand the object back to its pool */
591 set_session_private_data(sess, index, NULL);
592 rte_mempool_put(sess_mp, sess_priv);
/** Cryptodev operations table wiring the hooks above into the framework. */
596 struct rte_cryptodev_ops aesni_mb_pmd_ops = {
597 .dev_configure = aesni_mb_pmd_config,
598 .dev_start = aesni_mb_pmd_start,
599 .dev_stop = aesni_mb_pmd_stop,
600 .dev_close = aesni_mb_pmd_close,
602 .stats_get = aesni_mb_pmd_stats_get,
603 .stats_reset = aesni_mb_pmd_stats_reset,
605 .dev_infos_get = aesni_mb_pmd_info_get,
607 .queue_pair_setup = aesni_mb_pmd_qp_setup,
608 .queue_pair_release = aesni_mb_pmd_qp_release,
609 .queue_pair_start = aesni_mb_pmd_qp_start,
610 .queue_pair_stop = aesni_mb_pmd_qp_stop,
611 .queue_pair_count = aesni_mb_pmd_qp_count,
613 .session_get_size = aesni_mb_pmd_session_get_size,
614 .session_configure = aesni_mb_pmd_session_configure,
615 .session_clear = aesni_mb_pmd_session_clear
/* (closing brace of the initializer is elided from this view) */
/* Exported pointer, presumably consumed by the PMD registration code. */
618 struct rte_cryptodev_ops *rte_aesni_mb_pmd_ops = &aesni_mb_pmd_ops;