/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015-2017 Intel Corporation
 */

#include <string.h>

#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_cryptodev_pmd.h>

#include "rte_aesni_mb_pmd_private.h"
static const struct rte_cryptodev_capabilities aesni_mb_pmd_capabilities[] = {
	{	/* MD5 HMAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = { .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = { .algo = RTE_CRYPTO_AUTH_MD5_HMAC,
				/* block/key/digest size ranges elided */ }, } }, }
	},
	{	/* SHA1 HMAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = { .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = { .algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
				/* block/key/digest size ranges elided */ }, } }, }
	},
	{	/* SHA224 HMAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = { .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = { .algo = RTE_CRYPTO_AUTH_SHA224_HMAC,
				/* block/key/digest size ranges elided */ }, } }, }
	},
	{	/* SHA256 HMAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = { .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = { .algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
				/* block/key/digest size ranges elided */ }, } }, }
	},
	{	/* SHA384 HMAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = { .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = { .algo = RTE_CRYPTO_AUTH_SHA384_HMAC,
				/* block/key/digest size ranges elided */ }, } }, }
	},
	{	/* SHA512 HMAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = { .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = { .algo = RTE_CRYPTO_AUTH_SHA512_HMAC,
				/* block/key/digest size ranges elided */ }, } }, }
	},
	{	/* AES XCBC MAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = { .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = { .algo = RTE_CRYPTO_AUTH_AES_XCBC_MAC,
				/* block/key/digest size ranges elided */ }, } }, }
	},
	{	/* AES CBC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = { .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
			{.cipher = { .algo = RTE_CRYPTO_CIPHER_AES_CBC,
				/* block/key/iv size ranges elided */ }, } }, }
	},
	{	/* AES CTR */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = { .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
			{.cipher = { .algo = RTE_CRYPTO_CIPHER_AES_CTR,
				/* block/key/iv size ranges elided */ }, } }, }
	},
	{	/* AES DOCSIS BPI */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = { .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
			{.cipher = { .algo = RTE_CRYPTO_CIPHER_AES_DOCSISBPI,
				/* block/key/iv size ranges elided */ }, } }, }
	},
	{	/* DES CBC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = { .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
			{.cipher = { .algo = RTE_CRYPTO_CIPHER_DES_CBC,
				/* block/key/iv size ranges elided */ }, } }, }
	},
	{	/* DES DOCSIS BPI */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = { .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
			{.cipher = { .algo = RTE_CRYPTO_CIPHER_DES_DOCSISBPI,
				/* block/key/iv size ranges elided */ }, } }, }
	},
	{	/* AES CCM */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = { .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
			{.aead = { .algo = RTE_CRYPTO_AEAD_AES_CCM,
				/* key/digest/aad/iv size ranges elided */ }, } }, }
	},

	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};
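
/*
 * Illustrative only (not part of this driver): an application can probe
 * one of the entries above through the generic capability helpers, e.g.
 * for AES-128-CBC with a 16-byte key and 16-byte IV:
 *
 *	struct rte_cryptodev_sym_capability_idx idx = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.algo.cipher = RTE_CRYPTO_CIPHER_AES_CBC,
 *	};
 *	const struct rte_cryptodev_symmetric_capability *cap =
 *		rte_cryptodev_sym_capability_get(dev_id, &idx);
 *	if (cap != NULL &&
 *	    rte_cryptodev_sym_capability_check_cipher(cap, 16, 16) == 0)
 *		... AES-128-CBC with a 16-byte IV is usable ...
 */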
/** Configure device */
static int
aesni_mb_pmd_config(__rte_unused struct rte_cryptodev *dev,
		__rte_unused struct rte_cryptodev_config *config)
{
	return 0;
}

/** Start device */
static int
aesni_mb_pmd_start(__rte_unused struct rte_cryptodev *dev)
{
	return 0;
}

/** Stop device */
static void
aesni_mb_pmd_stop(__rte_unused struct rte_cryptodev *dev)
{
}

/** Close device */
static int
aesni_mb_pmd_close(__rte_unused struct rte_cryptodev *dev)
{
	return 0;
}
/** Get device statistics */
static void
aesni_mb_pmd_stats_get(struct rte_cryptodev *dev,
		struct rte_cryptodev_stats *stats)
{
	int qp_id;

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		struct aesni_mb_qp *qp = dev->data->queue_pairs[qp_id];

		stats->enqueued_count += qp->stats.enqueued_count;
		stats->dequeued_count += qp->stats.dequeued_count;

		stats->enqueue_err_count += qp->stats.enqueue_err_count;
		stats->dequeue_err_count += qp->stats.dequeue_err_count;
	}
}
/** Reset device statistics */
static void
aesni_mb_pmd_stats_reset(struct rte_cryptodev *dev)
{
	int qp_id;

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		struct aesni_mb_qp *qp = dev->data->queue_pairs[qp_id];

		memset(&qp->stats, 0, sizeof(qp->stats));
	}
}
/** Get device info */
static void
aesni_mb_pmd_info_get(struct rte_cryptodev *dev,
		struct rte_cryptodev_info *dev_info)
{
	struct aesni_mb_private *internals = dev->data->dev_private;

	if (dev_info != NULL) {
		dev_info->driver_id = dev->driver_id;
		dev_info->feature_flags = dev->feature_flags;
		dev_info->capabilities = aesni_mb_pmd_capabilities;
		dev_info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
		dev_info->sym.max_nb_sessions = internals->max_nb_sessions;
	}
}
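
/*
 * Note: max_nb_queue_pairs and max_nb_sessions reflect the limits this
 * software PMD was created with (device arguments), not hardware limits.
 */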
/** Release queue pair */
static int
aesni_mb_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
{
	struct aesni_mb_qp *qp = dev->data->queue_pairs[qp_id];
	struct rte_ring *r = NULL;

	if (qp != NULL) {
		r = rte_ring_lookup(qp->name);
		if (r)
			rte_ring_free(r);
		rte_free(qp);
		dev->data->queue_pairs[qp_id] = NULL;
	}
	return 0;
}
/** Set a unique name for the queue pair based on the device id and qp id */
static int
aesni_mb_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
		struct aesni_mb_qp *qp)
{
	unsigned n = snprintf(qp->name, sizeof(qp->name),
			"aesni_mb_pmd_%u_qp_%u",
			dev->data->dev_id, qp->id);

	if (n >= sizeof(qp->name))
		return -1;

	return 0;
}
/** Create a ring to place processed operations on */
static struct rte_ring *
aesni_mb_pmd_qp_create_processed_ops_ring(struct aesni_mb_qp *qp,
		const char *str, unsigned int ring_size, int socket_id)
{
	struct rte_ring *r;
	char ring_name[RTE_CRYPTODEV_NAME_MAX_LEN];

	unsigned int n = snprintf(ring_name, sizeof(ring_name), "%s_%s",
			qp->name, str);

	if (n >= sizeof(ring_name))
		return NULL;

	/* Reuse a ring left over from a previous setup if it is big enough */
	r = rte_ring_lookup(ring_name);
	if (r) {
		if (rte_ring_get_size(r) >= ring_size) {
			MB_LOG_INFO("Reusing existing ring %s for processed ops",
					ring_name);
			return r;
		}

		MB_LOG_ERR("Unable to reuse existing ring %s for processed ops",
				ring_name);
		return NULL;
	}

	return rte_ring_create(ring_name, ring_size, socket_id,
			RING_F_SP_ENQ | RING_F_SC_DEQ);
}
/** Setup a queue pair */
static int
aesni_mb_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
		const struct rte_cryptodev_qp_conf *qp_conf,
		int socket_id, struct rte_mempool *session_pool)
{
	struct aesni_mb_qp *qp = NULL;
	struct aesni_mb_private *internals = dev->data->dev_private;

	/* Free memory prior to re-allocation if needed. */
	if (dev->data->queue_pairs[qp_id] != NULL)
		aesni_mb_pmd_qp_release(dev, qp_id);

	/* Allocate the queue pair data structure. */
	qp = rte_zmalloc_socket("AES-NI PMD Queue Pair", sizeof(*qp),
			RTE_CACHE_LINE_SIZE, socket_id);
	if (qp == NULL)
		return -ENOMEM;

	qp->id = qp_id;
	dev->data->queue_pairs[qp_id] = qp;

	if (aesni_mb_pmd_qp_set_unique_name(dev, qp))
		goto qp_setup_cleanup;

	/* Select the multi-buffer job functions matching the detected
	 * CPU vector mode. */
	qp->op_fns = &job_ops[internals->vector_mode];

	qp->ingress_queue = aesni_mb_pmd_qp_create_processed_ops_ring(qp,
			"ingress", qp_conf->nb_descriptors, socket_id);
	if (qp->ingress_queue == NULL)
		goto qp_setup_cleanup;

	qp->sess_mp = session_pool;

	memset(&qp->stats, 0, sizeof(qp->stats));

	char mp_name[RTE_MEMPOOL_NAMESIZE];

	snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
			"digest_mp_%u_%u", dev->data->dev_id, qp_id);
	/* (creation of the per-qp digest mempool elided from this listing) */

	/* Initialise multi-buffer manager */
	(*qp->op_fns->job.init_mgr)(&qp->mb_mgr);

	return 0;

qp_setup_cleanup:
	rte_free(qp);

	return -1;
}
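
/*
 * Illustrative usage from an application (not part of this file),
 * assuming dev_id and session_pool were set up earlier:
 *
 *	struct rte_cryptodev_qp_conf qp_conf = { .nb_descriptors = 2048 };
 *	rte_cryptodev_queue_pair_setup(dev_id, 0, &qp_conf,
 *			rte_socket_id(), session_pool);
 */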
/** Start queue pair */
static int
aesni_mb_pmd_qp_start(__rte_unused struct rte_cryptodev *dev,
		__rte_unused uint16_t queue_pair_id)
{
	return -ENOTSUP;
}

/** Stop queue pair */
static int
aesni_mb_pmd_qp_stop(__rte_unused struct rte_cryptodev *dev,
		__rte_unused uint16_t queue_pair_id)
{
	return -ENOTSUP;
}
/** Return the number of allocated queue pairs */
static uint32_t
aesni_mb_pmd_qp_count(struct rte_cryptodev *dev)
{
	return dev->data->nb_queue_pairs;
}
/** Return the size of the aesni multi-buffer session structure */
static unsigned
aesni_mb_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
	return sizeof(struct aesni_mb_session);
}
/** Configure an aesni multi-buffer session from a crypto xform chain */
static int
aesni_mb_pmd_session_configure(struct rte_cryptodev *dev,
		struct rte_crypto_sym_xform *xform,
		struct rte_cryptodev_sym_session *sess,
		struct rte_mempool *mempool)
{
	void *sess_private_data;
	struct aesni_mb_private *internals = dev->data->dev_private;
	int ret;

	if (unlikely(sess == NULL)) {
		MB_LOG_ERR("invalid session struct");
		return -EINVAL;
	}

	if (rte_mempool_get(mempool, &sess_private_data)) {
		MB_LOG_ERR("Couldn't get object from session mempool");
		return -ENOMEM;
	}

	ret = aesni_mb_set_session_parameters(&job_ops[internals->vector_mode],
			sess_private_data, xform);
	if (ret != 0) {
		MB_LOG_ERR("failed to configure session parameters");

		/* Return session to mempool */
		rte_mempool_put(mempool, sess_private_data);
		return ret;
	}

	set_session_private_data(sess, dev->driver_id,
			sess_private_data);

	return 0;
}
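
/*
 * Illustrative session setup from an application (not part of this file):
 *
 *	struct rte_cryptodev_sym_session *sess =
 *		rte_cryptodev_sym_session_create(session_pool);
 *	if (rte_cryptodev_sym_session_init(dev_id, sess, &xform,
 *			session_pool) < 0)
 *		... handle error; success ends up in the configure op above ...
 */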
/** Clear the memory of session so it doesn't leave key material behind */
static void
aesni_mb_pmd_session_clear(struct rte_cryptodev *dev,
		struct rte_cryptodev_sym_session *sess)
{
	uint8_t index = dev->driver_id;
	void *sess_priv = get_session_private_data(sess, index);

	/* Zero out the whole structure */
	if (sess_priv) {
		memset(sess_priv, 0, sizeof(struct aesni_mb_session));
		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
		set_session_private_data(sess, index, NULL);
		rte_mempool_put(sess_mp, sess_priv);
	}
}
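
/*
 * Applications tear a session down with rte_cryptodev_sym_session_clear()
 * followed by rte_cryptodev_sym_session_free(); the clear step lands here,
 * so keys are zeroed before the object returns to its mempool.
 */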
struct rte_cryptodev_ops aesni_mb_pmd_ops = {
		.dev_configure		= aesni_mb_pmd_config,
		.dev_start		= aesni_mb_pmd_start,
		.dev_stop		= aesni_mb_pmd_stop,
		.dev_close		= aesni_mb_pmd_close,

		.stats_get		= aesni_mb_pmd_stats_get,
		.stats_reset		= aesni_mb_pmd_stats_reset,

		.dev_infos_get		= aesni_mb_pmd_info_get,

		.queue_pair_setup	= aesni_mb_pmd_qp_setup,
		.queue_pair_release	= aesni_mb_pmd_qp_release,
		.queue_pair_start	= aesni_mb_pmd_qp_start,
		.queue_pair_stop	= aesni_mb_pmd_qp_stop,
		.queue_pair_count	= aesni_mb_pmd_qp_count,

		.session_get_size	= aesni_mb_pmd_session_get_size,
		.session_configure	= aesni_mb_pmd_session_configure,
		.session_clear		= aesni_mb_pmd_session_clear
};

struct rte_cryptodev_ops *rte_aesni_mb_pmd_ops = &aesni_mb_pmd_ops;