/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2015-2017 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <string.h>	/* memset() */
#include <stdio.h>	/* snprintf() */

#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_cryptodev_pmd.h>

#include "rte_aesni_mb_pmd_private.h"
static const struct rte_cryptodev_capabilities aesni_mb_pmd_capabilities[] = {
	{	/* MD5 HMAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = { .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = { .algo = RTE_CRYPTO_AUTH_MD5_HMAC,
				/* block/key/digest size limits omitted */ }, } }, }
	},
	{	/* SHA1 HMAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = { .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = { .algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
				/* block/key/digest size limits omitted */ }, } }, }
	},
	{	/* SHA224 HMAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = { .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = { .algo = RTE_CRYPTO_AUTH_SHA224_HMAC,
				/* block/key/digest size limits omitted */ }, } }, }
	},
	{	/* SHA256 HMAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = { .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = { .algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
				/* block/key/digest size limits omitted */ }, } }, }
	},
	{	/* SHA384 HMAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = { .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = { .algo = RTE_CRYPTO_AUTH_SHA384_HMAC,
				/* block/key/digest size limits omitted */ }, } }, }
	},
	{	/* SHA512 HMAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = { .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = { .algo = RTE_CRYPTO_AUTH_SHA512_HMAC,
				/* block/key/digest size limits omitted */ }, } }, }
	},
	{	/* AES XCBC HMAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = { .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = { .algo = RTE_CRYPTO_AUTH_AES_XCBC_MAC,
				/* block/key/digest size limits omitted */ }, } }, }
	},
	{	/* AES CBC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = { .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
			{.cipher = { .algo = RTE_CRYPTO_CIPHER_AES_CBC,
				/* key/IV size limits omitted */ }, } }, }
	},
	{	/* AES CTR */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = { .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
			{.cipher = { .algo = RTE_CRYPTO_CIPHER_AES_CTR,
				/* key/IV size limits omitted */ }, } }, }
	},
	{	/* AES DOCSIS BPI */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = { .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
			{.cipher = { .algo = RTE_CRYPTO_CIPHER_AES_DOCSISBPI,
				/* key/IV size limits omitted */ }, } }, }
	},
	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};
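/*
 * Applications discover the table above through the generic capability API
 * rather than by reading it directly. A minimal sketch, assuming dev_id
 * names a valid cryptodev:
 *
 *	struct rte_cryptodev_sym_capability_idx idx = {
 *		.type = RTE_CRYPTO_SYM_XFORM_AUTH,
 *		.algo.auth = RTE_CRYPTO_AUTH_SHA1_HMAC,
 *	};
 *	const struct rte_cryptodev_symmetric_capability *cap =
 *		rte_cryptodev_sym_capability_get(dev_id, &idx);
 *
 * A non-NULL return means SHA1-HMAC is supported, and *cap carries the
 * key/digest size limits advertised here.
 */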
/** Configure device */
static int
aesni_mb_pmd_config(__rte_unused struct rte_cryptodev *dev,
		__rte_unused struct rte_cryptodev_config *config)
{
	return 0;
}

/** Start device */
static int
aesni_mb_pmd_start(__rte_unused struct rte_cryptodev *dev)
{
	return 0;
}

/** Stop device */
static void
aesni_mb_pmd_stop(__rte_unused struct rte_cryptodev *dev)
{
}

/** Close device */
static int
aesni_mb_pmd_close(__rte_unused struct rte_cryptodev *dev)
{
	return 0;
}
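/*
 * The four lifecycle callbacks above are deliberately no-ops: this is a
 * pure software PMD, so there is no device state to program on
 * configure/start/stop/close.
 */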
/** Get device statistics */
static void
aesni_mb_pmd_stats_get(struct rte_cryptodev *dev,
		struct rte_cryptodev_stats *stats)
{
	int qp_id;

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		struct aesni_mb_qp *qp = dev->data->queue_pairs[qp_id];

		stats->enqueued_count += qp->stats.enqueued_count;
		stats->dequeued_count += qp->stats.dequeued_count;

		stats->enqueue_err_count += qp->stats.enqueue_err_count;
		stats->dequeue_err_count += qp->stats.dequeue_err_count;
	}
}
/** Reset device statistics */
static void
aesni_mb_pmd_stats_reset(struct rte_cryptodev *dev)
{
	int qp_id;

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		struct aesni_mb_qp *qp = dev->data->queue_pairs[qp_id];

		memset(&qp->stats, 0, sizeof(qp->stats));
	}
}
/** Get device info */
static void
aesni_mb_pmd_info_get(struct rte_cryptodev *dev,
		struct rte_cryptodev_info *dev_info)
{
	struct aesni_mb_private *internals = dev->data->dev_private;

	if (dev_info != NULL) {
		dev_info->driver_id = dev->driver_id;
		dev_info->feature_flags = dev->feature_flags;
		dev_info->capabilities = aesni_mb_pmd_capabilities;
		dev_info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
		dev_info->sym.max_nb_sessions = internals->max_nb_sessions;
	}
}
/** Release queue pair */
static int
aesni_mb_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
{
	struct aesni_mb_qp *qp = dev->data->queue_pairs[qp_id];

	if (qp != NULL) {
		/*
		 * Free the ingress ring directly; looking it up with
		 * rte_ring_lookup(qp->name) would miss it, as the ring
		 * name carries a suffix (see the ring create helper below).
		 */
		rte_ring_free(qp->ingress_queue);
		rte_free(qp);
		dev->data->queue_pairs[qp_id] = NULL;
	}
	return 0;
}
/** Set a unique name for the queue pair based on the dev_id and qp_id */
static int
aesni_mb_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
		struct aesni_mb_qp *qp)
{
	unsigned int n = snprintf(qp->name, sizeof(qp->name),
			"aesni_mb_pmd_%u_qp_%u",
			dev->data->dev_id, qp->id);

	/* snprintf() reports truncation when n >= sizeof(qp->name) */
	if (n >= sizeof(qp->name))
		return -1;

	return 0;
}
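/* e.g. dev_id 0 and qp id 1 yield the name "aesni_mb_pmd_0_qp_1" */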
/** Create a ring to place processed operations on */
static struct rte_ring *
aesni_mb_pmd_qp_create_processed_ops_ring(struct aesni_mb_qp *qp,
		const char *str, unsigned int ring_size, int socket_id)
{
	struct rte_ring *r;
	char ring_name[RTE_CRYPTODEV_NAME_LEN];

	unsigned int n = snprintf(ring_name, sizeof(ring_name), "%s_%s",
			qp->name, str);
	if (n >= sizeof(ring_name))
		return NULL;

	r = rte_ring_lookup(ring_name);
	if (r) {
		if (rte_ring_get_size(r) >= ring_size) {
			MB_LOG_INFO("Reusing existing ring %s for processed ops",
					ring_name);
			return r;
		}

		MB_LOG_ERR("Unable to reuse existing ring %s for processed ops",
				ring_name);
		return NULL;
	}

	return rte_ring_create(ring_name, ring_size, socket_id,
			RING_F_SP_ENQ | RING_F_SC_DEQ);
}
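/*
 * The helper above looks the ring up before creating it, so a queue pair
 * that is torn down and set up again (device reconfiguration) reuses an
 * existing ring of sufficient size instead of leaking it.
 */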
/** Setup a queue pair */
static int
aesni_mb_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
		const struct rte_cryptodev_qp_conf *qp_conf,
		int socket_id)
{
	struct aesni_mb_qp *qp = NULL;
	struct aesni_mb_private *internals = dev->data->dev_private;

	/* Free memory prior to re-allocation if needed. */
	if (dev->data->queue_pairs[qp_id] != NULL)
		aesni_mb_pmd_qp_release(dev, qp_id);

	/* Allocate the queue pair data structure. */
	qp = rte_zmalloc_socket("AES-NI PMD Queue Pair", sizeof(*qp),
			RTE_CACHE_LINE_SIZE, socket_id);
	if (qp == NULL)
		return -ENOMEM;

	qp->id = qp_id;
	dev->data->queue_pairs[qp_id] = qp;

	if (aesni_mb_pmd_qp_set_unique_name(dev, qp))
		goto qp_setup_cleanup;

	qp->op_fns = &job_ops[internals->vector_mode];

	qp->ingress_queue = aesni_mb_pmd_qp_create_processed_ops_ring(qp,
			"ingress", qp_conf->nb_descriptors, socket_id);
	if (qp->ingress_queue == NULL)
		goto qp_setup_cleanup;

	qp->sess_mp = dev->data->session_pool;

	memset(&qp->stats, 0, sizeof(qp->stats));

	/* Initialise multi-buffer manager */
	(*qp->op_fns->job.init_mgr)(&qp->mb_mgr);

	return 0;

qp_setup_cleanup:
	rte_free(qp);
	dev->data->queue_pairs[qp_id] = NULL;
	return -1;
}
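/*
 * Each queue pair owns a private multi-buffer manager (qp->mb_mgr,
 * initialised above), so different queue pairs can be polled from
 * different lcores without locking between them.
 */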
/** Start queue pair */
static int
aesni_mb_pmd_qp_start(__rte_unused struct rte_cryptodev *dev,
		__rte_unused uint16_t queue_pair_id)
{
	return -ENOTSUP;
}

/** Stop queue pair */
static int
aesni_mb_pmd_qp_stop(__rte_unused struct rte_cryptodev *dev,
		__rte_unused uint16_t queue_pair_id)
{
	return -ENOTSUP;
}

/** Return the number of allocated queue pairs */
static uint32_t
aesni_mb_pmd_qp_count(struct rte_cryptodev *dev)
{
	return dev->data->nb_queue_pairs;
}
/** Returns the size of the aesni multi-buffer session structure */
static unsigned
aesni_mb_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
	return sizeof(struct aesni_mb_session);
}
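/*
 * The cryptodev framework uses the size reported above when carving
 * per-session private data out of the session mempool, so it must cover
 * the whole aesni_mb_session structure.
 */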
/** Configure an aesni multi-buffer session from a crypto xform chain */
static int
aesni_mb_pmd_session_configure(struct rte_cryptodev *dev,
		struct rte_crypto_sym_xform *xform, void *sess)
{
	struct aesni_mb_private *internals = dev->data->dev_private;

	if (unlikely(sess == NULL)) {
		MB_LOG_ERR("invalid session struct");
		return -EINVAL;
	}

	if (aesni_mb_set_session_parameters(&job_ops[internals->vector_mode],
			sess, xform) != 0) {
		MB_LOG_ERR("failed to configure session parameters");
		return -EINVAL;
	}

	return 0;
}
/** Clear the session memory so it doesn't leave key material behind */
static void
aesni_mb_pmd_session_clear(struct rte_cryptodev *dev __rte_unused, void *sess)
{
	/*
	 * Currently resets the whole data structure; a more selective reset
	 * of just the key material might be more performant.
	 */
	if (sess)
		memset(sess, 0, sizeof(struct aesni_mb_session));
}
struct rte_cryptodev_ops aesni_mb_pmd_ops = {
	.dev_configure = aesni_mb_pmd_config,
	.dev_start = aesni_mb_pmd_start,
	.dev_stop = aesni_mb_pmd_stop,
	.dev_close = aesni_mb_pmd_close,

	.stats_get = aesni_mb_pmd_stats_get,
	.stats_reset = aesni_mb_pmd_stats_reset,

	.dev_infos_get = aesni_mb_pmd_info_get,

	.queue_pair_setup = aesni_mb_pmd_qp_setup,
	.queue_pair_release = aesni_mb_pmd_qp_release,
	.queue_pair_start = aesni_mb_pmd_qp_start,
	.queue_pair_stop = aesni_mb_pmd_qp_stop,
	.queue_pair_count = aesni_mb_pmd_qp_count,

	.session_get_size = aesni_mb_pmd_session_get_size,
	.session_configure = aesni_mb_pmd_session_configure,
	.session_clear = aesni_mb_pmd_session_clear
};

struct rte_cryptodev_ops *rte_aesni_mb_pmd_ops = &aesni_mb_pmd_ops;
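/*
 * The device creation path (in the companion rte_aesni_mb_pmd.c) is
 * expected to hook this table up with dev->dev_ops = rte_aesni_mb_pmd_ops
 * when the PMD is probed.
 */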