/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <string.h>

#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_cryptodev_pmd.h>

#include "rte_aesni_mb_pmd_private.h"
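
/*
 * Capability list advertised to applications through
 * aesni_mb_pmd_info_get() below: one entry per symmetric algorithm
 * supported by the underlying Intel multi-buffer library.
 */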

static const struct rte_cryptodev_capabilities aesni_mb_pmd_capabilities[] = {
	{	/* MD5 HMAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_MD5_HMAC,
				/* block/key/digest size limits omitted in
				 * this and the following entries
				 */
			}, }
		}, }
	},
	{	/* SHA1 HMAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
			}, }
		}, }
	},
	{	/* SHA224 HMAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_SHA224_HMAC,
			}, }
		}, }
	},
	{	/* SHA256 HMAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
			}, }
		}, }
	},
	{	/* SHA384 HMAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_SHA384_HMAC,
			}, }
		}, }
	},
	{	/* SHA512 HMAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_SHA512_HMAC,
			}, }
		}, }
	},
	{	/* AES XCBC HMAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_AES_XCBC_MAC,
			}, }
		}, }
	},
	{	/* AES CBC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
			{.cipher = {
				.algo = RTE_CRYPTO_CIPHER_AES_CBC,
			}, }
		}, }
	},
	{	/* AES CTR */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
			{.cipher = {
				.algo = RTE_CRYPTO_CIPHER_AES_CTR,
			}, }
		}, }
	},
	{	/* AES DOCSIS BPI */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
			{.cipher = {
				.algo = RTE_CRYPTO_CIPHER_AES_DOCSISBPI,
			}, }
		}, }
	},
	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};
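
/*
 * The multi-buffer PMD is a pure software device, so the generic device
 * lifecycle hooks below have no hardware to program and simply succeed.
 */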

/** Configure device */
static int
aesni_mb_pmd_config(__rte_unused struct rte_cryptodev *dev,
		__rte_unused struct rte_cryptodev_config *config)
{
	return 0;
}

/** Start device */
static int
aesni_mb_pmd_start(__rte_unused struct rte_cryptodev *dev)
{
	return 0;
}

/** Stop device */
static void
aesni_mb_pmd_stop(__rte_unused struct rte_cryptodev *dev)
{
}

/** Close device */
static int
aesni_mb_pmd_close(__rte_unused struct rte_cryptodev *dev)
{
	return 0;
}

/** Get device statistics */
static void
aesni_mb_pmd_stats_get(struct rte_cryptodev *dev,
		struct rte_cryptodev_stats *stats)
{
	int qp_id;

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		struct aesni_mb_qp *qp = dev->data->queue_pairs[qp_id];

		stats->enqueued_count += qp->stats.enqueued_count;
		stats->dequeued_count += qp->stats.dequeued_count;

		stats->enqueue_err_count += qp->stats.enqueue_err_count;
		stats->dequeue_err_count += qp->stats.dequeue_err_count;
	}
}

/** Reset device statistics */
static void
aesni_mb_pmd_stats_reset(struct rte_cryptodev *dev)
{
	int qp_id;

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		struct aesni_mb_qp *qp = dev->data->queue_pairs[qp_id];

		memset(&qp->stats, 0, sizeof(qp->stats));
	}
}

/** Get device info */
static void
aesni_mb_pmd_info_get(struct rte_cryptodev *dev,
		struct rte_cryptodev_info *dev_info)
{
	struct aesni_mb_private *internals = dev->data->dev_private;

	if (dev_info != NULL) {
		dev_info->dev_type = dev->dev_type;
		dev_info->feature_flags = dev->feature_flags;
		dev_info->capabilities = aesni_mb_pmd_capabilities;
		dev_info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
		dev_info->sym.max_nb_sessions = internals->max_nb_sessions;
	}
}

/** Release queue pair */
static int
aesni_mb_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
{
	struct aesni_mb_qp *qp = dev->data->queue_pairs[qp_id];
	struct rte_ring *r = NULL;

	if (qp != NULL) {
		r = rte_ring_lookup(qp->name);
		if (r)
			rte_ring_free(r);
		rte_free(qp);
		dev->data->queue_pairs[qp_id] = NULL;
	}
	return 0;
}

/** Set a unique name for the queue pair based on its name, dev_id and qp_id */
static int
aesni_mb_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
		struct aesni_mb_qp *qp)
{
	unsigned n = snprintf(qp->name, sizeof(qp->name),
			"aesni_mb_pmd_%u_qp_%u",
			dev->data->dev_id, qp->id);

	/* snprintf() reports truncation when the return value reaches the
	 * buffer size, hence >= rather than >
	 */
	if (n >= sizeof(qp->name))
		return -1;

	return 0;
}
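
/*
 * The queue pair name built above also serves as the stem of the ring
 * names created in aesni_mb_pmd_qp_create_processed_ops_ring() below.
 */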

/** Create a ring to place processed operations on */
static struct rte_ring *
aesni_mb_pmd_qp_create_processed_ops_ring(struct aesni_mb_qp *qp,
		const char *str, unsigned int ring_size, int socket_id)
{
	struct rte_ring *r;
	char ring_name[RTE_CRYPTODEV_NAME_LEN];

	unsigned int n = snprintf(ring_name, sizeof(ring_name), "%s_%s",
			qp->name, str);

	if (n >= sizeof(ring_name))
		return NULL;

	/* Reuse an already registered ring if it is large enough. */
	r = rte_ring_lookup(ring_name);
	if (r) {
		if (rte_ring_get_size(r) >= ring_size) {
			MB_LOG_INFO("Reusing existing ring %s for processed ops",
					ring_name);
			return r;
		}

		MB_LOG_ERR("Unable to reuse existing ring %s for processed ops",
				ring_name);
		return NULL;
	}

	return rte_ring_create(ring_name, ring_size, socket_id,
			RING_F_SP_ENQ | RING_F_SC_DEQ);
}
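
/*
 * The ring created above stages enqueued ops in front of the multi-buffer
 * job manager; the SP/SC flags match the cryptodev usage model of a single
 * lcore driving each queue pair.
 */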

/** Setup a queue pair */
static int
aesni_mb_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
		const struct rte_cryptodev_qp_conf *qp_conf,
		int socket_id)
{
	struct aesni_mb_qp *qp = NULL;
	struct aesni_mb_private *internals = dev->data->dev_private;

	/* Free memory prior to re-allocation if needed. */
	if (dev->data->queue_pairs[qp_id] != NULL)
		aesni_mb_pmd_qp_release(dev, qp_id);

	/* Allocate the queue pair data structure. */
	qp = rte_zmalloc_socket("AES-NI PMD Queue Pair", sizeof(*qp),
			RTE_CACHE_LINE_SIZE, socket_id);
	if (qp == NULL)
		return -ENOMEM;

	qp->id = qp_id;
	dev->data->queue_pairs[qp_id] = qp;

	if (aesni_mb_pmd_qp_set_unique_name(dev, qp))
		goto qp_setup_cleanup;

	/* Select the job ops matching the vector mode (SSE/AVX/AVX2/...) */
	qp->op_fns = &job_ops[internals->vector_mode];

	qp->ingress_queue = aesni_mb_pmd_qp_create_processed_ops_ring(qp,
			"ingress", qp_conf->nb_descriptors, socket_id);
	if (qp->ingress_queue == NULL)
		goto qp_setup_cleanup;

	qp->sess_mp = dev->data->session_pool;

	memset(&qp->stats, 0, sizeof(qp->stats));

	/* Initialise multi-buffer manager */
	(*qp->op_fns->job.init_mgr)(&qp->mb_mgr);

	return 0;

qp_setup_cleanup:
	rte_free(qp);

	return -1;
}

/** Start queue pair */
static int
aesni_mb_pmd_qp_start(__rte_unused struct rte_cryptodev *dev,
		__rte_unused uint16_t queue_pair_id)
{
	return -ENOTSUP;
}

/** Stop queue pair */
static int
aesni_mb_pmd_qp_stop(__rte_unused struct rte_cryptodev *dev,
		__rte_unused uint16_t queue_pair_id)
{
	return -ENOTSUP;
}

/** Return the number of allocated queue pairs */
static uint32_t
aesni_mb_pmd_qp_count(struct rte_cryptodev *dev)
{
	return dev->data->nb_queue_pairs;
}

/** Returns the size of the aesni multi-buffer session structure */
static unsigned
aesni_mb_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
	return sizeof(struct aesni_mb_session);
}

/** Configure an aesni multi-buffer session from a crypto xform chain */
static void *
aesni_mb_pmd_session_configure(struct rte_cryptodev *dev,
		struct rte_crypto_sym_xform *xform, void *sess)
{
	struct aesni_mb_private *internals = dev->data->dev_private;

	if (unlikely(sess == NULL)) {
		MB_LOG_ERR("invalid session struct");
		return NULL;
	}

	if (aesni_mb_set_session_parameters(&job_ops[internals->vector_mode],
			sess, xform) != 0) {
		MB_LOG_ERR("failed to configure session parameters");
		return NULL;
	}

	return sess;
}

/** Clear the memory of session so it doesn't leave key material behind */
static void
aesni_mb_pmd_session_clear(struct rte_cryptodev *dev __rte_unused, void *sess)
{
	/*
	 * Currently we reset the whole data structure; it is worth
	 * investigating whether a more selective reset of only the key
	 * material would be more performant.
	 */
	if (sess)
		memset(sess, 0, sizeof(struct aesni_mb_session));
}
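
/*
 * All of the handlers above are exported through a single function table,
 * which the PMD's device creation path installs in rte_cryptodev.dev_ops.
 */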

struct rte_cryptodev_ops aesni_mb_pmd_ops = {
	.dev_configure		= aesni_mb_pmd_config,
	.dev_start		= aesni_mb_pmd_start,
	.dev_stop		= aesni_mb_pmd_stop,
	.dev_close		= aesni_mb_pmd_close,

	.stats_get		= aesni_mb_pmd_stats_get,
	.stats_reset		= aesni_mb_pmd_stats_reset,

	.dev_infos_get		= aesni_mb_pmd_info_get,

	.queue_pair_setup	= aesni_mb_pmd_qp_setup,
	.queue_pair_release	= aesni_mb_pmd_qp_release,
	.queue_pair_start	= aesni_mb_pmd_qp_start,
	.queue_pair_stop	= aesni_mb_pmd_qp_stop,
	.queue_pair_count	= aesni_mb_pmd_qp_count,

	.session_get_size	= aesni_mb_pmd_session_get_size,
	.session_configure	= aesni_mb_pmd_session_configure,
	.session_clear		= aesni_mb_pmd_session_clear
};

struct rte_cryptodev_ops *rte_aesni_mb_pmd_ops = &aesni_mb_pmd_ops;