cryptodev: expose driver interface as internal
[dpdk.git] drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c
index 387f8d1..edb7275 100644
@@ -1,50 +1,22 @@
-/*-
- *   BSD LICENSE
- *
- *   Copyright(c) 2016 Intel Corporation. All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of Intel Corporation nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2020 Intel Corporation
  */
 
 #include <string.h>
 
 #include <rte_common.h>
 #include <rte_malloc.h>
-#include <rte_cryptodev_pmd.h>
+#include <cryptodev_pmd.h>
 
 #include "aesni_gcm_pmd_private.h"
 
 static const struct rte_cryptodev_capabilities aesni_gcm_pmd_capabilities[] = {
-       {       /* AES GCM (AUTH) */
+       {       /* AES GMAC (AUTH) */
                .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
-               .sym = {
+               {.sym = {
                        .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
-                       .auth = {
-                               .algo = RTE_CRYPTO_AUTH_AES_GCM,
+                       {.auth = {
+                               .algo = RTE_CRYPTO_AUTH_AES_GMAC,
                                .block_size = 16,
                                .key_size = {
                                        .min = 16,
@@ -52,44 +24,55 @@ static const struct rte_cryptodev_capabilities aesni_gcm_pmd_capabilities[] = {
                                        .increment = 8
                                },
                                .digest_size = {
-                                       .min = 8,
+                                       .min = 1,
                                        .max = 16,
-                                       .increment = 4
+                                       .increment = 1
                                },
-                               .aad_size = {
-                                       .min = 8,
+                               .iv_size = {
+                                       .min = 12,
                                        .max = 12,
-                                       .increment = 4
+                                       .increment = 0
                                }
-                       }
-               }
+                       }, }
+               }, }
        },
-       {       /* AES GCM (CIPHER) */
+       {       /* AES GCM */
                .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
-               .sym = {
-                       .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
-                       .cipher = {
-                               .algo = RTE_CRYPTO_CIPHER_AES_GCM,
+               {.sym = {
+                       .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+                       {.aead = {
+                               .algo = RTE_CRYPTO_AEAD_AES_GCM,
                                .block_size = 16,
                                .key_size = {
                                        .min = 16,
                                        .max = 32,
                                        .increment = 8
                                },
-                               .iv_size = {
-                                       .min = 16,
+                               .digest_size = {
+                                       .min = 1,
                                        .max = 16,
+                                       .increment = 1
+                               },
+                               .aad_size = {
+                                       .min = 0,
+                                       .max = 65535,
+                                       .increment = 1
+                               },
+                               .iv_size = {
+                                       .min = 12,
+                                       .max = 12,
                                        .increment = 0
                                }
-                       }
-               }
+                       }, }
+               }, }
        },
        RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
 };
 
 /** Configure device */
 static int
-aesni_gcm_pmd_config(__rte_unused struct rte_cryptodev *dev)
+aesni_gcm_pmd_config(__rte_unused struct rte_cryptodev *dev,
+               __rte_unused struct rte_cryptodev_config *config)
 {
        return 0;
 }
@@ -155,12 +138,13 @@ aesni_gcm_pmd_info_get(struct rte_cryptodev *dev,
        struct aesni_gcm_private *internals = dev->data->dev_private;
 
        if (dev_info != NULL) {
-               dev_info->dev_type = dev->dev_type;
-                dev_info->feature_flags = dev->feature_flags;
-                dev_info->capabilities = aesni_gcm_pmd_capabilities;
+               dev_info->driver_id = dev->driver_id;
+               dev_info->feature_flags = dev->feature_flags;
+               dev_info->capabilities = aesni_gcm_pmd_capabilities;
 
                dev_info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
-               dev_info->sym.max_nb_sessions = internals->max_nb_sessions;
+               /* No limit of number of sessions */
+               dev_info->sym.max_nb_sessions = 0;
        }
 }
 
@@ -169,6 +153,11 @@ static int
 aesni_gcm_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
 {
        if (dev->data->queue_pairs[qp_id] != NULL) {
+               struct aesni_gcm_qp *qp = dev->data->queue_pairs[qp_id];
+
+               if (qp->processed_pkts)
+                       rte_ring_free(qp->processed_pkts);
+
                rte_free(dev->data->queue_pairs[qp_id]);
                dev->data->queue_pairs[qp_id] = NULL;
        }
@@ -184,7 +173,7 @@ aesni_gcm_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
                        "aesni_gcm_pmd_%u_qp_%u",
                        dev->data->dev_id, qp->id);
 
-       if (n > sizeof(qp->name))
+       if (n >= sizeof(qp->name))
                return -1;
 
        return 0;
@@ -199,13 +188,12 @@ aesni_gcm_pmd_qp_create_processed_pkts_ring(struct aesni_gcm_qp *qp,
 
        r = rte_ring_lookup(qp->name);
        if (r) {
-               if (r->prod.size >= ring_size) {
-                       GCM_LOG_INFO("Reusing existing ring %s for processed"
-                                       " packets", qp->name);
+               if (rte_ring_get_size(r) >= ring_size) {
+                       AESNI_GCM_LOG(INFO, "Reusing existing ring %s for processed"
+                               " packets", qp->name);
                        return r;
                }
-
-               GCM_LOG_ERR("Unable to reuse existing ring %s for processed"
+               AESNI_GCM_LOG(ERR, "Unable to reuse existing ring %s for processed"
                                " packets", qp->name);
                return NULL;
        }
@@ -218,7 +206,7 @@ aesni_gcm_pmd_qp_create_processed_pkts_ring(struct aesni_gcm_qp *qp,
 static int
 aesni_gcm_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
                const struct rte_cryptodev_qp_conf *qp_conf,
-                int socket_id)
+               int socket_id)
 {
        struct aesni_gcm_qp *qp = NULL;
        struct aesni_gcm_private *internals = dev->data->dev_private;
@@ -239,14 +227,15 @@ aesni_gcm_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
        if (aesni_gcm_pmd_qp_set_unique_name(dev, qp))
                goto qp_setup_cleanup;
 
-       qp->ops = &gcm_ops[internals->vector_mode];
+       qp->ops = (const struct aesni_gcm_ops *)internals->ops;
 
        qp->processed_pkts = aesni_gcm_pmd_qp_create_processed_pkts_ring(qp,
                        qp_conf->nb_descriptors, socket_id);
        if (qp->processed_pkts == NULL)
                goto qp_setup_cleanup;
 
-       qp->sess_mp = dev->data->session_pool;
+       qp->sess_mp = qp_conf->mp_session;
+       qp->sess_mp_priv = qp_conf->mp_session_private;
 
        memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
 
@@ -259,63 +248,65 @@ qp_setup_cleanup:
        return -1;
 }
 
-/** Start queue pair */
-static int
-aesni_gcm_pmd_qp_start(__rte_unused struct rte_cryptodev *dev,
-               __rte_unused uint16_t queue_pair_id)
-{
-       return -ENOTSUP;
-}
-
-/** Stop queue pair */
-static int
-aesni_gcm_pmd_qp_stop(__rte_unused struct rte_cryptodev *dev,
-               __rte_unused uint16_t queue_pair_id)
-{
-       return -ENOTSUP;
-}
-
-/** Return the number of allocated queue pairs */
-static uint32_t
-aesni_gcm_pmd_qp_count(struct rte_cryptodev *dev)
-{
-       return dev->data->nb_queue_pairs;
-}
-
 /** Returns the size of the aesni gcm session structure */
 static unsigned
-aesni_gcm_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
+aesni_gcm_pmd_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
 {
        return sizeof(struct aesni_gcm_session);
 }
 
 /** Configure a aesni gcm session from a crypto xform chain */
-static void *
-aesni_gcm_pmd_session_configure(struct rte_cryptodev *dev,
-               struct rte_crypto_sym_xform *xform,     void *sess)
+static int
+aesni_gcm_pmd_sym_session_configure(struct rte_cryptodev *dev __rte_unused,
+               struct rte_crypto_sym_xform *xform,
+               struct rte_cryptodev_sym_session *sess,
+               struct rte_mempool *mempool)
 {
+       void *sess_private_data;
+       int ret;
        struct aesni_gcm_private *internals = dev->data->dev_private;
 
        if (unlikely(sess == NULL)) {
-               GCM_LOG_ERR("invalid session struct");
-               return NULL;
+               AESNI_GCM_LOG(ERR, "invalid session struct");
+               return -EINVAL;
        }
 
-       if (aesni_gcm_set_session_parameters(&gcm_ops[internals->vector_mode],
-                       sess, xform) != 0) {
-               GCM_LOG_ERR("failed configure session parameters");
-               return NULL;
+       if (rte_mempool_get(mempool, &sess_private_data)) {
+               AESNI_GCM_LOG(ERR,
+                               "Couldn't get object from session mempool");
+               return -ENOMEM;
+       }
+       ret = aesni_gcm_set_session_parameters(internals->ops,
+                               sess_private_data, xform);
+       if (ret != 0) {
+               AESNI_GCM_LOG(ERR, "failed configure session parameters");
+
+               /* Return session to mempool */
+               rte_mempool_put(mempool, sess_private_data);
+               return ret;
        }
 
-       return sess;
+       set_sym_session_private_data(sess, dev->driver_id,
+                       sess_private_data);
+
+       return 0;
 }
 
 /** Clear the memory of session so it doesn't leave key material behind */
 static void
-aesni_gcm_pmd_session_clear(struct rte_cryptodev *dev __rte_unused, void *sess)
+aesni_gcm_pmd_sym_session_clear(struct rte_cryptodev *dev,
+               struct rte_cryptodev_sym_session *sess)
 {
-       if (sess)
-               memset(sess, 0, sizeof(struct aesni_gcm_session));
+       uint8_t index = dev->driver_id;
+       void *sess_priv = get_sym_session_private_data(sess, index);
+
+       /* Zero out the whole structure */
+       if (sess_priv) {
+               memset(sess_priv, 0, sizeof(struct aesni_gcm_session));
+               struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+               set_sym_session_private_data(sess, index, NULL);
+               rte_mempool_put(sess_mp, sess_priv);
+       }
 }
 
 struct rte_cryptodev_ops aesni_gcm_pmd_ops = {
@@ -331,13 +322,12 @@ struct rte_cryptodev_ops aesni_gcm_pmd_ops = {
 
                .queue_pair_setup       = aesni_gcm_pmd_qp_setup,
                .queue_pair_release     = aesni_gcm_pmd_qp_release,
-               .queue_pair_start       = aesni_gcm_pmd_qp_start,
-               .queue_pair_stop        = aesni_gcm_pmd_qp_stop,
-               .queue_pair_count       = aesni_gcm_pmd_qp_count,
 
-               .session_get_size       = aesni_gcm_pmd_session_get_size,
-               .session_configure      = aesni_gcm_pmd_session_configure,
-               .session_clear          = aesni_gcm_pmd_session_clear
+               .sym_cpu_process        = aesni_gcm_pmd_cpu_crypto_process,
+
+               .sym_session_get_size   = aesni_gcm_pmd_sym_session_get_size,
+               .sym_session_configure  = aesni_gcm_pmd_sym_session_configure,
+               .sym_session_clear      = aesni_gcm_pmd_sym_session_clear
 };
 
 struct rte_cryptodev_ops *rte_aesni_gcm_pmd_ops = &aesni_gcm_pmd_ops;
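
Note: the reworked aesni_gcm_pmd_capabilities[] table above advertises AES-GCM as a single AEAD transform (plus AES-GMAC for auth) instead of the old separate cipher/auth entries. As a rough sketch, not part of this patch, an application could confirm that a device exposes the new AEAD capability by walking the array returned through rte_cryptodev_info_get(); the helper name dev_supports_aes_gcm_aead is hypothetical.

#include <stdbool.h>
#include <rte_cryptodev.h>

/* Hypothetical helper (illustration only): check whether a crypto device
 * advertises AES-GCM as an AEAD capability, as the rewritten
 * aesni_gcm_pmd_capabilities[] table above now does.
 */
static bool
dev_supports_aes_gcm_aead(uint8_t dev_id)
{
	struct rte_cryptodev_info info;
	const struct rte_cryptodev_capabilities *cap;

	rte_cryptodev_info_get(dev_id, &info);

	/* The capability array is terminated by
	 * RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST(), i.e. an entry whose
	 * op field is RTE_CRYPTO_OP_TYPE_UNDEFINED.
	 */
	for (cap = info.capabilities;
			cap->op != RTE_CRYPTO_OP_TYPE_UNDEFINED; cap++) {
		if (cap->op == RTE_CRYPTO_OP_TYPE_SYMMETRIC &&
				cap->sym.xform_type ==
					RTE_CRYPTO_SYM_XFORM_AEAD &&
				cap->sym.aead.algo == RTE_CRYPTO_AEAD_AES_GCM)
			return true;
	}

	return false;
}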