/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <string.h>
#include <errno.h>

#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_cryptodev_pmd.h>

#include "aesni_gcm_pmd_private.h"
static const struct rte_cryptodev_capabilities aesni_gcm_pmd_capabilities[] = {
	{	/* AES GMAC (AUTH) */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_AES_GMAC,
				/* block/key/digest/AAD size ranges elided */
			}, }
		}, }
	},
	{	/* AES GCM (AUTH) */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_AES_GCM,
				/* block/key/digest/AAD size ranges elided */
			}, }
		}, }
	},
	{	/* AES GCM (CIPHER) */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
			{.cipher = {
				.algo = RTE_CRYPTO_CIPHER_AES_GCM,
				/* block/key/IV size ranges elided */
			}, }
		}, }
	},
	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};
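/*
 * Device lifecycle hooks. This is a software-only PMD, so there is no
 * hardware state to program: configure, start and close simply succeed,
 * and stop does nothing.
 */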
/** Configure device */
static int
aesni_gcm_pmd_config(__rte_unused struct rte_cryptodev *dev,
		__rte_unused struct rte_cryptodev_config *config)
{
	return 0;
}

/** Start device */
static int
aesni_gcm_pmd_start(__rte_unused struct rte_cryptodev *dev)
{
	return 0;
}

/** Stop device */
static void
aesni_gcm_pmd_stop(__rte_unused struct rte_cryptodev *dev)
{
}

/** Close device */
static int
aesni_gcm_pmd_close(__rte_unused struct rte_cryptodev *dev)
{
	return 0;
}
/** Get device statistics */
static void
aesni_gcm_pmd_stats_get(struct rte_cryptodev *dev,
		struct rte_cryptodev_stats *stats)
{
	int qp_id;

	/* Aggregate the per-queue-pair counters into the device totals. */
	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		struct aesni_gcm_qp *qp = dev->data->queue_pairs[qp_id];

		stats->enqueued_count += qp->qp_stats.enqueued_count;
		stats->dequeued_count += qp->qp_stats.dequeued_count;

		stats->enqueue_err_count += qp->qp_stats.enqueue_err_count;
		stats->dequeue_err_count += qp->qp_stats.dequeue_err_count;
	}
}
/** Reset device statistics */
static void
aesni_gcm_pmd_stats_reset(struct rte_cryptodev *dev)
{
	int qp_id;

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		struct aesni_gcm_qp *qp = dev->data->queue_pairs[qp_id];

		memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
	}
}
/** Get device info */
static void
aesni_gcm_pmd_info_get(struct rte_cryptodev *dev,
		struct rte_cryptodev_info *dev_info)
{
	struct aesni_gcm_private *internals = dev->data->dev_private;

	if (dev_info != NULL) {
		dev_info->dev_type = dev->dev_type;
		dev_info->feature_flags = dev->feature_flags;
		dev_info->capabilities = aesni_gcm_pmd_capabilities;

		dev_info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
		dev_info->sym.max_nb_sessions = internals->max_nb_sessions;
	}
}
/** Release queue pair */
static int
aesni_gcm_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
{
	if (dev->data->queue_pairs[qp_id] != NULL) {
		rte_free(dev->data->queue_pairs[qp_id]);
		dev->data->queue_pairs[qp_id] = NULL;
	}
	return 0;
}
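/*
 * Note that the processed-packets ring is not freed here: if the same
 * dev_id/qp_id is set up again, the ring is found via rte_ring_lookup()
 * and reused (see the ring-creation helper below).
 */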
/** Set a unique name for the queue pair based on the device id and qp id */
static int
aesni_gcm_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
		struct aesni_gcm_qp *qp)
{
	unsigned n = snprintf(qp->name, sizeof(qp->name),
			"aesni_gcm_pmd_%u_qp_%u",
			dev->data->dev_id, qp->id);

	/* snprintf() returns the untruncated length, so n >= size means
	 * the name did not fit. */
	if (n >= sizeof(qp->name))
		return -1;

	return 0;
}
/** Create a ring to place processed packets on */
static struct rte_ring *
aesni_gcm_pmd_qp_create_processed_pkts_ring(struct aesni_gcm_qp *qp,
		unsigned ring_size, int socket_id)
{
	struct rte_ring *r;

	r = rte_ring_lookup(qp->name);
	if (r) {
		if (rte_ring_get_size(r) >= ring_size) {
			GCM_LOG_INFO("Reusing existing ring %s for processed"
					" packets", qp->name);
			return r;
		}
		GCM_LOG_ERR("Unable to reuse existing ring %s for processed"
				" packets", qp->name);
		return NULL;
	}

	/* Single producer/consumer flags are safe here: a queue pair is
	 * only ever accessed by one thread at a time. */
	return rte_ring_create(qp->name, ring_size, socket_id,
			RING_F_SP_ENQ | RING_F_SC_DEQ);
}
/** Setup a queue pair */
static int
aesni_gcm_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
		const struct rte_cryptodev_qp_conf *qp_conf,
		int socket_id)
{
	struct aesni_gcm_qp *qp = NULL;

	/* Free memory prior to re-allocation if needed. */
	if (dev->data->queue_pairs[qp_id] != NULL)
		aesni_gcm_pmd_qp_release(dev, qp_id);

	/* Allocate the queue pair data structure. */
	qp = rte_zmalloc_socket("AES-NI PMD Queue Pair", sizeof(*qp),
			RTE_CACHE_LINE_SIZE, socket_id);
	if (qp == NULL)
		return -ENOMEM;

	qp->id = qp_id;
	dev->data->queue_pairs[qp_id] = qp;

	if (aesni_gcm_pmd_qp_set_unique_name(dev, qp))
		goto qp_setup_cleanup;

	qp->processed_pkts = aesni_gcm_pmd_qp_create_processed_pkts_ring(qp,
			qp_conf->nb_descriptors, socket_id);
	if (qp->processed_pkts == NULL)
		goto qp_setup_cleanup;

	qp->sess_mp = dev->data->session_pool;

	memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));

	return 0;

qp_setup_cleanup:
	rte_free(qp);
	dev->data->queue_pairs[qp_id] = NULL;

	return -1;
}
/** Start queue pair */
static int
aesni_gcm_pmd_qp_start(__rte_unused struct rte_cryptodev *dev,
		__rte_unused uint16_t queue_pair_id)
{
	return -ENOTSUP;
}

/** Stop queue pair */
static int
aesni_gcm_pmd_qp_stop(__rte_unused struct rte_cryptodev *dev,
		__rte_unused uint16_t queue_pair_id)
{
	return -ENOTSUP;
}
/** Return the number of allocated queue pairs */
static uint32_t
aesni_gcm_pmd_qp_count(struct rte_cryptodev *dev)
{
	return dev->data->nb_queue_pairs;
}
/** Return the size of the aesni gcm session structure */
static unsigned
aesni_gcm_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
	return sizeof(struct aesni_gcm_session);
}
/** Configure an aesni gcm session from a crypto xform chain */
static void *
aesni_gcm_pmd_session_configure(struct rte_cryptodev *dev __rte_unused,
		struct rte_crypto_sym_xform *xform, void *sess)
{
	if (unlikely(sess == NULL)) {
		GCM_LOG_ERR("invalid session struct");
		return NULL;
	}

	if (aesni_gcm_set_session_parameters(sess, xform) != 0) {
		GCM_LOG_ERR("failed to configure session parameters");
		return NULL;
	}

	return sess;
}
/** Clear the session memory so it doesn't leave key material behind */
static void
aesni_gcm_pmd_session_clear(struct rte_cryptodev *dev __rte_unused, void *sess)
{
	if (sess)
		memset(sess, 0, sizeof(struct aesni_gcm_session));
}
struct rte_cryptodev_ops aesni_gcm_pmd_ops = {
	.dev_configure = aesni_gcm_pmd_config,
	.dev_start = aesni_gcm_pmd_start,
	.dev_stop = aesni_gcm_pmd_stop,
	.dev_close = aesni_gcm_pmd_close,

	.stats_get = aesni_gcm_pmd_stats_get,
	.stats_reset = aesni_gcm_pmd_stats_reset,

	.dev_infos_get = aesni_gcm_pmd_info_get,

	.queue_pair_setup = aesni_gcm_pmd_qp_setup,
	.queue_pair_release = aesni_gcm_pmd_qp_release,
	.queue_pair_start = aesni_gcm_pmd_qp_start,
	.queue_pair_stop = aesni_gcm_pmd_qp_stop,
	.queue_pair_count = aesni_gcm_pmd_qp_count,

	.session_get_size = aesni_gcm_pmd_session_get_size,
	.session_configure = aesni_gcm_pmd_session_configure,
	.session_clear = aesni_gcm_pmd_session_clear
};

struct rte_cryptodev_ops *rte_aesni_gcm_pmd_ops = &aesni_gcm_pmd_ops;
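/*
 * The exported pointer above is what the PMD creation path is expected to
 * hang off the device (typically "dev->dev_ops = rte_aesni_gcm_pmd_ops;"
 * in aesni_gcm_pmd.c), after which generic rte_cryptodev API calls such
 * as rte_cryptodev_configure() and rte_cryptodev_queue_pair_setup()
 * dispatch into the handlers defined in this file.
 */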