/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <string.h>

#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_cryptodev_pmd.h>

#include "aesni_gcm_pmd_private.h"
static const struct rte_cryptodev_capabilities aesni_gcm_pmd_capabilities[] = {
	{	/* AES GMAC (AUTH) */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_AES_GMAC,
				/* block/key/digest/IV size ranges elided in this extract */
			}, }
		}, }
	},
	{	/* AES GCM (AEAD) */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
			{.aead = {
				.algo = RTE_CRYPTO_AEAD_AES_GCM,
				/* block/key/digest/AAD/IV size ranges elided in this extract */
			}, }
		}, }
	},
	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};
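/*
 * Illustrative sketch (not part of the driver): applications normally query
 * this table through the public capability API rather than reading it
 * directly. Assuming the AEAD-aware generation of that API, a lookup is:
 *
 *	const struct rte_cryptodev_sym_capability_idx cap_idx = {
 *		.type = RTE_CRYPTO_SYM_XFORM_AEAD,
 *		.algo.aead = RTE_CRYPTO_AEAD_AES_GCM,
 *	};
 *	const struct rte_cryptodev_symmetric_capability *cap =
 *		rte_cryptodev_sym_capability_get(dev_id, &cap_idx);
 *
 * A non-NULL return means AES-GCM is supported, and *cap carries the
 * advertised key/digest/AAD/IV size ranges.
 */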
/** Configure device */
static int
aesni_gcm_pmd_config(__rte_unused struct rte_cryptodev *dev,
		__rte_unused struct rte_cryptodev_config *config)
{
	return 0;
}

/** Start device */
static int
aesni_gcm_pmd_start(__rte_unused struct rte_cryptodev *dev)
{
	return 0;
}

/** Stop device */
static void
aesni_gcm_pmd_stop(__rte_unused struct rte_cryptodev *dev)
{
}

/** Close device */
static int
aesni_gcm_pmd_close(__rte_unused struct rte_cryptodev *dev)
{
	return 0;
}
/** Get device statistics */
static void
aesni_gcm_pmd_stats_get(struct rte_cryptodev *dev,
		struct rte_cryptodev_stats *stats)
{
	int qp_id;

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		struct aesni_gcm_qp *qp = dev->data->queue_pairs[qp_id];

		stats->enqueued_count += qp->qp_stats.enqueued_count;
		stats->dequeued_count += qp->qp_stats.dequeued_count;

		stats->enqueue_err_count += qp->qp_stats.enqueue_err_count;
		stats->dequeue_err_count += qp->qp_stats.dequeue_err_count;
	}
}
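/*
 * Note: the per-qp counters above are accumulated into *stats, which assumes
 * the caller zeroed it first; the rte_cryptodev_stats_get() wrapper does this
 * before invoking the op.
 */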
/** Reset device statistics */
static void
aesni_gcm_pmd_stats_reset(struct rte_cryptodev *dev)
{
	int qp_id;

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		struct aesni_gcm_qp *qp = dev->data->queue_pairs[qp_id];

		memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
	}
}
/** Get device info */
static void
aesni_gcm_pmd_info_get(struct rte_cryptodev *dev,
		struct rte_cryptodev_info *dev_info)
{
	struct aesni_gcm_private *internals = dev->data->dev_private;

	if (dev_info != NULL) {
		dev_info->dev_type = dev->dev_type;
		dev_info->feature_flags = dev->feature_flags;
		dev_info->capabilities = aesni_gcm_pmd_capabilities;

		dev_info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
		dev_info->sym.max_nb_sessions = internals->max_nb_sessions;
	}
}
/** Release queue pair */
static int
aesni_gcm_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
{
	if (dev->data->queue_pairs[qp_id] != NULL) {
		rte_free(dev->data->queue_pairs[qp_id]);
		dev->data->queue_pairs[qp_id] = NULL;
	}

	return 0;
}
/** Set a unique name for the queue pair, based on the dev_id and qp_id */
static int
aesni_gcm_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
		struct aesni_gcm_qp *qp)
{
	unsigned n = snprintf(qp->name, sizeof(qp->name),
			"aesni_gcm_pmd_%u_qp_%u",
			dev->data->dev_id, qp->id);

	/* snprintf() returns the would-be length; >= means truncation */
	if (n >= sizeof(qp->name))
		return -1;

	return 0;
}
/** Create a ring to place processed packets on */
static struct rte_ring *
aesni_gcm_pmd_qp_create_processed_pkts_ring(struct aesni_gcm_qp *qp,
		unsigned ring_size, int socket_id)
{
	struct rte_ring *r;

	r = rte_ring_lookup(qp->name);
	if (r) {
		if (rte_ring_get_size(r) >= ring_size) {
			GCM_LOG_INFO("Reusing existing ring %s for processed"
					" packets", qp->name);
			return r;
		}

		GCM_LOG_ERR("Unable to reuse existing ring %s for processed"
				" packets", qp->name);
		return NULL;
	}

	return rte_ring_create(qp->name, ring_size, socket_id,
			RING_F_SP_ENQ | RING_F_SC_DEQ);
}
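/*
 * The ring is created single-producer/single-consumer (RING_F_SP_ENQ |
 * RING_F_SC_DEQ) because a cryptodev queue pair is meant to be driven by a
 * single lcore; concurrent enqueue/dequeue on one qp is not supported.
 */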
/** Setup a queue pair */
static int
aesni_gcm_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
		const struct rte_cryptodev_qp_conf *qp_conf,
		int socket_id)
{
	struct aesni_gcm_qp *qp = NULL;
	struct aesni_gcm_private *internals = dev->data->dev_private;

	/* Free memory prior to re-allocation if needed. */
	if (dev->data->queue_pairs[qp_id] != NULL)
		aesni_gcm_pmd_qp_release(dev, qp_id);

	/* Allocate the queue pair data structure. */
	qp = rte_zmalloc_socket("AES-NI PMD Queue Pair", sizeof(*qp),
			RTE_CACHE_LINE_SIZE, socket_id);
	if (qp == NULL)
		return -ENOMEM;

	qp->id = qp_id;
	dev->data->queue_pairs[qp_id] = qp;

	if (aesni_gcm_pmd_qp_set_unique_name(dev, qp))
		goto qp_setup_cleanup;

	qp->ops = (const struct aesni_gcm_ops *)gcm_ops[internals->vector_mode];

	qp->processed_pkts = aesni_gcm_pmd_qp_create_processed_pkts_ring(qp,
			qp_conf->nb_descriptors, socket_id);
	if (qp->processed_pkts == NULL)
		goto qp_setup_cleanup;

	qp->sess_mp = dev->data->session_pool;

	memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));

	return 0;

qp_setup_cleanup:
	if (qp)
		rte_free(qp);

	return -1;
}
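/*
 * Illustrative sketch (not part of the driver): this op is reached through
 * the public API. The qp_conf value below is an arbitrary example;
 * nb_descriptors sizes the processed-packets ring created above.
 *
 *	struct rte_cryptodev_qp_conf qp_conf = { .nb_descriptors = 2048 };
 *
 *	if (rte_cryptodev_queue_pair_setup(dev_id, qp_id, &qp_conf,
 *			rte_socket_id()) < 0)
 *		rte_exit(EXIT_FAILURE, "qp setup failed\n");
 */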
/** Start queue pair */
static int
aesni_gcm_pmd_qp_start(__rte_unused struct rte_cryptodev *dev,
		__rte_unused uint16_t queue_pair_id)
{
	return -ENOTSUP;
}

/** Stop queue pair */
static int
aesni_gcm_pmd_qp_stop(__rte_unused struct rte_cryptodev *dev,
		__rte_unused uint16_t queue_pair_id)
{
	return -ENOTSUP;
}
/** Return the number of allocated queue pairs */
static uint32_t
aesni_gcm_pmd_qp_count(struct rte_cryptodev *dev)
{
	return dev->data->nb_queue_pairs;
}
/** Return the size of the aesni gcm session structure */
static unsigned
aesni_gcm_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
	return sizeof(struct aesni_gcm_session);
}
/** Configure an aesni gcm session from a crypto xform chain */
static void *
aesni_gcm_pmd_session_configure(struct rte_cryptodev *dev,
		struct rte_crypto_sym_xform *xform, void *sess)
{
	struct aesni_gcm_private *internals = dev->data->dev_private;

	if (unlikely(sess == NULL)) {
		GCM_LOG_ERR("invalid session struct");
		return NULL;
	}

	if (aesni_gcm_set_session_parameters(gcm_ops[internals->vector_mode],
			sess, xform) != 0) {
		GCM_LOG_ERR("failed to configure session parameters");
		return NULL;
	}

	return sess;
}
/** Clear the session memory so it doesn't leave key material behind */
static void
aesni_gcm_pmd_session_clear(struct rte_cryptodev *dev __rte_unused, void *sess)
{
	if (sess)
		memset(sess, 0, sizeof(struct aesni_gcm_session));
}
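/*
 * Note: session_configure and session_clear are invoked by the cryptodev
 * framework rather than by applications directly; in this API generation the
 * public entry points are rte_cryptodev_sym_session_create() and
 * rte_cryptodev_sym_session_free().
 */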
struct rte_cryptodev_ops aesni_gcm_pmd_ops = {
		.dev_configure		= aesni_gcm_pmd_config,
		.dev_start		= aesni_gcm_pmd_start,
		.dev_stop		= aesni_gcm_pmd_stop,
		.dev_close		= aesni_gcm_pmd_close,

		.stats_get		= aesni_gcm_pmd_stats_get,
		.stats_reset		= aesni_gcm_pmd_stats_reset,

		.dev_infos_get		= aesni_gcm_pmd_info_get,

		.queue_pair_setup	= aesni_gcm_pmd_qp_setup,
		.queue_pair_release	= aesni_gcm_pmd_qp_release,
		.queue_pair_start	= aesni_gcm_pmd_qp_start,
		.queue_pair_stop	= aesni_gcm_pmd_qp_stop,
		.queue_pair_count	= aesni_gcm_pmd_qp_count,

		.session_get_size	= aesni_gcm_pmd_session_get_size,
		.session_configure	= aesni_gcm_pmd_session_configure,
		.session_clear		= aesni_gcm_pmd_session_clear
};

struct rte_cryptodev_ops *rte_aesni_gcm_pmd_ops = &aesni_gcm_pmd_ops;
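/*
 * dev->dev_ops is pointed at this exported table when the device is created
 * in the PMD's probe/create path (see aesni_gcm_pmd.c).
 */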