/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016 Intel Corporation
 */

#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_cryptodev_pmd.h>

#include "aesni_gcm_pmd_private.h"

static const struct rte_cryptodev_capabilities aesni_gcm_pmd_capabilities[] = {
	{	/* AES GMAC (AUTH) */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_AES_GMAC,
				/* key, digest and IV size limits omitted in this excerpt */
			}, }
		}, }
	},
	{	/* AES GCM (AEAD) */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
			{.aead = {
				.algo = RTE_CRYPTO_AEAD_AES_GCM,
				/* key, digest, AAD and IV size limits omitted in this excerpt */
			}, }
		}, }
	},
	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};

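/*
 * The table above is what aesni_gcm_pmd_info_get() below hands back to
 * applications through rte_cryptodev_info_get(); applications scan it up to
 * the RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST() terminator to discover that
 * this PMD offers AES-GMAC authentication and AES-GCM AEAD.
 */
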
/** Configure device */
static int
aesni_gcm_pmd_config(__rte_unused struct rte_cryptodev *dev,
		__rte_unused struct rte_cryptodev_config *config)
{
	return 0;
}

/** Start device */
static int
aesni_gcm_pmd_start(__rte_unused struct rte_cryptodev *dev)
{
	return 0;
}

/** Stop device */
static void
aesni_gcm_pmd_stop(__rte_unused struct rte_cryptodev *dev)
{
}

/** Close device */
static int
aesni_gcm_pmd_close(__rte_unused struct rte_cryptodev *dev)
{
	return 0;
}

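/*
 * The four device-level hooks above are effectively no-ops: this is a
 * software PMD, so the real state lives in the queue pairs and sessions
 * set up further below, and there is no hardware to start or stop.
 */
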
/** Get device statistics */
static void
aesni_gcm_pmd_stats_get(struct rte_cryptodev *dev,
		struct rte_cryptodev_stats *stats)
{
	int qp_id;

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		struct aesni_gcm_qp *qp = dev->data->queue_pairs[qp_id];

		stats->enqueued_count += qp->qp_stats.enqueued_count;
		stats->dequeued_count += qp->qp_stats.dequeued_count;

		stats->enqueue_err_count += qp->qp_stats.enqueue_err_count;
		stats->dequeue_err_count += qp->qp_stats.dequeue_err_count;
	}
}

/** Reset device statistics */
static void
aesni_gcm_pmd_stats_reset(struct rte_cryptodev *dev)
{
	int qp_id;

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		struct aesni_gcm_qp *qp = dev->data->queue_pairs[qp_id];

		memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
	}
}

/** Get device info */
static void
aesni_gcm_pmd_info_get(struct rte_cryptodev *dev,
		struct rte_cryptodev_info *dev_info)
{
	struct aesni_gcm_private *internals = dev->data->dev_private;

	if (dev_info != NULL) {
		dev_info->driver_id = dev->driver_id;
		dev_info->feature_flags = dev->feature_flags;
		dev_info->capabilities = aesni_gcm_pmd_capabilities;

		dev_info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
		/* No limit on the number of sessions */
		dev_info->sym.max_nb_sessions = 0;
	}
}

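/*
 * Illustrative application-side view (not part of this driver): the fields
 * filled in above are what an application sees after calling, roughly,
 *
 *	struct rte_cryptodev_info info;
 *	rte_cryptodev_info_get(dev_id, &info);
 *
 * where dev_id is the application's crypto device id; info.capabilities
 * then points at aesni_gcm_pmd_capabilities and info.max_nb_queue_pairs
 * bounds the qp_id values accepted by the queue pair routines below.
 */
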
/** Release queue pair */
static int
aesni_gcm_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
{
	if (dev->data->queue_pairs[qp_id] != NULL) {
		rte_free(dev->data->queue_pairs[qp_id]);
		dev->data->queue_pairs[qp_id] = NULL;
	}

	return 0;
}

/** Set a unique name for the queue pair, based on the device id and qp id */
static int
aesni_gcm_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
		struct aesni_gcm_qp *qp)
{
	unsigned int n = snprintf(qp->name, sizeof(qp->name),
			"aesni_gcm_pmd_%u_qp_%u",
			dev->data->dev_id, qp->id);

	if (n >= sizeof(qp->name))
		return -1;

	return 0;
}

/** Create a ring to place processed packets on */
static struct rte_ring *
aesni_gcm_pmd_qp_create_processed_pkts_ring(struct aesni_gcm_qp *qp,
		unsigned int ring_size, int socket_id)
{
	struct rte_ring *r;

	r = rte_ring_lookup(qp->name);
	if (r) {
		if (rte_ring_get_size(r) >= ring_size) {
			AESNI_GCM_LOG(INFO, "Reusing existing ring %s for processed"
					" packets", qp->name);
			return r;
		}
		AESNI_GCM_LOG(ERR, "Unable to reuse existing ring %s for processed"
				" packets", qp->name);
		return NULL;
	}

	return rte_ring_create(qp->name, ring_size, socket_id,
			RING_F_SP_ENQ | RING_F_SC_DEQ);
}

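/*
 * The ring is created single-producer/single-consumer (RING_F_SP_ENQ |
 * RING_F_SC_DEQ) on the assumption that each queue pair is driven by a
 * single lcore, which is the usual cryptodev usage model.
 */
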
/** Set up a queue pair */
static int
aesni_gcm_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
		const struct rte_cryptodev_qp_conf *qp_conf,
		int socket_id, struct rte_mempool *session_pool)
{
	struct aesni_gcm_qp *qp = NULL;
	struct aesni_gcm_private *internals = dev->data->dev_private;

	/* Free memory prior to re-allocation if needed. */
	if (dev->data->queue_pairs[qp_id] != NULL)
		aesni_gcm_pmd_qp_release(dev, qp_id);

	/* Allocate the queue pair data structure. */
	qp = rte_zmalloc_socket("AES-NI PMD Queue Pair", sizeof(*qp),
			RTE_CACHE_LINE_SIZE, socket_id);
	if (qp == NULL)
		return -ENOMEM;

	qp->id = qp_id;
	dev->data->queue_pairs[qp_id] = qp;

	if (aesni_gcm_pmd_qp_set_unique_name(dev, qp))
		goto qp_setup_cleanup;

	qp->ops = (const struct aesni_gcm_ops *)gcm_ops[internals->vector_mode];

	qp->processed_pkts = aesni_gcm_pmd_qp_create_processed_pkts_ring(qp,
			qp_conf->nb_descriptors, socket_id);
	if (qp->processed_pkts == NULL)
		goto qp_setup_cleanup;

	qp->sess_mp = session_pool;

	memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));

	return 0;

qp_setup_cleanup:
	if (qp) {
		rte_free(qp);
		dev->data->queue_pairs[qp_id] = NULL;
	}

	return -1;
}

/** Return the number of allocated queue pairs */
static uint32_t
aesni_gcm_pmd_qp_count(struct rte_cryptodev *dev)
{
	return dev->data->nb_queue_pairs;
}

/** Return the size of the AES-NI GCM session structure */
static unsigned int
aesni_gcm_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
	return sizeof(struct aesni_gcm_session);
}

/** Configure an AES-NI GCM session from a crypto xform chain */
static int
aesni_gcm_pmd_session_configure(struct rte_cryptodev *dev,
		struct rte_crypto_sym_xform *xform,
		struct rte_cryptodev_sym_session *sess,
		struct rte_mempool *mempool)
{
	void *sess_private_data;
	int ret;
	struct aesni_gcm_private *internals = dev->data->dev_private;

	if (unlikely(sess == NULL)) {
		AESNI_GCM_LOG(ERR, "invalid session struct");
		return -EINVAL;
	}

	if (rte_mempool_get(mempool, &sess_private_data)) {
		AESNI_GCM_LOG(ERR,
				"Couldn't get object from session mempool");
		return -ENOMEM;
	}

	ret = aesni_gcm_set_session_parameters(gcm_ops[internals->vector_mode],
			sess_private_data, xform);
	if (ret != 0) {
		AESNI_GCM_LOG(ERR, "failed to configure session parameters");

		/* Return session to mempool */
		rte_mempool_put(mempool, sess_private_data);
		return ret;
	}

	set_session_private_data(sess, dev->driver_id,
			sess_private_data);

	return 0;
}

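/*
 * Illustrative application-side flow (not part of this driver): this
 * callback is reached when an application binds a device-specific session
 * to a generic session object, roughly
 *
 *	sess = rte_cryptodev_sym_session_create(sess_mp);
 *	rte_cryptodev_sym_session_init(dev_id, sess, xform, sess_mp);
 *
 * where dev_id, xform and sess_mp are the application's device id, crypto
 * transform chain and session mempool.
 */
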
/** Clear the session memory so that it doesn't leave key material behind */
static void
aesni_gcm_pmd_session_clear(struct rte_cryptodev *dev,
		struct rte_cryptodev_sym_session *sess)
{
	uint8_t index = dev->driver_id;
	void *sess_priv = get_session_private_data(sess, index);

	/* Zero out the whole structure */
	if (sess_priv) {
		memset(sess_priv, 0, sizeof(struct aesni_gcm_session));
		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);

		set_session_private_data(sess, index, NULL);
		rte_mempool_put(sess_mp, sess_priv);
	}
}

struct rte_cryptodev_ops aesni_gcm_pmd_ops = {
		.dev_configure		= aesni_gcm_pmd_config,
		.dev_start		= aesni_gcm_pmd_start,
		.dev_stop		= aesni_gcm_pmd_stop,
		.dev_close		= aesni_gcm_pmd_close,

		.stats_get		= aesni_gcm_pmd_stats_get,
		.stats_reset		= aesni_gcm_pmd_stats_reset,

		.dev_infos_get		= aesni_gcm_pmd_info_get,

		.queue_pair_setup	= aesni_gcm_pmd_qp_setup,
		.queue_pair_release	= aesni_gcm_pmd_qp_release,
		.queue_pair_count	= aesni_gcm_pmd_qp_count,

		.session_get_size	= aesni_gcm_pmd_session_get_size,
		.session_configure	= aesni_gcm_pmd_session_configure,
		.session_clear		= aesni_gcm_pmd_session_clear
};

struct rte_cryptodev_ops *rte_aesni_gcm_pmd_ops = &aesni_gcm_pmd_ops;
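
/*
 * rte_aesni_gcm_pmd_ops is expected to be assigned to dev->dev_ops when the
 * device is created in the companion probe code (aesni_gcm_pmd.c); that is
 * how the cryptodev framework reaches the callbacks defined in this file.
 */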