/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
 */
#include <string.h>

#include <rte_common.h>
#include <rte_cryptodev_pmd.h>
#include <rte_malloc.h>

#include "ccp_pmd_private.h"
#include "ccp_dev.h"
#include "ccp_crypto.h"
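
/*
 * Capability table returned to applications via ccp_pmd_info_get():
 * the symmetric auth, cipher and AEAD algorithms that the AMD CCP
 * engine can offload.
 */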
static const struct rte_cryptodev_capabilities ccp_pmd_capabilities[] = {
	.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
	.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
	.algo = RTE_CRYPTO_AUTH_SHA1,

	.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
	.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
	.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,

	.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
	.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
	.algo = RTE_CRYPTO_AUTH_AES_CMAC,

	.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
	.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
	.algo = RTE_CRYPTO_CIPHER_AES_ECB,

	.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
	.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
	.algo = RTE_CRYPTO_CIPHER_AES_CBC,

	.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
	.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
	.algo = RTE_CRYPTO_CIPHER_AES_CTR,

	.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
	.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
	.algo = RTE_CRYPTO_CIPHER_3DES_CBC,

	.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
	.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
	.algo = RTE_CRYPTO_AEAD_AES_GCM,

	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};
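
/*
 * Device lifecycle callbacks: configure, stop and close have nothing to
 * do for this PMD; start simply forwards to ccp_dev_start().
 */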
static int
ccp_pmd_config(struct rte_cryptodev *dev __rte_unused,
	       struct rte_cryptodev_config *config __rte_unused)
{
	return 0;
}

static int
ccp_pmd_start(struct rte_cryptodev *dev)
{
	return ccp_dev_start(dev);
}

static void
ccp_pmd_stop(struct rte_cryptodev *dev __rte_unused)
{

}

static int
ccp_pmd_close(struct rte_cryptodev *dev __rte_unused)
{
	return 0;
}
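
/* Statistics are tracked per queue pair; the device-level callbacks
 * aggregate (or clear) the counters of every configured queue pair.
 */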
static void
ccp_pmd_stats_get(struct rte_cryptodev *dev,
		  struct rte_cryptodev_stats *stats)
{
	int qp_id;

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		struct ccp_qp *qp = dev->data->queue_pairs[qp_id];

		stats->enqueued_count += qp->qp_stats.enqueued_count;
		stats->dequeued_count += qp->qp_stats.dequeued_count;

		stats->enqueue_err_count += qp->qp_stats.enqueue_err_count;
		stats->dequeue_err_count += qp->qp_stats.dequeue_err_count;
	}
}

static void
ccp_pmd_stats_reset(struct rte_cryptodev *dev)
{
	int qp_id;

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		struct ccp_qp *qp = dev->data->queue_pairs[qp_id];

		memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
	}
}

static void
ccp_pmd_info_get(struct rte_cryptodev *dev,
		 struct rte_cryptodev_info *dev_info)
{
	struct ccp_private *internals = dev->data->dev_private;

	if (dev_info != NULL) {
		dev_info->driver_id = dev->driver_id;
		dev_info->feature_flags = dev->feature_flags;
		dev_info->capabilities = ccp_pmd_capabilities;
		dev_info->max_nb_queue_pairs = internals->max_nb_qpairs;
		dev_info->sym.max_nb_sessions = internals->max_nb_sessions;
	}
}

static int
ccp_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
{
	struct ccp_qp *qp;

	if (dev->data->queue_pairs[qp_id] != NULL) {
		qp = (struct ccp_qp *)dev->data->queue_pairs[qp_id];
		rte_ring_free(qp->processed_pkts);
		rte_mempool_free(qp->batch_mp);
		rte_free(qp);
	}
	dev->data->queue_pairs[qp_id] = NULL;
	return 0;
}
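
/* Queue pair helpers: give each qp a name that is unique across CCP
 * devices, and create (or reuse) the ring that hands batch-info for
 * in-flight operations to the dequeue path.
 */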
static int
ccp_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
		struct ccp_qp *qp)
{
	unsigned int n = snprintf(qp->name, sizeof(qp->name),
			"ccp_pmd_%u_qp_%u",
			dev->data->dev_id, qp->id);

	if (n > sizeof(qp->name))
		return -1;

	return 0;
}

static struct rte_ring *
ccp_pmd_qp_create_batch_info_ring(struct ccp_qp *qp,
		unsigned int ring_size, int socket_id)
{
	struct rte_ring *r;

	r = rte_ring_lookup(qp->name);
	if (r) {
		if (r->size >= ring_size) {
			CCP_LOG_INFO(
				"Reusing ring %s for processed packets",
				qp->name);
			return r;
		}
		CCP_LOG_INFO(
			"Unable to reuse ring %s for processed packets",
			qp->name);
		return NULL;
	}

	return rte_ring_create(qp->name, ring_size, socket_id,
			       RING_F_SP_ENQ | RING_F_SC_DEQ);
}
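
/*
 * Queue pair setup: validate the requested id, release any previous
 * instance, allocate the qp on the requested socket, then create its
 * unique name, processed-packet ring and batch-info mempool.
 */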
static int
ccp_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
		 const struct rte_cryptodev_qp_conf *qp_conf,
		 int socket_id, struct rte_mempool *session_pool)
{
	struct ccp_private *internals = dev->data->dev_private;
	struct ccp_qp *qp;
	int retval = 0;

	if (qp_id >= internals->max_nb_qpairs) {
		CCP_LOG_ERR("Invalid qp_id %u, should be less than %u",
			    qp_id, internals->max_nb_qpairs);
		return -EINVAL;
	}

	/* Free memory prior to re-allocation if needed. */
	if (dev->data->queue_pairs[qp_id] != NULL)
		ccp_pmd_qp_release(dev, qp_id);

	/* Allocate the queue pair data structure. */
	qp = rte_zmalloc_socket("CCP Crypto PMD Queue Pair", sizeof(*qp),
				RTE_CACHE_LINE_SIZE, socket_id);
	if (qp == NULL) {
		CCP_LOG_ERR("Failed to allocate queue pair memory");
		return -ENOMEM;
	}

	qp->dev = dev;
	qp->id = qp_id;
	dev->data->queue_pairs[qp_id] = qp;

	retval = ccp_pmd_qp_set_unique_name(dev, qp);
	if (retval) {
		CCP_LOG_ERR("Failed to create unique name for ccp qp");
		goto qp_setup_cleanup;
	}

	qp->processed_pkts = ccp_pmd_qp_create_batch_info_ring(qp,
			qp_conf->nb_descriptors, socket_id);
	if (qp->processed_pkts == NULL) {
		CCP_LOG_ERR("Failed to create batch info ring");
		goto qp_setup_cleanup;
	}

	qp->sess_mp = session_pool;

	/* mempool for batch info */
	qp->batch_mp = rte_mempool_create(
			qp->name,
			qp_conf->nb_descriptors,
			sizeof(struct ccp_batch_info),
			RTE_CACHE_LINE_SIZE,
			0, NULL, NULL, NULL, NULL,
			SOCKET_ID_ANY, 0);
	if (qp->batch_mp == NULL)
		goto qp_setup_cleanup;
	memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
	return 0;

qp_setup_cleanup:
	dev->data->queue_pairs[qp_id] = NULL;
	if (qp)
		rte_free(qp);
	return -1;
}

static int
ccp_pmd_qp_start(struct rte_cryptodev *dev __rte_unused,
		 uint16_t queue_pair_id __rte_unused)
{
	return -ENOTSUP;
}

static int
ccp_pmd_qp_stop(struct rte_cryptodev *dev __rte_unused,
		uint16_t queue_pair_id __rte_unused)
{
	return -ENOTSUP;
}

static uint32_t
ccp_pmd_qp_count(struct rte_cryptodev *dev)
{
	return dev->data->nb_queue_pairs;
}
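
/* Session handling: private session data lives in the caller-provided
 * mempool; configure parses the xform chain via
 * ccp_set_session_parameters(), clear wipes the private data and
 * returns it to its mempool.
 */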
static unsigned
ccp_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
	return sizeof(struct ccp_session);
}

static int
ccp_pmd_session_configure(struct rte_cryptodev *dev,
			  struct rte_crypto_sym_xform *xform,
			  struct rte_cryptodev_sym_session *sess,
			  struct rte_mempool *mempool)
{
	int ret;
	void *sess_private_data;

	if (unlikely(sess == NULL || xform == NULL)) {
		CCP_LOG_ERR("Invalid session struct or xform");
		return -EINVAL;
	}

	if (rte_mempool_get(mempool, &sess_private_data)) {
		CCP_LOG_ERR("Couldn't get object from session mempool");
		return -ENOMEM;
	}

	ret = ccp_set_session_parameters(sess_private_data, xform);
	if (ret != 0) {
		CCP_LOG_ERR("failed configure session parameters");
		/* Return session to mempool */
		rte_mempool_put(mempool, sess_private_data);
		return ret;
	}

	set_session_private_data(sess, dev->driver_id,
				 sess_private_data);
	return 0;
}

static void
ccp_pmd_session_clear(struct rte_cryptodev *dev,
		      struct rte_cryptodev_sym_session *sess)
{
	uint8_t index = dev->driver_id;
	void *sess_priv = get_session_private_data(sess, index);

	if (sess_priv) {
		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);

		rte_mempool_put(sess_mp, sess_priv);
		memset(sess_priv, 0, sizeof(struct ccp_session));
		set_session_private_data(sess, index, NULL);
	}
}
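
/*
 * Operation table registered with the cryptodev framework; the device
 * create path in rte_ccp_pmd.c points dev->dev_ops at ccp_pmd_ops so
 * these callbacks back the public rte_cryptodev_* API.
 */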
struct rte_cryptodev_ops ccp_ops = {
	.dev_configure		= ccp_pmd_config,
	.dev_start		= ccp_pmd_start,
	.dev_stop		= ccp_pmd_stop,
	.dev_close		= ccp_pmd_close,

	.stats_get		= ccp_pmd_stats_get,
	.stats_reset		= ccp_pmd_stats_reset,

	.dev_infos_get		= ccp_pmd_info_get,

	.queue_pair_setup	= ccp_pmd_qp_setup,
	.queue_pair_release	= ccp_pmd_qp_release,
	.queue_pair_start	= ccp_pmd_qp_start,
	.queue_pair_stop	= ccp_pmd_qp_stop,
	.queue_pair_count	= ccp_pmd_qp_count,

	.session_get_size	= ccp_pmd_session_get_size,
	.session_configure	= ccp_pmd_session_configure,
	.session_clear		= ccp_pmd_session_clear,
};

struct rte_cryptodev_ops *ccp_pmd_ops = &ccp_ops;