/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019 Marvell International Ltd.
 */

#include <stdbool.h>

#include <rte_cryptodev_pmd.h>
#include <rte_crypto.h>

#include "nitrox_sym.h"
#include "nitrox_device.h"
#include "nitrox_sym_capabilities.h"
#include "nitrox_qp.h"
#include "nitrox_sym_reqmgr.h"
#include "nitrox_sym_ctx.h"
#include "nitrox_logs.h"
#define CRYPTODEV_NAME_NITROX_PMD crypto_nitrox_sym
#define MC_MAC_MISMATCH_ERR_CODE 0x4c
#define NPS_PKT_IN_INSTR_SIZE 64
#define IV_FROM_DPTR 1
#define FLEXI_CRYPTO_ENCRYPT_HMAC 0x33
#define AES_KEYSIZE_128 16
#define AES_KEYSIZE_192 24
#define AES_KEYSIZE_256 32
#define MAX_IV_LEN 16 /* AES/3DES CBC IVs are at most one 16 byte block */
struct nitrox_sym_device {
	struct rte_cryptodev *cdev;
	struct nitrox_device *ndev;
};
uint8_t nitrox_sym_drv_id;
static const char nitrox_sym_drv_name[] = RTE_STR(CRYPTODEV_NAME_NITROX_PMD);
static const struct rte_driver nitrox_rte_sym_drv = {
	.name = nitrox_sym_drv_name,
	.alias = nitrox_sym_drv_name
};

static int nitrox_sym_dev_qp_release(struct rte_cryptodev *cdev,
				     uint16_t qp_id);
static int
nitrox_sym_dev_config(struct rte_cryptodev *cdev,
		      struct rte_cryptodev_config *config)
{
	struct nitrox_sym_device *sym_dev = cdev->data->dev_private;
	struct nitrox_device *ndev = sym_dev->ndev;

	if (config->nb_queue_pairs > ndev->nr_queues) {
		NITROX_LOG(ERR, "Invalid queue pairs, max supported %d\n",
			   ndev->nr_queues);
		return -EINVAL;
	}

	return 0;
}
static int
nitrox_sym_dev_start(struct rte_cryptodev *cdev)
{
	/* SE cores initialization is done in PF */
	RTE_SET_USED(cdev);
	return 0;
}
static void
nitrox_sym_dev_stop(struct rte_cryptodev *cdev)
{
	/* SE cores cleanup is done in PF */
	RTE_SET_USED(cdev);
}
static int
nitrox_sym_dev_close(struct rte_cryptodev *cdev)
{
	int i, ret;

	for (i = 0; i < cdev->data->nb_queue_pairs; i++) {
		ret = nitrox_sym_dev_qp_release(cdev, i);
		if (ret)
			return ret;
	}

	return 0;
}
static void
nitrox_sym_dev_info_get(struct rte_cryptodev *cdev,
			struct rte_cryptodev_info *info)
{
	struct nitrox_sym_device *sym_dev = cdev->data->dev_private;
	struct nitrox_device *ndev = sym_dev->ndev;

	if (!info)
		return;

	info->max_nb_queue_pairs = ndev->nr_queues;
	info->feature_flags = cdev->feature_flags;
	info->capabilities = nitrox_get_sym_capabilities();
	info->driver_id = nitrox_sym_drv_id;
	/* 0 means no limit on the number of sessions */
	info->sym.max_nb_sessions = 0;
}
static void
nitrox_sym_dev_stats_get(struct rte_cryptodev *cdev,
			 struct rte_cryptodev_stats *stats)
{
	int qp_id;

	for (qp_id = 0; qp_id < cdev->data->nb_queue_pairs; qp_id++) {
		struct nitrox_qp *qp = cdev->data->queue_pairs[qp_id];

		if (!qp)
			continue;

		stats->enqueued_count += qp->stats.enqueued_count;
		stats->dequeued_count += qp->stats.dequeued_count;
		stats->enqueue_err_count += qp->stats.enqueue_err_count;
		stats->dequeue_err_count += qp->stats.dequeue_err_count;
	}
}
static void
nitrox_sym_dev_stats_reset(struct rte_cryptodev *cdev)
{
	int qp_id;

	for (qp_id = 0; qp_id < cdev->data->nb_queue_pairs; qp_id++) {
		struct nitrox_qp *qp = cdev->data->queue_pairs[qp_id];

		if (!qp)
			continue;

		memset(&qp->stats, 0, sizeof(qp->stats));
	}
}
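/*
 * Queue pair setup: validate the queue id against the device limit,
 * release any queue pair already installed at that id, then allocate
 * the qp on the requested socket, program the hardware command ring
 * (64 byte NPS_PKT_IN instructions) and create the softreq mempool
 * that tracks in-flight requests.
 */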
static int
nitrox_sym_dev_qp_setup(struct rte_cryptodev *cdev, uint16_t qp_id,
			const struct rte_cryptodev_qp_conf *qp_conf,
			int socket_id)
{
	struct nitrox_sym_device *sym_dev = cdev->data->dev_private;
	struct nitrox_device *ndev = sym_dev->ndev;
	struct nitrox_qp *qp = NULL;
	int err;

	NITROX_LOG(DEBUG, "queue %d\n", qp_id);
	if (qp_id >= ndev->nr_queues) {
		NITROX_LOG(ERR, "queue %u invalid, max queues supported %d\n",
			   qp_id, ndev->nr_queues);
		return -EINVAL;
	}

	if (cdev->data->queue_pairs[qp_id]) {
		err = nitrox_sym_dev_qp_release(cdev, qp_id);
		if (err)
			return err;
	}

	qp = rte_zmalloc_socket("nitrox PMD qp", sizeof(*qp),
				RTE_CACHE_LINE_SIZE, socket_id);
	if (!qp) {
		NITROX_LOG(ERR, "Failed to allocate nitrox qp\n");
		return -ENOMEM;
	}

	qp->qno = qp_id;
	err = nitrox_qp_setup(qp, ndev->bar_addr, cdev->data->name,
			      qp_conf->nb_descriptors, NPS_PKT_IN_INSTR_SIZE,
			      socket_id);
	if (unlikely(err))
		goto qp_setup_err;

	qp->sr_mp = nitrox_sym_req_pool_create(cdev, qp->count, qp_id,
					       socket_id);
	if (unlikely(!qp->sr_mp)) {
		err = -ENOMEM;
		goto req_pool_err;
	}

	cdev->data->queue_pairs[qp_id] = qp;
	NITROX_LOG(DEBUG, "queue %d setup done\n", qp_id);
	return 0;

req_pool_err:
	nitrox_qp_release(qp, ndev->bar_addr);
qp_setup_err:
	rte_free(qp);
	return err;
}
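/*
 * Queue pair release: a queue that still has in-flight requests is
 * refused with -EAGAIN; otherwise it is detached from the cryptodev
 * before the hardware ring, the softreq pool and the qp itself are
 * freed.
 */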
static int
nitrox_sym_dev_qp_release(struct rte_cryptodev *cdev, uint16_t qp_id)
{
	struct nitrox_sym_device *sym_dev = cdev->data->dev_private;
	struct nitrox_device *ndev = sym_dev->ndev;
	struct nitrox_qp *qp;
	int err;

	NITROX_LOG(DEBUG, "queue %d\n", qp_id);
	if (qp_id >= ndev->nr_queues) {
		NITROX_LOG(ERR, "queue %u invalid, max queues supported %d\n",
			   qp_id, ndev->nr_queues);
		return -EINVAL;
	}

	qp = cdev->data->queue_pairs[qp_id];
	if (!qp) {
		NITROX_LOG(DEBUG, "queue %u already freed\n", qp_id);
		return 0;
	}

	if (!nitrox_qp_is_empty(qp)) {
		NITROX_LOG(ERR, "queue %d not empty\n", qp_id);
		return -EAGAIN;
	}

	cdev->data->queue_pairs[qp_id] = NULL;
	err = nitrox_qp_release(qp, ndev->bar_addr);
	nitrox_sym_req_pool_free(qp->sr_mp);
	rte_free(qp);
	NITROX_LOG(DEBUG, "queue %d release done\n", qp_id);
	return err;
}
static unsigned int
nitrox_sym_dev_sess_get_size(__rte_unused struct rte_cryptodev *cdev)
{
	return sizeof(struct nitrox_crypto_ctx);
}
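/*
 * Classify the session xform chain. Besides cipher-only, exactly two
 * chained orders are accepted: cipher followed by auth generate (the
 * encrypt direction) and auth verify followed by cipher decrypt (the
 * decrypt direction). As an illustration only (not code from this
 * driver), an application would build the encrypt-side chain as:
 *
 *	struct rte_crypto_sym_xform auth = {
 *		.type = RTE_CRYPTO_SYM_XFORM_AUTH,
 *		.auth = { .op = RTE_CRYPTO_AUTH_OP_GENERATE, ... },
 *	};
 *	struct rte_crypto_sym_xform cipher = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.next = &auth,
 *		.cipher = { .op = RTE_CRYPTO_CIPHER_OP_ENCRYPT, ... },
 *	};
 *
 * which this function classifies as NITROX_CHAIN_CIPHER_AUTH.
 */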
static enum nitrox_chain
get_crypto_chain_order(const struct rte_crypto_sym_xform *xform)
{
	enum nitrox_chain res = NITROX_CHAIN_NOT_SUPPORTED;

	if (unlikely(xform == NULL))
		return res;

	switch (xform->type) {
	case RTE_CRYPTO_SYM_XFORM_AUTH:
		if (xform->next == NULL) {
			res = NITROX_CHAIN_NOT_SUPPORTED;
		} else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
			if (xform->auth.op == RTE_CRYPTO_AUTH_OP_VERIFY &&
			    xform->next->cipher.op ==
			    RTE_CRYPTO_CIPHER_OP_DECRYPT) {
				res = NITROX_CHAIN_AUTH_CIPHER;
			} else {
				NITROX_LOG(ERR, "auth op %d, cipher op %d\n",
					   xform->auth.op,
					   xform->next->cipher.op);
			}
		}
		break;
	case RTE_CRYPTO_SYM_XFORM_CIPHER:
		if (xform->next == NULL) {
			res = NITROX_CHAIN_CIPHER_ONLY;
		} else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
			if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT &&
			    xform->next->auth.op ==
			    RTE_CRYPTO_AUTH_OP_GENERATE) {
				res = NITROX_CHAIN_CIPHER_AUTH;
			} else {
				NITROX_LOG(ERR, "cipher op %d, auth op %d\n",
					   xform->cipher.op,
					   xform->next->auth.op);
			}
		}
		break;
	default:
		break;
	}

	return res;
}
static enum flexi_cipher
get_flexi_cipher_type(enum rte_crypto_cipher_algorithm algo, bool *is_aes)
{
	enum flexi_cipher type;

	switch (algo) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		type = CIPHER_AES_CBC;
		*is_aes = true;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		type = CIPHER_3DES_CBC;
		*is_aes = false;
		break;
	default:
		type = CIPHER_INVALID;
		NITROX_LOG(ERR, "Algorithm not supported %d\n", algo);
		break;
	}

	return type;
}
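/*
 * The flexi crypto context stores the AES key size as a small code
 * rather than a byte count: 1, 2 and 3 below stand for AES-128,
 * AES-192 and AES-256, and non-AES ciphers use 0.
 */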
static int
flexi_aes_keylen(size_t keylen, bool is_aes)
{
	int aes_keylen;

	if (!is_aes)
		return 0;

	switch (keylen) {
	case AES_KEYSIZE_128:
		aes_keylen = 1;
		break;
	case AES_KEYSIZE_192:
		aes_keylen = 2;
		break;
	case AES_KEYSIZE_256:
		aes_keylen = 3;
		break;
	default:
		NITROX_LOG(ERR, "Invalid keylen %zu\n", keylen);
		aes_keylen = -EINVAL;
		break;
	}

	return aes_keylen;
}
static bool
crypto_key_is_valid(struct rte_crypto_cipher_xform *xform,
		    struct flexi_crypto_context *fctx)
{
	if (unlikely(xform->key.length > sizeof(fctx->crypto.key))) {
		NITROX_LOG(ERR, "Invalid crypto key length %d\n",
			   xform->key.length);
		return false;
	}

	return true;
}
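/*
 * Program the cipher half of the flexi context. fctx->flags is kept
 * in device (big endian) byte order inside the context, so it is
 * swapped to CPU order, the cipher type, AES key length code and IV
 * source are updated, and it is swapped back before the key bytes
 * are copied in.
 */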
static int
configure_cipher_ctx(struct rte_crypto_cipher_xform *xform,
		     struct nitrox_crypto_ctx *ctx)
{
	enum flexi_cipher type;
	bool cipher_is_aes = false;
	int aes_keylen;
	struct flexi_crypto_context *fctx = &ctx->fctx;

	type = get_flexi_cipher_type(xform->algo, &cipher_is_aes);
	if (unlikely(type == CIPHER_INVALID))
		return -ENOTSUP;

	aes_keylen = flexi_aes_keylen(xform->key.length, cipher_is_aes);
	if (unlikely(aes_keylen < 0))
		return -EINVAL;

	if (unlikely(!cipher_is_aes && !crypto_key_is_valid(xform, fctx)))
		return -EINVAL;

	if (unlikely(xform->iv.length > MAX_IV_LEN))
		return -EINVAL;

	fctx->flags = rte_be_to_cpu_64(fctx->flags);
	fctx->w0.cipher_type = type;
	fctx->w0.aes_keylen = aes_keylen;
	fctx->w0.iv_source = IV_FROM_DPTR;
	fctx->flags = rte_cpu_to_be_64(fctx->flags);
	memset(fctx->crypto.key, 0, sizeof(fctx->crypto.key));
	memcpy(fctx->crypto.key, xform->key.data, xform->key.length);

	ctx->opcode = FLEXI_CRYPTO_ENCRYPT_HMAC;
	ctx->req_op = (xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
			NITROX_OP_ENCRYPT : NITROX_OP_DECRYPT;
	ctx->iv.offset = xform->iv.offset;
	ctx->iv.length = xform->iv.length;
	return 0;
}
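/*
 * Map the DPDK HMAC algorithms onto the engine's base hash types,
 * e.g. RTE_CRYPTO_AUTH_SHA1_HMAC -> AUTH_SHA1; anything else is
 * rejected as AUTH_INVALID.
 */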
static enum flexi_auth
get_flexi_auth_type(enum rte_crypto_auth_algorithm algo)
{
	enum flexi_auth type;

	switch (algo) {
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		type = AUTH_SHA1;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
		type = AUTH_SHA2_SHA224;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		type = AUTH_SHA2_SHA256;
		break;
	default:
		NITROX_LOG(ERR, "Algorithm not supported %d\n", algo);
		type = AUTH_INVALID;
		break;
	}

	return type;
}
static bool
auth_key_digest_is_valid(struct rte_crypto_auth_xform *xform,
			 struct flexi_crypto_context *fctx)
{
	if (unlikely(!xform->key.data && xform->key.length)) {
		NITROX_LOG(ERR, "Invalid auth key\n");
		return false;
	}

	if (unlikely(xform->key.length > sizeof(fctx->auth.opad))) {
		NITROX_LOG(ERR, "Invalid auth key length %d\n",
			   xform->key.length);
		return false;
	}

	return true;
}
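/*
 * Program the auth half of the flexi context: base hash type, MAC
 * length and key material. The key is copied into the zeroed
 * fctx->auth area (its opad field) for the engine to consume.
 */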
static int
configure_auth_ctx(struct rte_crypto_auth_xform *xform,
		   struct nitrox_crypto_ctx *ctx)
{
	enum flexi_auth type;
	struct flexi_crypto_context *fctx = &ctx->fctx;

	type = get_flexi_auth_type(xform->algo);
	if (unlikely(type == AUTH_INVALID))
		return -ENOTSUP;

	if (unlikely(!auth_key_digest_is_valid(xform, fctx)))
		return -EINVAL;

	ctx->auth_op = xform->op;
	ctx->auth_algo = xform->algo;
	ctx->digest_length = xform->digest_length;

	fctx->flags = rte_be_to_cpu_64(fctx->flags);
	fctx->w0.hash_type = type;
	fctx->w0.auth_input_type = 1;
	fctx->w0.mac_len = xform->digest_length;
	fctx->flags = rte_cpu_to_be_64(fctx->flags);
	memset(&fctx->auth, 0, sizeof(fctx->auth));
	memcpy(fctx->auth.opad, xform->key.data, xform->key.length);
	return 0;
}
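/*
 * Session configure: take a nitrox_crypto_ctx from the session
 * mempool, derive the chain order from the xforms, fill the cipher
 * and/or auth halves of the flexi context, and record the context
 * IOVA so it can be referenced by the engine. On any failure the
 * object goes back to the mempool.
 */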
static int
nitrox_sym_dev_sess_configure(struct rte_cryptodev *cdev,
			      struct rte_crypto_sym_xform *xform,
			      struct rte_cryptodev_sym_session *sess,
			      struct rte_mempool *mempool)
{
	void *mp_obj;
	struct nitrox_crypto_ctx *ctx;
	struct rte_crypto_cipher_xform *cipher_xform = NULL;
	struct rte_crypto_auth_xform *auth_xform = NULL;

	if (rte_mempool_get(mempool, &mp_obj)) {
		NITROX_LOG(ERR, "Couldn't allocate context\n");
		return -ENOMEM;
	}

	ctx = mp_obj;
	ctx->nitrox_chain = get_crypto_chain_order(xform);
	switch (ctx->nitrox_chain) {
	case NITROX_CHAIN_CIPHER_AUTH:
		cipher_xform = &xform->cipher;
		auth_xform = &xform->next->auth;
		break;
	case NITROX_CHAIN_AUTH_CIPHER:
		auth_xform = &xform->auth;
		cipher_xform = &xform->next->cipher;
		break;
	default:
		NITROX_LOG(ERR, "Crypto chain not supported\n");
		goto err;
	}

	if (cipher_xform && unlikely(configure_cipher_ctx(cipher_xform, ctx))) {
		NITROX_LOG(ERR, "Failed to configure cipher ctx\n");
		goto err;
	}

	if (auth_xform && unlikely(configure_auth_ctx(auth_xform, ctx))) {
		NITROX_LOG(ERR, "Failed to configure auth ctx\n");
		goto err;
	}

	ctx->iova = rte_mempool_virt2iova(ctx);
	set_sym_session_private_data(sess, cdev->driver_id, ctx);
	return 0;
err:
	rte_mempool_put(mempool, mp_obj);
	return -EINVAL;
}
static void
nitrox_sym_dev_sess_clear(struct rte_cryptodev *cdev,
			  struct rte_cryptodev_sym_session *sess)
{
	struct nitrox_crypto_ctx *ctx = get_sym_session_private_data(sess,
							cdev->driver_id);
	struct rte_mempool *sess_mp;

	if (!ctx)
		return;

	memset(ctx, 0, sizeof(*ctx));
	sess_mp = rte_mempool_from_obj(ctx);
	set_sym_session_private_data(sess, cdev->driver_id, NULL);
	rte_mempool_put(sess_mp, ctx);
}
static struct nitrox_crypto_ctx *
get_crypto_ctx(struct rte_crypto_op *op)
{
	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
		if (likely(op->sym->session))
			return get_sym_session_private_data(op->sym->session,
							    nitrox_sym_drv_id);
	}

	return NULL;
}
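/*
 * Enqueue one op: resolve the session context, take a softreq from
 * the per-qp pool and build the SE instruction for it, then post it
 * on the command ring. Ringing the doorbell is left to the caller so
 * that a whole burst costs a single doorbell write.
 */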
static int
nitrox_enq_single_op(struct nitrox_qp *qp, struct rte_crypto_op *op)
{
	struct nitrox_crypto_ctx *ctx;
	struct nitrox_softreq *sr;
	int err;

	op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
	ctx = get_crypto_ctx(op);
	if (unlikely(!ctx)) {
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
		return -EINVAL;
	}

	if (unlikely(rte_mempool_get(qp->sr_mp, (void **)&sr)))
		return -ENOMEM;

	err = nitrox_process_se_req(qp->qno, op, ctx, sr);
	if (unlikely(err)) {
		rte_mempool_put(qp->sr_mp, sr);
		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
		return err;
	}

	nitrox_qp_enqueue(qp, nitrox_sym_instr_addr(sr), sr);
	return 0;
}
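/*
 * Burst enqueue: the burst is clamped to the free ring slots and
 * stops at the first op that fails to enqueue; the doorbell is then
 * rung once for everything that was posted. A burst cut short by a
 * failing op counts one enqueue error.
 */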
static uint16_t
nitrox_sym_dev_enq_burst(void *queue_pair, struct rte_crypto_op **ops,
			 uint16_t nb_ops)
{
	struct nitrox_qp *qp = queue_pair;
	uint16_t free_slots = 0;
	uint16_t cnt = 0;
	bool err = false;

	free_slots = nitrox_qp_free_count(qp);
	if (nb_ops > free_slots)
		nb_ops = free_slots;

	for (cnt = 0; cnt < nb_ops; cnt++) {
		if (unlikely(nitrox_enq_single_op(qp, ops[cnt]))) {
			err = true;
			break;
		}
	}

	nitrox_ring_dbell(qp, cnt);
	qp->stats.enqueued_count += cnt;
	if (unlikely(err))
		qp->stats.enqueue_err_count++;

	return cnt;
}
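/*
 * Poll one completion in ring order. A negative return from
 * nitrox_check_se_req() means the oldest request is still pending,
 * reported to the caller as -EAGAIN. A completion code from the
 * engine is folded into the op status, with the microcode MAC
 * mismatch code surfacing as RTE_CRYPTO_OP_STATUS_AUTH_FAILED.
 */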
static int
nitrox_deq_single_op(struct nitrox_qp *qp, struct rte_crypto_op **op_ptr)
{
	struct nitrox_softreq *sr;
	int ret;
	struct rte_crypto_op *op;

	sr = nitrox_qp_get_softreq(qp);
	ret = nitrox_check_se_req(sr, op_ptr);
	if (ret < 0)
		return -EAGAIN;

	op = *op_ptr;
	nitrox_qp_dequeue(qp);
	rte_mempool_put(qp->sr_mp, sr);
	if (!ret) {
		op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
		qp->stats.dequeued_count++;
		return 0;
	}

	if (ret == MC_MAC_MISMATCH_ERR_CODE)
		op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
	else
		op->status = RTE_CRYPTO_OP_STATUS_ERROR;

	qp->stats.dequeue_err_count++;
	return 0;
}
static uint16_t
nitrox_sym_dev_deq_burst(void *queue_pair, struct rte_crypto_op **ops,
			 uint16_t nb_ops)
{
	struct nitrox_qp *qp = queue_pair;
	uint16_t filled_slots = nitrox_qp_used_count(qp);
	int cnt = 0;

	if (nb_ops > filled_slots)
		nb_ops = filled_slots;

	for (cnt = 0; cnt < nb_ops; cnt++)
		if (nitrox_deq_single_op(qp, &ops[cnt]))
			break;

	return cnt;
}
static struct rte_cryptodev_ops nitrox_cryptodev_ops = {
	.dev_configure		= nitrox_sym_dev_config,
	.dev_start		= nitrox_sym_dev_start,
	.dev_stop		= nitrox_sym_dev_stop,
	.dev_close		= nitrox_sym_dev_close,
	.dev_infos_get		= nitrox_sym_dev_info_get,
	.stats_get		= nitrox_sym_dev_stats_get,
	.stats_reset		= nitrox_sym_dev_stats_reset,
	.queue_pair_setup	= nitrox_sym_dev_qp_setup,
	.queue_pair_release	= nitrox_sym_dev_qp_release,
	.sym_session_get_size	= nitrox_sym_dev_sess_get_size,
	.sym_session_configure	= nitrox_sym_dev_sess_configure,
	.sym_session_clear	= nitrox_sym_dev_sess_clear
};
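/*
 * Create the cryptodev for a probed NITROX PCI device. The device is
 * named "<pci address>_n5sym", wired to the ops table and the burst
 * functions above, and advertises hardware accelerated symmetric
 * crypto with operation chaining and full scatter-gather support.
 */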
int
nitrox_sym_pmd_create(struct nitrox_device *ndev)
{
	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
	struct rte_cryptodev_pmd_init_params init_params = {
			.name = "",
			.socket_id = ndev->pdev->device.numa_node,
			.private_data_size = sizeof(struct nitrox_sym_device)
	};
	struct rte_cryptodev *cdev;

	rte_pci_device_name(&ndev->pdev->addr, name, sizeof(name));
	snprintf(name + strlen(name), RTE_CRYPTODEV_NAME_MAX_LEN - strlen(name),
		 "_n5sym");
	ndev->rte_sym_dev.driver = &nitrox_rte_sym_drv;
	ndev->rte_sym_dev.numa_node = ndev->pdev->device.numa_node;
	ndev->rte_sym_dev.devargs = NULL;
	cdev = rte_cryptodev_pmd_create(name, &ndev->rte_sym_dev,
					&init_params);
	if (!cdev) {
		NITROX_LOG(ERR, "Cryptodev '%s' creation failed\n", name);
		return -ENODEV;
	}

	ndev->rte_sym_dev.name = cdev->data->name;
	cdev->driver_id = nitrox_sym_drv_id;
	cdev->dev_ops = &nitrox_cryptodev_ops;
	cdev->enqueue_burst = nitrox_sym_dev_enq_burst;
	cdev->dequeue_burst = nitrox_sym_dev_deq_burst;
	cdev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
		RTE_CRYPTODEV_FF_HW_ACCELERATED |
		RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
		RTE_CRYPTODEV_FF_IN_PLACE_SGL |
		RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
		RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
		RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
		RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;

	ndev->sym_dev = cdev->data->dev_private;
	ndev->sym_dev->cdev = cdev;
	ndev->sym_dev->ndev = ndev;
	NITROX_LOG(DEBUG, "Created cryptodev '%s', dev_id %d, drv_id %d\n",
		   cdev->data->name, cdev->data->dev_id, nitrox_sym_drv_id);
	return 0;
}
int
nitrox_sym_pmd_destroy(struct nitrox_device *ndev)
{
	return rte_cryptodev_pmd_destroy(ndev->sym_dev->cdev);
}

static struct cryptodev_driver nitrox_crypto_drv;
RTE_PMD_REGISTER_CRYPTO_DRIVER(nitrox_crypto_drv,
		nitrox_rte_sym_drv,
		nitrox_sym_drv_id);