/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019 Marvell International Ltd.
 */

#include <stdbool.h>

#include <rte_cryptodev_pmd.h>
#include <rte_crypto.h>

#include "nitrox_sym.h"
#include "nitrox_device.h"
#include "nitrox_sym_capabilities.h"
#include "nitrox_qp.h"
#include "nitrox_sym_reqmgr.h"
#include "nitrox_sym_ctx.h"
#include "nitrox_logs.h"

#define CRYPTODEV_NAME_NITROX_PMD crypto_nitrox_sym
#define MC_MAC_MISMATCH_ERR_CODE 0x4c
#define NPS_PKT_IN_INSTR_SIZE 64
#define IV_FROM_DPTR 1
#define FLEXI_CRYPTO_ENCRYPT_HMAC 0x33
#define AES_KEYSIZE_128 16
#define AES_KEYSIZE_192 24
#define AES_KEYSIZE_256 32
/* Largest IV accepted by the flexi crypto engine (AES block size). */
#define MAX_IV_LEN 16

struct nitrox_sym_device {
	struct rte_cryptodev *cdev;
	struct nitrox_device *ndev;
};

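/*
 * Cipher and auth algorithm codes consumed by the SE flexi crypto
 * microcode. Condensed sketch: only the entries referenced in this file
 * are listed, so the *_INVALID sentinels keep their role but not
 * necessarily the upstream driver's numeric values; the entries before
 * CIPHER_AES_CBC and AUTH_SHA2_SHA256 follow the upstream ordering.
 */
enum flexi_cipher {
	CIPHER_NULL = 0,
	CIPHER_3DES_CBC,
	CIPHER_3DES_ECB,
	CIPHER_AES_CBC,
	CIPHER_INVALID
};

enum flexi_auth {
	AUTH_NULL = 0,
	AUTH_MD5,
	AUTH_SHA1,
	AUTH_SHA2_SHA224,
	AUTH_SHA2_SHA256,
	AUTH_INVALID
};
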
uint8_t nitrox_sym_drv_id;
static const char nitrox_sym_drv_name[] = RTE_STR(CRYPTODEV_NAME_NITROX_PMD);
static const struct rte_driver nitrox_rte_sym_drv = {
	.name = nitrox_sym_drv_name,
	.alias = nitrox_sym_drv_name
};

static int nitrox_sym_dev_qp_release(struct rte_cryptodev *cdev,
				     uint16_t qp_id);

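/*
 * Device control ops. Configuration only validates the requested number
 * of queue pairs against what the VF exposes; start/stop are no-ops
 * because the SE cores are brought up and torn down by the PF driver.
 */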
static int
nitrox_sym_dev_config(struct rte_cryptodev *cdev,
		      struct rte_cryptodev_config *config)
{
	struct nitrox_sym_device *sym_dev = cdev->data->dev_private;
	struct nitrox_device *ndev = sym_dev->ndev;

	if (config->nb_queue_pairs > ndev->nr_queues) {
		NITROX_LOG(ERR, "Invalid queue pairs, max supported %d\n",
			   ndev->nr_queues);
		return -EINVAL;
	}

	return 0;
}

static int
nitrox_sym_dev_start(struct rte_cryptodev *cdev)
{
	/* SE cores initialization is done in PF */
	RTE_SET_USED(cdev);
	return 0;
}

static void
nitrox_sym_dev_stop(struct rte_cryptodev *cdev)
{
	/* SE cores cleanup is done in PF */
	RTE_SET_USED(cdev);
}

static int
nitrox_sym_dev_close(struct rte_cryptodev *cdev)
{
	int i, ret;

	for (i = 0; i < cdev->data->nb_queue_pairs; i++) {
		ret = nitrox_sym_dev_qp_release(cdev, i);
		if (ret)
			return ret;
	}

	return 0;
}

static void
nitrox_sym_dev_info_get(struct rte_cryptodev *cdev,
			struct rte_cryptodev_info *info)
{
	struct nitrox_sym_device *sym_dev = cdev->data->dev_private;
	struct nitrox_device *ndev = sym_dev->ndev;

	if (!info)
		return;

	info->max_nb_queue_pairs = ndev->nr_queues;
	info->feature_flags = cdev->feature_flags;
	info->capabilities = nitrox_get_sym_capabilities();
	info->driver_id = nitrox_sym_drv_id;
	info->sym.max_nb_sessions = 0;
}

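/*
 * Stats are kept per queue pair and summed on demand; reset simply
 * zeroes each queue pair's counters in place.
 */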
static void
nitrox_sym_dev_stats_get(struct rte_cryptodev *cdev,
			 struct rte_cryptodev_stats *stats)
{
	int qp_id;

	if (!stats)
		return;

	for (qp_id = 0; qp_id < cdev->data->nb_queue_pairs; qp_id++) {
		struct nitrox_qp *qp = cdev->data->queue_pairs[qp_id];

		if (!qp)
			continue;

		stats->enqueued_count += qp->stats.enqueued_count;
		stats->dequeued_count += qp->stats.dequeued_count;
		stats->enqueue_err_count += qp->stats.enqueue_err_count;
		stats->dequeue_err_count += qp->stats.dequeue_err_count;
	}
}

static void
nitrox_sym_dev_stats_reset(struct rte_cryptodev *cdev)
{
	int qp_id;

	for (qp_id = 0; qp_id < cdev->data->nb_queue_pairs; qp_id++) {
		struct nitrox_qp *qp = cdev->data->queue_pairs[qp_id];

		if (!qp)
			continue;

		memset(&qp->stats, 0, sizeof(qp->stats));
	}
}

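/*
 * Queue pair setup: allocate the qp on the requested socket, map the
 * hardware command ring through nitrox_qp_setup(), then back it with a
 * mempool of software requests sized to the ring. An existing qp at the
 * same index is released first.
 */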
static int
nitrox_sym_dev_qp_setup(struct rte_cryptodev *cdev, uint16_t qp_id,
			const struct rte_cryptodev_qp_conf *qp_conf,
			int socket_id)
{
	struct nitrox_sym_device *sym_dev = cdev->data->dev_private;
	struct nitrox_device *ndev = sym_dev->ndev;
	struct nitrox_qp *qp = NULL;
	int err;

	NITROX_LOG(DEBUG, "queue %d\n", qp_id);
	if (qp_id >= ndev->nr_queues) {
		NITROX_LOG(ERR, "queue %u invalid, max queues supported %d\n",
			   qp_id, ndev->nr_queues);
		return -EINVAL;
	}

	if (cdev->data->queue_pairs[qp_id]) {
		err = nitrox_sym_dev_qp_release(cdev, qp_id);
		if (err)
			return err;
	}

	qp = rte_zmalloc_socket("nitrox PMD qp", sizeof(*qp),
				RTE_CACHE_LINE_SIZE, socket_id);
	if (!qp) {
		NITROX_LOG(ERR, "Failed to allocate nitrox qp\n");
		return -ENOMEM;
	}

	qp->qno = qp_id;
	err = nitrox_qp_setup(qp, ndev->bar_addr, cdev->data->name,
			      qp_conf->nb_descriptors, NPS_PKT_IN_INSTR_SIZE,
			      socket_id);
	if (unlikely(err))
		goto qp_setup_err;

	qp->sr_mp = nitrox_sym_req_pool_create(cdev, qp->count, qp_id,
					       socket_id);
	if (unlikely(!qp->sr_mp)) {
		err = -ENOMEM;
		goto req_pool_err;
	}

	cdev->data->queue_pairs[qp_id] = qp;
	NITROX_LOG(DEBUG, "queue %d setup done\n", qp_id);
	return 0;

req_pool_err:
	nitrox_qp_release(qp, ndev->bar_addr);
qp_setup_err:
	rte_free(qp);
	return err;
}

static int
nitrox_sym_dev_qp_release(struct rte_cryptodev *cdev, uint16_t qp_id)
{
	struct nitrox_sym_device *sym_dev = cdev->data->dev_private;
	struct nitrox_device *ndev = sym_dev->ndev;
	struct nitrox_qp *qp;
	int err;

	NITROX_LOG(DEBUG, "queue %d\n", qp_id);
	if (qp_id >= ndev->nr_queues) {
		NITROX_LOG(ERR, "queue %u invalid, max queues supported %d\n",
			   qp_id, ndev->nr_queues);
		return -EINVAL;
	}

	qp = cdev->data->queue_pairs[qp_id];
	if (!qp) {
		NITROX_LOG(DEBUG, "queue %u already freed\n", qp_id);
		return 0;
	}

	if (!nitrox_qp_is_empty(qp)) {
		NITROX_LOG(ERR, "queue %d not empty\n", qp_id);
		return -EAGAIN;
	}

	cdev->data->queue_pairs[qp_id] = NULL;
	err = nitrox_qp_release(qp, ndev->bar_addr);
	nitrox_sym_req_pool_free(qp->sr_mp);
	rte_free(qp);
	NITROX_LOG(DEBUG, "queue %d release done\n", qp_id);
	return err;
}

static unsigned int
nitrox_sym_dev_sess_get_size(__rte_unused struct rte_cryptodev *cdev)
{
	return sizeof(struct nitrox_crypto_ctx);
}

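/*
 * Classify the xform chain. The SE flexi opcode implements combined
 * cipher + HMAC, so the recognised orders are cipher-only,
 * encrypt-then-generate and verify-then-decrypt; anything else is
 * reported as unsupported.
 */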
static enum nitrox_chain
get_crypto_chain_order(const struct rte_crypto_sym_xform *xform)
{
	enum nitrox_chain res = NITROX_CHAIN_NOT_SUPPORTED;

	if (unlikely(xform == NULL))
		return res;

	switch (xform->type) {
	case RTE_CRYPTO_SYM_XFORM_AUTH:
		if (xform->next == NULL) {
			res = NITROX_CHAIN_NOT_SUPPORTED;
		} else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
			if (xform->auth.op == RTE_CRYPTO_AUTH_OP_VERIFY &&
			    xform->next->cipher.op ==
			    RTE_CRYPTO_CIPHER_OP_DECRYPT) {
				res = NITROX_CHAIN_AUTH_CIPHER;
			} else {
				NITROX_LOG(ERR, "auth op %d, cipher op %d\n",
					   xform->auth.op,
					   xform->next->cipher.op);
			}
		}
		break;
	case RTE_CRYPTO_SYM_XFORM_CIPHER:
		if (xform->next == NULL) {
			res = NITROX_CHAIN_CIPHER_ONLY;
		} else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
			if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT &&
			    xform->next->auth.op ==
			    RTE_CRYPTO_AUTH_OP_GENERATE) {
				res = NITROX_CHAIN_CIPHER_AUTH;
			} else {
				NITROX_LOG(ERR, "cipher op %d, auth op %d\n",
					   xform->cipher.op,
					   xform->next->auth.op);
			}
		}
		break;
	default:
		break;
	}

	return res;
}

static enum flexi_cipher
get_flexi_cipher_type(enum rte_crypto_cipher_algorithm algo, bool *is_aes)
{
	enum flexi_cipher type;

	switch (algo) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		type = CIPHER_AES_CBC;
		*is_aes = true;
		break;
	default:
		type = CIPHER_INVALID;
		NITROX_LOG(ERR, "Algorithm not supported %d\n", algo);
		break;
	}

	return type;
}

static int
flexi_aes_keylen(size_t keylen, bool is_aes)
{
	int aes_keylen;

	if (!is_aes)
		return 0;

	switch (keylen) {
	case AES_KEYSIZE_128:
		aes_keylen = 1;
		break;
	case AES_KEYSIZE_192:
		aes_keylen = 2;
		break;
	case AES_KEYSIZE_256:
		aes_keylen = 3;
		break;
	default:
		NITROX_LOG(ERR, "Invalid keylen %zu\n", keylen);
		aes_keylen = -EINVAL;
		break;
	}

	return aes_keylen;
}

static bool
crypto_key_is_valid(struct rte_crypto_cipher_xform *xform,
		    struct flexi_crypto_context *fctx)
{
	if (unlikely(xform->key.length > sizeof(fctx->crypto.key))) {
		NITROX_LOG(ERR, "Invalid crypto key length %d\n",
			   xform->key.length);
		return false;
	}

	return true;
}

static int
configure_cipher_ctx(struct rte_crypto_cipher_xform *xform,
		     struct nitrox_crypto_ctx *ctx)
{
	enum flexi_cipher type;
	bool cipher_is_aes = false;
	int aes_keylen;
	struct flexi_crypto_context *fctx = &ctx->fctx;

	type = get_flexi_cipher_type(xform->algo, &cipher_is_aes);
	if (unlikely(type == CIPHER_INVALID))
		return -ENOTSUP;

	aes_keylen = flexi_aes_keylen(xform->key.length, cipher_is_aes);
	if (unlikely(aes_keylen < 0))
		return -EINVAL;

	if (unlikely(!cipher_is_aes && !crypto_key_is_valid(xform, fctx)))
		return -EINVAL;

	if (unlikely(xform->iv.length > MAX_IV_LEN))
		return -EINVAL;

	/* w0 bitfields live inside the big-endian flags word */
	fctx->flags = rte_be_to_cpu_64(fctx->flags);
	fctx->w0.cipher_type = type;
	fctx->w0.aes_keylen = aes_keylen;
	fctx->w0.iv_source = IV_FROM_DPTR;
	fctx->flags = rte_cpu_to_be_64(fctx->flags);
	memset(fctx->crypto.key, 0, sizeof(fctx->crypto.key));
	memcpy(fctx->crypto.key, xform->key.data, xform->key.length);

	ctx->opcode = FLEXI_CRYPTO_ENCRYPT_HMAC;
	ctx->req_op = (xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
			NITROX_OP_ENCRYPT : NITROX_OP_DECRYPT;
	ctx->iv.offset = xform->iv.offset;
	ctx->iv.length = xform->iv.length;
	return 0;
}

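/*
 * Auth side of the session: map the HMAC algorithm to an SE hash type
 * and stage the raw key in the flexi context's opad area.
 */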
static enum flexi_auth
get_flexi_auth_type(enum rte_crypto_auth_algorithm algo)
{
	enum flexi_auth type;

	switch (algo) {
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		type = AUTH_SHA1;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
		type = AUTH_SHA2_SHA224;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		type = AUTH_SHA2_SHA256;
		break;
	default:
		NITROX_LOG(ERR, "Algorithm not supported %d\n", algo);
		type = AUTH_INVALID;
		break;
	}

	return type;
}

static bool
auth_key_digest_is_valid(struct rte_crypto_auth_xform *xform,
			 struct flexi_crypto_context *fctx)
{
	if (unlikely(!xform->key.data && xform->key.length)) {
		NITROX_LOG(ERR, "Invalid auth key\n");
		return false;
	}

	if (unlikely(xform->key.length > sizeof(fctx->auth.opad))) {
		NITROX_LOG(ERR, "Invalid auth key length %d\n",
			   xform->key.length);
		return false;
	}

	return true;
}

static int
configure_auth_ctx(struct rte_crypto_auth_xform *xform,
		   struct nitrox_crypto_ctx *ctx)
{
	enum flexi_auth type;
	struct flexi_crypto_context *fctx = &ctx->fctx;

	type = get_flexi_auth_type(xform->algo);
	if (unlikely(type == AUTH_INVALID))
		return -ENOTSUP;

	if (unlikely(!auth_key_digest_is_valid(xform, fctx)))
		return -EINVAL;

	ctx->auth_op = xform->op;
	ctx->auth_algo = xform->algo;
	ctx->digest_length = xform->digest_length;

	fctx->flags = rte_be_to_cpu_64(fctx->flags);
	fctx->w0.hash_type = type;
	fctx->w0.auth_input_type = 1;
	fctx->w0.mac_len = xform->digest_length;
	fctx->flags = rte_cpu_to_be_64(fctx->flags);
	memset(&fctx->auth, 0, sizeof(fctx->auth));
	memcpy(fctx->auth.opad, xform->key.data, xform->key.length);
	return 0;
}

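/*
 * Sessions are mempool objects: configure pulls a context from the
 * caller's pool, fills it from the xform chain and records its IOVA so
 * the datapath can hand the context straight to hardware; clear wipes
 * the context and returns it to its pool.
 */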
static int
nitrox_sym_dev_sess_configure(struct rte_cryptodev *cdev,
			      struct rte_crypto_sym_xform *xform,
			      struct rte_cryptodev_sym_session *sess,
			      struct rte_mempool *mempool)
{
	void *mp_obj;
	struct nitrox_crypto_ctx *ctx;
	struct rte_crypto_cipher_xform *cipher_xform = NULL;
	struct rte_crypto_auth_xform *auth_xform = NULL;

	if (rte_mempool_get(mempool, &mp_obj)) {
		NITROX_LOG(ERR, "Couldn't allocate context\n");
		return -ENOMEM;
	}

	ctx = mp_obj;
	ctx->nitrox_chain = get_crypto_chain_order(xform);
	switch (ctx->nitrox_chain) {
	case NITROX_CHAIN_CIPHER_AUTH:
		cipher_xform = &xform->cipher;
		auth_xform = &xform->next->auth;
		break;
	case NITROX_CHAIN_AUTH_CIPHER:
		auth_xform = &xform->auth;
		cipher_xform = &xform->next->cipher;
		break;
	default:
		NITROX_LOG(ERR, "Crypto chain not supported\n");
		goto err;
	}

	if (cipher_xform && unlikely(configure_cipher_ctx(cipher_xform, ctx))) {
		NITROX_LOG(ERR, "Failed to configure cipher ctx\n");
		goto err;
	}

	if (auth_xform && unlikely(configure_auth_ctx(auth_xform, ctx))) {
		NITROX_LOG(ERR, "Failed to configure auth ctx\n");
		goto err;
	}

	ctx->iova = rte_mempool_virt2iova(ctx);
	set_sym_session_private_data(sess, cdev->driver_id, ctx);
	return 0;
err:
	rte_mempool_put(mempool, mp_obj);
	return -EINVAL;
}

static void
nitrox_sym_dev_sess_clear(struct rte_cryptodev *cdev,
			  struct rte_cryptodev_sym_session *sess)
{
	struct nitrox_crypto_ctx *ctx = get_sym_session_private_data(sess,
							cdev->driver_id);
	struct rte_mempool *sess_mp;

	if (!ctx)
		return;

	memset(ctx, 0, sizeof(*ctx));
	sess_mp = rte_mempool_from_obj(ctx);
	set_sym_session_private_data(sess, cdev->driver_id, NULL);
	rte_mempool_put(sess_mp, ctx);
}

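/*
 * Datapath. Each enqueued op borrows a software request
 * (nitrox_softreq) from the queue pair's mempool, is translated into an
 * SE instruction and posted to the ring; the doorbell is rung once per
 * burst.
 */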
static struct nitrox_crypto_ctx *
get_crypto_ctx(struct rte_crypto_op *op)
{
	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
		if (likely(op->sym->session))
			return get_sym_session_private_data(op->sym->session,
							    nitrox_sym_drv_id);
	}

	return NULL;
}

static int
nitrox_enq_single_op(struct nitrox_qp *qp, struct rte_crypto_op *op)
{
	struct nitrox_crypto_ctx *ctx;
	struct nitrox_softreq *sr;
	int err;

	op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
	ctx = get_crypto_ctx(op);
	if (unlikely(!ctx)) {
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
		return -EINVAL;
	}

	if (unlikely(rte_mempool_get(qp->sr_mp, (void **)&sr)))
		return -ENOMEM;

	err = nitrox_process_se_req(qp->qno, op, ctx, sr);
	if (unlikely(err)) {
		rte_mempool_put(qp->sr_mp, sr);
		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
		return err;
	}

	nitrox_qp_enqueue(qp, nitrox_sym_instr_addr(sr), sr);
	return 0;
}

static uint16_t
nitrox_sym_dev_enq_burst(void *queue_pair, struct rte_crypto_op **ops,
			 uint16_t nb_ops)
{
	struct nitrox_qp *qp = queue_pair;
	uint16_t free_slots = 0;
	uint16_t cnt = 0;
	bool err = false;

	free_slots = nitrox_qp_free_count(qp);
	if (nb_ops > free_slots)
		nb_ops = free_slots;

	for (cnt = 0; cnt < nb_ops; cnt++) {
		if (unlikely(nitrox_enq_single_op(qp, ops[cnt]))) {
			err = true;
			break;
		}
	}

	nitrox_ring_dbell(qp, cnt);
	qp->stats.enqueued_count += cnt;
	if (unlikely(err))
		qp->stats.enqueue_err_count++;

	return cnt;
}

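/*
 * Dequeue: completions are reaped in ring order. nitrox_check_se_req()
 * returns negative while a request is still in flight, 0 on success, or
 * the microcode completion code; 0x4c (MC_MAC_MISMATCH_ERR_CODE) maps
 * to an authentication failure, everything else to a generic error.
 */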
static int
nitrox_deq_single_op(struct nitrox_qp *qp, struct rte_crypto_op **op_ptr)
{
	struct nitrox_softreq *sr;
	int ret;
	struct rte_crypto_op *op;

	sr = nitrox_qp_get_softreq(qp);
	ret = nitrox_check_se_req(sr, op_ptr);
	if (ret < 0)
		return -EAGAIN;

	op = *op_ptr;
	nitrox_qp_dequeue(qp);
	rte_mempool_put(qp->sr_mp, sr);
	if (!ret) {
		op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
		qp->stats.dequeued_count++;
		return 0;
	}

	if (ret == MC_MAC_MISMATCH_ERR_CODE)
		op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
	else
		op->status = RTE_CRYPTO_OP_STATUS_ERROR;

	qp->stats.dequeue_err_count++;
	return 0;
}

static uint16_t
nitrox_sym_dev_deq_burst(void *queue_pair, struct rte_crypto_op **ops,
			 uint16_t nb_ops)
{
	struct nitrox_qp *qp = queue_pair;
	uint16_t filled_slots = nitrox_qp_used_count(qp);
	int cnt = 0;

	if (nb_ops > filled_slots)
		nb_ops = filled_slots;

	for (cnt = 0; cnt < nb_ops; cnt++)
		if (nitrox_deq_single_op(qp, &ops[cnt]))
			break;

	return cnt;
}

static struct rte_cryptodev_ops nitrox_cryptodev_ops = {
	.dev_configure = nitrox_sym_dev_config,
	.dev_start = nitrox_sym_dev_start,
	.dev_stop = nitrox_sym_dev_stop,
	.dev_close = nitrox_sym_dev_close,
	.dev_infos_get = nitrox_sym_dev_info_get,
	.stats_get = nitrox_sym_dev_stats_get,
	.stats_reset = nitrox_sym_dev_stats_reset,
	.queue_pair_setup = nitrox_sym_dev_qp_setup,
	.queue_pair_release = nitrox_sym_dev_qp_release,
	.sym_session_get_size = nitrox_sym_dev_sess_get_size,
	.sym_session_configure = nitrox_sym_dev_sess_configure,
	.sym_session_clear = nitrox_sym_dev_sess_clear
};

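/*
 * PMD instantiation: the cryptodev is named after the PCI BDF with an
 * "_n5sym" suffix, created via the cryptodev PMD helper, and wired to
 * the burst handlers and feature flags above.
 */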
int
nitrox_sym_pmd_create(struct nitrox_device *ndev)
{
	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
	struct rte_cryptodev_pmd_init_params init_params = {
			.name = "",
			.socket_id = ndev->pdev->device.numa_node,
			.private_data_size = sizeof(struct nitrox_sym_device)
	};
	struct rte_cryptodev *cdev;

	rte_pci_device_name(&ndev->pdev->addr, name, sizeof(name));
	snprintf(name + strlen(name), sizeof(name) - strlen(name), "_n5sym");
	ndev->rte_sym_dev.driver = &nitrox_rte_sym_drv;
	ndev->rte_sym_dev.numa_node = ndev->pdev->device.numa_node;
	ndev->rte_sym_dev.devargs = NULL;
	cdev = rte_cryptodev_pmd_create(name, &ndev->rte_sym_dev,
					&init_params);
	if (!cdev) {
		NITROX_LOG(ERR, "Cryptodev '%s' creation failed\n", name);
		return -ENODEV;
	}

	ndev->rte_sym_dev.name = cdev->data->name;
	cdev->driver_id = nitrox_sym_drv_id;
	cdev->dev_ops = &nitrox_cryptodev_ops;
	cdev->enqueue_burst = nitrox_sym_dev_enq_burst;
	cdev->dequeue_burst = nitrox_sym_dev_deq_burst;
	cdev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
		RTE_CRYPTODEV_FF_HW_ACCELERATED |
		RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
		RTE_CRYPTODEV_FF_IN_PLACE_SGL |
		RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
		RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
		RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
		RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;

	ndev->sym_dev = cdev->data->dev_private;
	ndev->sym_dev->cdev = cdev;
	ndev->sym_dev->ndev = ndev;
	NITROX_LOG(DEBUG, "Created cryptodev '%s', dev_id %d, drv_id %d\n",
		   cdev->data->name, cdev->data->dev_id, nitrox_sym_drv_id);
	return 0;
}

int
nitrox_sym_pmd_destroy(struct nitrox_device *ndev)
{
	return rte_cryptodev_pmd_destroy(ndev->sym_dev->cdev);
}

static struct cryptodev_driver nitrox_crypto_drv;
RTE_PMD_REGISTER_CRYPTO_DRIVER(nitrox_crypto_drv,
		nitrox_rte_sym_drv,
		nitrox_sym_drv_id);