X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fcrypto%2Fdpaa2_sec%2Fdpaa2_sec_dpseci.c;h=dfa72f3f93e1e894f1351d18f784010e7bf04c5d;hb=437dbd2fd4289c32e2937a1e7dbe95645457dfff;hp=6ff0d833e93289d85eb39c827c586b29118b6040;hpb=0ea0bbfebc3e557b1739b20de0dabbe938b2f058;p=dpdk.git

diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
index 6ff0d833e9..dfa72f3f93 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: BSD-3-Clause
  *
  * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
- * Copyright 2016-2020 NXP
+ * Copyright 2016-2021 NXP
  *
  */
 
@@ -18,7 +18,7 @@
 #include 
 #include 
 #include 
-#include 
+#include 
 #include 
 #include 
 #include 
@@ -1709,8 +1709,9 @@ dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
 
 		if (unlikely(fd->simple.frc)) {
 			/* TODO Parse SEC errors */
-			DPAA2_SEC_ERR("SEC returned Error - %x",
+			DPAA2_SEC_DP_ERR("SEC returned Error - %x\n",
 				      fd->simple.frc);
+			dpaa2_qp->rx_vq.err_pkts += 1;
 			ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_ERROR;
 		} else {
 			ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
@@ -1722,7 +1723,8 @@ dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
 
 	dpaa2_qp->rx_vq.rx_pkts += num_rx;
 
-	DPAA2_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);
+	DPAA2_SEC_DP_DEBUG("SEC RX pkts %d err pkts %" PRIu64 "\n", num_rx,
+				dpaa2_qp->rx_vq.err_pkts);
 	/*Return the total number of packets received to DPAA2 app*/
 	return num_rx;
 }
@@ -1842,7 +1844,7 @@ dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
 	session->ctxt_type = DPAA2_SEC_CIPHER;
 	session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
 			RTE_CACHE_LINE_SIZE);
-	if (session->cipher_key.data == NULL) {
+	if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
 		DPAA2_SEC_ERR("No Memory for cipher key");
 		rte_free(priv);
 		return -ENOMEM;
@@ -2134,10 +2136,28 @@ dpaa2_sec_auth_init(struct rte_cryptodev *dev,
 					   !session->dir,
 					   session->digest_length);
 		break;
-	case RTE_CRYPTO_AUTH_AES_GMAC:
 	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
+		authdata.algtype = OP_ALG_ALGSEL_AES;
+		authdata.algmode = OP_ALG_AAI_XCBC_MAC;
+		session->auth_alg = RTE_CRYPTO_AUTH_AES_XCBC_MAC;
+		bufsize = cnstr_shdsc_aes_mac(
+				priv->flc_desc[DESC_INITFINAL].desc,
+				1, 0, SHR_NEVER, &authdata,
+				!session->dir,
+				session->digest_length);
+		break;
 	case RTE_CRYPTO_AUTH_AES_CMAC:
+		authdata.algtype = OP_ALG_ALGSEL_AES;
+		authdata.algmode = OP_ALG_AAI_CMAC;
+		session->auth_alg = RTE_CRYPTO_AUTH_AES_CMAC;
+		bufsize = cnstr_shdsc_aes_mac(
+				priv->flc_desc[DESC_INITFINAL].desc,
+				1, 0, SHR_NEVER, &authdata,
+				!session->dir,
+				session->digest_length);
+		break;
 	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
+	case RTE_CRYPTO_AUTH_AES_GMAC:
 	case RTE_CRYPTO_AUTH_KASUMI_F9:
 	case RTE_CRYPTO_AUTH_NULL:
 		DPAA2_SEC_ERR("Crypto: Unsupported auth alg %un",
@@ -2406,6 +2426,17 @@ dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev,
 		session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
 		break;
 	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
+		authdata.algtype = OP_ALG_ALGSEL_AES;
+		authdata.algmode = OP_ALG_AAI_XCBC_MAC;
+		session->auth_alg = RTE_CRYPTO_AUTH_AES_XCBC_MAC;
+		break;
+	case RTE_CRYPTO_AUTH_AES_CMAC:
+		authdata.algtype = OP_ALG_ALGSEL_AES;
+		authdata.algmode = OP_ALG_AAI_CMAC;
+		session->auth_alg = RTE_CRYPTO_AUTH_AES_CMAC;
+		break;
+	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
+	case RTE_CRYPTO_AUTH_AES_GMAC:
 	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
 	case RTE_CRYPTO_AUTH_NULL:
 	case RTE_CRYPTO_AUTH_SHA1:
@@ -2414,10 +2445,7 @@ dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev,
 	case RTE_CRYPTO_AUTH_SHA224:
 	case RTE_CRYPTO_AUTH_SHA384:
 	case RTE_CRYPTO_AUTH_MD5:
-	case RTE_CRYPTO_AUTH_AES_GMAC:
 	case RTE_CRYPTO_AUTH_KASUMI_F9:
-	case RTE_CRYPTO_AUTH_AES_CMAC:
-	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
 	case RTE_CRYPTO_AUTH_ZUC_EIA3:
 		DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
 			      auth_xform->algo);
@@ -2750,14 +2778,18 @@ dpaa2_sec_ipsec_proto_init(struct rte_crypto_cipher_xform *cipher_xform,
 		authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_512_256;
 		authdata->algmode = OP_ALG_AAI_HMAC;
 		break;
+	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
+		authdata->algtype = OP_PCL_IPSEC_AES_XCBC_MAC_96;
+		authdata->algmode = OP_ALG_AAI_XCBC_MAC;
+		break;
 	case RTE_CRYPTO_AUTH_AES_CMAC:
 		authdata->algtype = OP_PCL_IPSEC_AES_CMAC_96;
+		authdata->algmode = OP_ALG_AAI_CMAC;
 		break;
 	case RTE_CRYPTO_AUTH_NULL:
 		authdata->algtype = OP_PCL_IPSEC_HMAC_NULL;
 		break;
 	case RTE_CRYPTO_AUTH_SHA224_HMAC:
-	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
 	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
 	case RTE_CRYPTO_AUTH_SHA1:
 	case RTE_CRYPTO_AUTH_SHA256:
@@ -3072,7 +3104,7 @@ dpaa2_sec_set_pdcp_session(struct rte_cryptodev *dev,
 	struct rte_security_pdcp_xform *pdcp_xform = &conf->pdcp;
 	struct rte_crypto_sym_xform *xform = conf->crypto_xform;
 	struct rte_crypto_auth_xform *auth_xform = NULL;
-	struct rte_crypto_cipher_xform *cipher_xform;
+	struct rte_crypto_cipher_xform *cipher_xform = NULL;
 	dpaa2_sec_session *session = (dpaa2_sec_session *)sess;
 	struct ctxt_priv *priv;
 	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
@@ -3104,18 +3136,18 @@ dpaa2_sec_set_pdcp_session(struct rte_cryptodev *dev,
 	flc = &priv->flc_desc[0].flc;
 
 	/* find xfrm types */
-	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
-		cipher_xform = &xform->cipher;
-	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
-		   xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
-		session->ext_params.aead_ctxt.auth_cipher_text = true;
+	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
 		cipher_xform = &xform->cipher;
-		auth_xform = &xform->next->auth;
-	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
-		   xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
-		session->ext_params.aead_ctxt.auth_cipher_text = false;
-		cipher_xform = &xform->next->cipher;
+		if (xform->next != NULL) {
+			session->ext_params.aead_ctxt.auth_cipher_text = true;
+			auth_xform = &xform->next->auth;
+		}
+	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
 		auth_xform = &xform->auth;
+		if (xform->next != NULL) {
+			session->ext_params.aead_ctxt.auth_cipher_text = false;
+			cipher_xform = &xform->next->cipher;
+		}
 	} else {
 		DPAA2_SEC_ERR("Invalid crypto type");
 		return -EINVAL;
@@ -3154,7 +3186,8 @@ dpaa2_sec_set_pdcp_session(struct rte_cryptodev *dev,
 	session->pdcp.hfn_threshold = pdcp_xform->hfn_threshold;
 	session->pdcp.hfn_ovd = pdcp_xform->hfn_ovrd;
 	/* hfv ovd offset location is stored in iv.offset value*/
-	session->pdcp.hfn_ovd_offset = cipher_xform->iv.offset;
+	if (cipher_xform)
+		session->pdcp.hfn_ovd_offset = cipher_xform->iv.offset;
 
 	cipherdata.key = (size_t)session->cipher_key.data;
 	cipherdata.keylen = session->cipher_key.length;
@@ -3231,12 +3264,28 @@ dpaa2_sec_set_pdcp_session(struct rte_cryptodev *dev,
 			goto out;
 		}
 
-	if (rta_inline_pdcp_query(authdata.algtype,
-				cipherdata.algtype,
-				session->pdcp.sn_size,
-				session->pdcp.hfn_ovd)) {
-		cipherdata.key = DPAA2_VADDR_TO_IOVA(cipherdata.key);
-		cipherdata.key_type = RTA_DATA_PTR;
+	if (pdcp_xform->sdap_enabled) {
+		int nb_keys_to_inline =
+			rta_inline_pdcp_sdap_query(authdata.algtype,
+					cipherdata.algtype,
+					session->pdcp.sn_size,
+					session->pdcp.hfn_ovd);
+		if (nb_keys_to_inline >= 1) {
+			cipherdata.key = DPAA2_VADDR_TO_IOVA(cipherdata.key);
+			cipherdata.key_type = RTA_DATA_PTR;
+		}
+		if (nb_keys_to_inline >= 2) {
+			authdata.key = DPAA2_VADDR_TO_IOVA(authdata.key);
+			authdata.key_type = RTA_DATA_PTR;
+		}
+	} else {
+		if (rta_inline_pdcp_query(authdata.algtype,
+				cipherdata.algtype,
+				session->pdcp.sn_size,
+				session->pdcp.hfn_ovd)) {
+			cipherdata.key = DPAA2_VADDR_TO_IOVA(cipherdata.key);
+			cipherdata.key_type = RTA_DATA_PTR;
+		}
 	}
 
 	if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
@@ -3260,6 +3309,10 @@ dpaa2_sec_set_pdcp_session(struct rte_cryptodev *dev,
 					pdcp_xform->hfn_threshold,
 					&cipherdata, &authdata,
 					0);
+
+	} else if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_SHORT_MAC) {
+		bufsize = cnstr_shdsc_pdcp_short_mac(priv->flc_desc[0].desc,
+				1, swap, &authdata);
 	} else {
 		if (session->dir == DIR_ENC) {
 			if (pdcp_xform->sdap_enabled)
@@ -3534,32 +3587,10 @@ dpaa2_sec_dev_stop(struct rte_cryptodev *dev)
 }
 
 static int
-dpaa2_sec_dev_close(struct rte_cryptodev *dev)
+dpaa2_sec_dev_close(struct rte_cryptodev *dev __rte_unused)
 {
-	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
-	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
-	int ret;
-
 	PMD_INIT_FUNC_TRACE();
 
-	/* Function is reverse of dpaa2_sec_dev_init.
-	 * It does the following:
-	 * 1. Detach a DPSECI from attached resources i.e. buffer pools, dpbp_id
-	 * 2. Close the DPSECI device
-	 * 3. Free the allocated resources.
-	 */
-
-	/*Close the device at underlying layer*/
-	ret = dpseci_close(dpseci, CMD_PRI_LOW, priv->token);
-	if (ret) {
-		DPAA2_SEC_ERR("Failure closing dpseci device: err(%d)", ret);
-		return -1;
-	}
-
-	/*Free the allocated memory for ethernet private data and dpseci*/
-	priv->hw = NULL;
-	rte_free(dpseci);
-
 	return 0;
 }
@@ -3819,11 +3850,31 @@ static const struct rte_security_ops dpaa2_sec_security_ops = {
 static int
 dpaa2_sec_uninit(const struct rte_cryptodev *dev)
 {
-	struct dpaa2_sec_dev_private *internals = dev->data->dev_private;
+	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
+	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
+	int ret;
 
-	rte_free(dev->security_ctx);
+	PMD_INIT_FUNC_TRACE();
 
-	rte_mempool_free(internals->fle_pool);
+	/* Function is reverse of dpaa2_sec_dev_init.
+	 * It does the following:
+	 * 1. Detach a DPSECI from attached resources i.e. buffer pools, dpbp_id
+	 * 2. Close the DPSECI device
+	 * 3. Free the allocated resources.
+	 */
+
+	/*Close the device at underlying layer*/
+	ret = dpseci_close(dpseci, CMD_PRI_LOW, priv->token);
+	if (ret) {
+		DPAA2_SEC_ERR("Failure closing dpseci device: err(%d)", ret);
+		return -1;
+	}
+
+	/*Free the allocated memory for ethernet private data and dpseci*/
+	priv->hw = NULL;
+	rte_free(dpseci);
+	rte_free(dev->security_ctx);
+	rte_mempool_free(priv->fle_pool);
 
 	DPAA2_SEC_INFO("Closing DPAA2_SEC device %s on numa socket %u",
 		       dev->data->name, rte_socket_id());