From: Akhil Goyal
Date: Sat, 9 May 2020 22:22:59 +0000 (+0530)
Subject: crypto/dpaa_sec: improve error handling
X-Git-Url: http://git.droids-corp.org/?a=commitdiff_plain;h=c08ced9a9a4a5f291f5f4d92f9a106f993b93737;p=dpdk.git

crypto/dpaa_sec: improve error handling

The return values in cases of errors were not specified properly.
With this patch appropriate error numbers are returned.

Signed-off-by: Akhil Goyal
Acked-by: Hemant Agrawal
---

diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.c b/drivers/crypto/dpaa_sec/dpaa_sec.c
index 097ab8de98..25dcbd259a 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec.c
+++ b/drivers/crypto/dpaa_sec/dpaa_sec.c
@@ -2047,7 +2047,7 @@ dpaa_sec_cipher_init(struct rte_cryptodev *dev __rte_unused,
 	default:
 		DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
 			     xform->cipher.algo);
-		return -1;
+		return -ENOTSUP;
 	}
 	session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
 			DIR_ENC : DIR_DEC;
@@ -2114,7 +2114,7 @@ dpaa_sec_auth_init(struct rte_cryptodev *dev __rte_unused,
 	default:
 		DPAA_SEC_ERR("Crypto: Unsupported Auth specified %u",
 			     xform->auth.algo);
-		return -1;
+		return -ENOTSUP;
 	}
 
 	session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
@@ -2149,7 +2149,7 @@ dpaa_sec_chain_init(struct rte_cryptodev *dev __rte_unused,
 					RTE_CACHE_LINE_SIZE);
 	if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) {
 		DPAA_SEC_ERR("No Memory for cipher key");
-		return -1;
+		return -ENOMEM;
 	}
 	session->cipher_key.length = cipher_xform->key.length;
 	session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length,
@@ -2195,7 +2195,7 @@ dpaa_sec_chain_init(struct rte_cryptodev *dev __rte_unused,
 	default:
 		DPAA_SEC_ERR("Crypto: Unsupported Auth specified %u",
 			     auth_xform->algo);
-		return -1;
+		return -ENOTSUP;
 	}
 
 	session->cipher_alg = cipher_xform->algo;
@@ -2216,7 +2216,7 @@ dpaa_sec_chain_init(struct rte_cryptodev *dev __rte_unused,
 	default:
 		DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
 			     cipher_xform->algo);
-		return -1;
+		return -ENOTSUP;
 	}
 	session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
 				DIR_ENC : DIR_DEC;
@@ -2252,7 +2252,7 @@ dpaa_sec_aead_init(struct rte_cryptodev *dev __rte_unused,
 		break;
 	default:
 		DPAA_SEC_ERR("unsupported AEAD alg %d", session->aead_alg);
-		return -ENOMEM;
+		return -ENOTSUP;
 	}
 
 	session->dir = (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
@@ -2303,7 +2303,7 @@ dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess)
 	ret = dpaa_sec_prep_cdb(sess);
 	if (ret) {
 		DPAA_SEC_ERR("Unable to prepare sec cdb");
-		return -1;
+		return ret;
 	}
 	if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
 		ret = rte_dpaa_portal_init((void *)0);
@@ -2378,7 +2378,7 @@ dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
 			ret = dpaa_sec_chain_init(dev, xform, session);
 		} else {
 			DPAA_SEC_ERR("Not supported: Auth then Cipher");
-			return -EINVAL;
+			return -ENOTSUP;
 		}
 	/* Authenticate then Cipher */
 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
@@ -2394,7 +2394,7 @@ dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
 			ret = dpaa_sec_chain_init(dev, xform, session);
 		} else {
 			DPAA_SEC_ERR("Not supported: Auth then Cipher");
-			return -EINVAL;
+			return -ENOTSUP;
 		}
 
 	/* AEAD operation for AES-GCM kind of Algorithms */
@@ -2417,6 +2417,7 @@ dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
 		if (session->inq[i] == NULL) {
 			DPAA_SEC_ERR("unable to attach sec queue");
 			rte_spinlock_unlock(&internals->lock);
+			ret = -EBUSY;
 			goto err1;
 		}
 	}
@@ -2426,7 +2427,7 @@ dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
 
 err1:
 	free_session_data(session);
-	return -EINVAL;
+	return ret;
 }
 
 static int
@@ -2506,7 +2507,7 @@ dpaa_sec_ipsec_aead_init(struct rte_crypto_aead_xform *aead_xform,
 					     RTE_CACHE_LINE_SIZE);
 	if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
 		DPAA_SEC_ERR("No Memory for aead key");
-		return -1;
+		return -ENOMEM;
 	}
 	memcpy(session->aead_key.data, aead_xform->key.data,
 	       aead_xform->key.length);
@@ -2529,7 +2530,7 @@ dpaa_sec_ipsec_aead_init(struct rte_crypto_aead_xform *aead_xform,
 	default:
 		DPAA_SEC_ERR("Crypto: Undefined GCM digest %d",
 			     session->digest_length);
-		return -1;
+		return -EINVAL;
 	}
 	if (session->dir == DIR_ENC) {
 		memcpy(session->encap_pdb.gcm.salt,
@@ -2544,7 +2545,7 @@ dpaa_sec_ipsec_aead_init(struct rte_crypto_aead_xform *aead_xform,
 	default:
 		DPAA_SEC_ERR("Crypto: Undefined AEAD specified %u",
 			     aead_xform->algo);
-		return -1;
+		return -ENOTSUP;
 	}
 	return 0;
 }
@@ -2641,11 +2642,11 @@ dpaa_sec_ipsec_proto_init(struct rte_crypto_cipher_xform *cipher_xform,
 	case RTE_CRYPTO_AUTH_ZUC_EIA3:
 		DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
 			     session->auth_alg);
-		return -1;
+		return -ENOTSUP;
 	default:
 		DPAA_SEC_ERR("Crypto: Undefined Auth specified %u",
 			     session->auth_alg);
-		return -1;
+		return -ENOTSUP;
 	}
 
 	switch (session->cipher_alg) {
@@ -2678,11 +2679,11 @@ dpaa_sec_ipsec_proto_init(struct rte_crypto_cipher_xform *cipher_xform,
 	case RTE_CRYPTO_CIPHER_KASUMI_F8:
 		DPAA_SEC_ERR("Crypto: Unsupported Cipher alg %u",
 			     session->cipher_alg);
-		return -1;
+		return -ENOTSUP;
 	default:
 		DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
 			     session->cipher_alg);
-		return -1;
+		return -ENOTSUP;
 	}
 
 	return 0;
@@ -2857,6 +2858,7 @@ dpaa_sec_set_pdcp_session(struct rte_cryptodev *dev,
 	dpaa_sec_session *session = (dpaa_sec_session *)sess;
 	struct dpaa_sec_dev_private *dev_priv = dev->data->dev_private;
 	uint32_t i;
+	int ret;
 
 	PMD_INIT_FUNC_TRACE();
 
@@ -2896,7 +2898,7 @@ dpaa_sec_set_pdcp_session(struct rte_cryptodev *dev,
 		default:
 			DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
 				     session->cipher_alg);
-			return -1;
+			return -EINVAL;
 		}
 
 		session->cipher_key.data = rte_zmalloc(NULL,
@@ -2925,6 +2927,7 @@ dpaa_sec_set_pdcp_session(struct rte_cryptodev *dev,
 		    pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_12) {
 			DPAA_SEC_ERR(
 				"PDCP Seq Num size should be 5/12 bits for cmode");
+			ret = -EINVAL;
 			goto out;
 		}
 	}
@@ -2947,7 +2950,7 @@ dpaa_sec_set_pdcp_session(struct rte_cryptodev *dev,
 			DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
 				     session->auth_alg);
 			rte_free(session->cipher_key.data);
-			return -1;
+			return -EINVAL;
 		}
 
 		session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length,
@@ -2982,6 +2985,7 @@ dpaa_sec_set_pdcp_session(struct rte_cryptodev *dev,
 		if (session->inq[i] == NULL) {
 			DPAA_SEC_ERR("unable to attach sec queue");
 			rte_spinlock_unlock(&dev_priv->lock);
+			ret = -EBUSY;
 			goto out;
 		}
 	}
@@ -2991,7 +2995,7 @@ out:
 	rte_free(session->auth_key.data);
 	rte_free(session->cipher_key.data);
 	memset(session, 0, sizeof(dpaa_sec_session));
-	return -1;
+	return ret;
 }
 
 static int
@@ -3242,7 +3246,7 @@ dpaa_sec_eventq_attach(const struct rte_cryptodev *dev,
 		break;
 	case RTE_SCHED_TYPE_ORDERED:
 		DPAA_SEC_ERR("Ordered queue schedule type is not supported\n");
-		return -1;
+		return -ENOTSUP;
 	default:
 		opts.fqd.fq_ctrl |= QM_FQCTRL_AVOIDBLOCK;
 		qp->outq.cb.dqrr_dpdk_cb = dpaa_sec_process_parallel_event;
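
For reference, the sketch below illustrates the negative-errno convention this patch moves the driver to: -ENOTSUP for an unsupported algorithm, -ENOMEM for a failed key allocation, and so on, so callers can tell failure modes apart instead of receiving a bare -1. It is a minimal, self-contained example; the demo_* names are hypothetical and are not part of the dpaa_sec driver or the DPDK API.

/*
 * Minimal sketch (not part of the patch): a session-init helper that
 * returns 0 on success or a negative errno, mirroring the pattern the
 * patch applies in dpaa_sec. All demo_* identifiers are hypothetical.
 */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

enum demo_cipher_algo {
	DEMO_CIPHER_AES_CBC,
	DEMO_CIPHER_UNKNOWN,
};

struct demo_session {
	uint8_t *key;
	size_t key_len;
};

static int demo_session_init(struct demo_session *s,
			     enum demo_cipher_algo algo,
			     const uint8_t *key, size_t key_len)
{
	switch (algo) {
	case DEMO_CIPHER_AES_CBC:
		break;
	default:
		return -ENOTSUP;	/* unsupported algorithm, was "-1" */
	}

	s->key = calloc(1, key_len ? key_len : 1);
	if (s->key == NULL)
		return -ENOMEM;		/* allocation failure, was "-1" */
	if (key_len)
		memcpy(s->key, key, key_len);
	s->key_len = key_len;
	return 0;
}

int main(void)
{
	struct demo_session s = { 0 };
	uint8_t key[16] = { 0 };
	int ret = demo_session_init(&s, DEMO_CIPHER_UNKNOWN, key, sizeof(key));

	/* The caller can now react to the specific error code. */
	if (ret == -ENOTSUP)
		printf("algorithm not supported: %s\n", strerror(-ret));
	else if (ret < 0)
		printf("session init failed: %s\n", strerror(-ret));
	else
		printf("session ready\n");
	free(s.key);
	return 0;
}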