diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
index c6af3a4715..4eb3615250 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: BSD-3-Clause
  *
  * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
- * Copyright 2016-2019 NXP
+ * Copyright 2016-2021 NXP
  *
  */
@@ -18,7 +18,7 @@
 #include
 #include
 #include
-#include
+#include
 #include
 #include
 #include
@@ -36,6 +36,7 @@
 /* RTA header files */
 #include
 #include
+#include
 #include
 
 /* Minimum job descriptor consists of a oneword job descriptor HEADER and
@@ -48,21 +49,10 @@
 #define FSL_MC_DPSECI_DEVID 3
 
 #define NO_PREFETCH 0
-/* FLE_POOL_NUM_BUFS is set as per the ipsec-secgw application */
-#define FLE_POOL_NUM_BUFS 32000
-#define FLE_POOL_BUF_SIZE 256
-#define FLE_POOL_CACHE_SIZE 512
-#define FLE_SG_MEM_SIZE(num) (FLE_POOL_BUF_SIZE + ((num) * 32))
-#define SEC_FLC_DHR_OUTBOUND -114
-#define SEC_FLC_DHR_INBOUND 0
 
-enum rta_sec_era rta_sec_era = RTA_SEC_ERA_8;
+uint8_t cryptodev_driver_id;
 
-static uint8_t cryptodev_driver_id;
-
-int dpaa2_logtype_sec;
-
-#ifdef RTE_LIBRTE_SECURITY
+#ifdef RTE_LIB_SECURITY
 static inline int
 build_proto_compound_sg_fd(dpaa2_sec_session *sess,
                struct rte_crypto_op *op,
@@ -86,7 +76,7 @@ build_proto_compound_sg_fd(dpaa2_sec_session *sess,
             RTE_CACHE_LINE_SIZE);
     if (unlikely(!fle)) {
         DPAA2_SEC_DP_ERR("Proto:SG: Memory alloc failed for SGE");
-        return -1;
+        return -ENOMEM;
     }
     memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs));
     DPAA2_SET_FLE_ADDR(fle, (size_t)op);
@@ -168,7 +158,8 @@ build_proto_compound_sg_fd(dpaa2_sec_session *sess,
      * mbuf priv after sym_op.
      */
     if (sess->ctxt_type == DPAA2_SEC_PDCP && sess->pdcp.hfn_ovd) {
-        uint32_t hfn_ovd = *((uint8_t *)op + sess->pdcp.hfn_ovd_offset);
+        uint32_t hfn_ovd = *(uint32_t *)((uint8_t *)op +
+                    sess->pdcp.hfn_ovd_offset);
         /*enable HFN override override */
         DPAA2_SET_FLE_INTERNAL_JD(ip_fle, hfn_ovd);
         DPAA2_SET_FLE_INTERNAL_JD(op_fle, hfn_ovd);
@@ -202,7 +193,7 @@ build_proto_compound_fd(dpaa2_sec_session *sess,
     retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
     if (retval) {
         DPAA2_SEC_DP_ERR("Memory alloc failed");
-        return -1;
+        return -ENOMEM;
     }
     memset(fle, 0, FLE_POOL_BUF_SIZE);
     DPAA2_SET_FLE_ADDR(fle, (size_t)op);
@@ -243,7 +234,8 @@ build_proto_compound_fd(dpaa2_sec_session *sess,
      * mbuf priv after sym_op.
      */
     if (sess->ctxt_type == DPAA2_SEC_PDCP && sess->pdcp.hfn_ovd) {
-        uint32_t hfn_ovd = *((uint8_t *)op + sess->pdcp.hfn_ovd_offset);
+        uint32_t hfn_ovd = *(uint32_t *)((uint8_t *)op +
+                    sess->pdcp.hfn_ovd_offset);
         /*enable HFN override override */
         DPAA2_SET_FLE_INTERNAL_JD(ip_fle, hfn_ovd);
         DPAA2_SET_FLE_INTERNAL_JD(op_fle, hfn_ovd);
@@ -315,7 +307,7 @@ build_authenc_gcm_sg_fd(dpaa2_sec_session *sess,
             RTE_CACHE_LINE_SIZE);
     if (unlikely(!fle)) {
         DPAA2_SEC_ERR("GCM SG: Memory alloc failed for SGE");
-        return -1;
+        return -ENOMEM;
     }
     memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs));
     DPAA2_SET_FLE_ADDR(fle, (size_t)op);
@@ -463,7 +455,7 @@ build_authenc_gcm_fd(dpaa2_sec_session *sess,
     retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
     if (retval) {
         DPAA2_SEC_ERR("GCM: Memory alloc failed for SGE");
-        return -1;
+        return -ENOMEM;
     }
     memset(fle, 0, FLE_POOL_BUF_SIZE);
     DPAA2_SET_FLE_ADDR(fle, (size_t)op);
@@ -604,7 +596,7 @@ build_authenc_sg_fd(dpaa2_sec_session *sess,
             RTE_CACHE_LINE_SIZE);
     if (unlikely(!fle)) {
         DPAA2_SEC_ERR("AUTHENC SG: Memory alloc failed for SGE");
-        return -1;
+        return -ENOMEM;
     }
     memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs));
     DPAA2_SET_FLE_ADDR(fle, (size_t)op);
@@ -755,7 +747,7 @@ build_authenc_fd(dpaa2_sec_session *sess,
     retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
     if (retval) {
         DPAA2_SEC_ERR("Memory alloc failed for SGE");
-        return -1;
+        return -ENOMEM;
     }
     memset(fle, 0, FLE_POOL_BUF_SIZE);
     DPAA2_SET_FLE_ADDR(fle, (size_t)op);
@@ -885,7 +877,7 @@ static inline int build_auth_sg_fd(
         sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
         if ((data_len & 7) || (data_offset & 7)) {
             DPAA2_SEC_ERR("AUTH: len/offset must be full bytes");
-            return -1;
+            return -ENOTSUP;
         }
 
         data_len = data_len >> 3;
@@ -898,7 +890,7 @@ static inline int build_auth_sg_fd(
             RTE_CACHE_LINE_SIZE);
     if (unlikely(!fle)) {
         DPAA2_SEC_ERR("AUTH SG: Memory alloc failed for SGE");
-        return -1;
+        return -ENOMEM;
     }
     memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs));
     /* first FLE entry used to store mbuf and session ctxt */
@@ -1002,7 +994,7 @@ build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
         sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
         if ((data_len & 7) || (data_offset & 7)) {
             DPAA2_SEC_ERR("AUTH: len/offset must be full bytes");
-            return -1;
+            return -ENOTSUP;
         }
 
         data_len = data_len >> 3;
@@ -1012,7 +1004,7 @@ build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
     retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
     if (retval) {
         DPAA2_SEC_ERR("AUTH Memory alloc failed for SGE");
-        return -1;
+        return -ENOMEM;
     }
     memset(fle, 0, FLE_POOL_BUF_SIZE);
     /* TODO we are using the first FLE entry to store Mbuf.
@@ -1117,7 +1109,7 @@ build_cipher_sg_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
         sess->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
         if ((data_len & 7) || (data_offset & 7)) {
             DPAA2_SEC_ERR("CIPHER: len/offset must be full bytes");
-            return -1;
+            return -ENOTSUP;
         }
 
         data_len = data_len >> 3;
@@ -1135,7 +1127,7 @@ build_cipher_sg_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
             RTE_CACHE_LINE_SIZE);
     if (!fle) {
         DPAA2_SEC_ERR("CIPHER SG: Memory alloc failed for SGE");
-        return -1;
+        return -ENOMEM;
     }
     memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs));
     /* first FLE entry used to store mbuf and session ctxt */
@@ -1250,7 +1242,7 @@ build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
         sess->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
         if ((data_len & 7) || (data_offset & 7)) {
             DPAA2_SEC_ERR("CIPHER: len/offset must be full bytes");
-            return -1;
+            return -ENOTSUP;
         }
 
         data_len = data_len >> 3;
@@ -1265,7 +1257,7 @@ build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
     retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
     if (retval) {
         DPAA2_SEC_ERR("CIPHER: Memory alloc failed for SGE");
-        return -1;
+        return -ENOMEM;
     }
     memset(fle, 0, FLE_POOL_BUF_SIZE);
     /* TODO we are using the first FLE entry to store Mbuf.
@@ -1357,16 +1349,16 @@ build_sec_fd(struct rte_crypto_op *op,
     if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
         sess = (dpaa2_sec_session *)get_sym_session_private_data(
                 op->sym->session, cryptodev_driver_id);
-#ifdef RTE_LIBRTE_SECURITY
+#ifdef RTE_LIB_SECURITY
     else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
         sess = (dpaa2_sec_session *)get_sec_session_private_data(
                 op->sym->sec_session);
 #endif
     else
-        return -1;
+        return -ENOTSUP;
 
     if (!sess)
-        return -1;
+        return -EINVAL;
 
     /* Any of the buffer is segmented*/
     if (!rte_pktmbuf_is_contiguous(op->sym->m_src) ||
@@ -1385,7 +1377,7 @@ build_sec_fd(struct rte_crypto_op *op,
         case DPAA2_SEC_CIPHER_HASH:
             ret = build_authenc_sg_fd(sess, op, fd, bpid);
             break;
-#ifdef RTE_LIBRTE_SECURITY
+#ifdef RTE_LIB_SECURITY
         case DPAA2_SEC_IPSEC:
         case DPAA2_SEC_PDCP:
             ret = build_proto_compound_sg_fd(sess, op, fd, bpid);
@@ -1409,7 +1401,7 @@ build_sec_fd(struct rte_crypto_op *op,
         case DPAA2_SEC_CIPHER_HASH:
             ret = build_authenc_fd(sess, op, fd, bpid);
             break;
-#ifdef RTE_LIBRTE_SECURITY
+#ifdef RTE_LIB_SECURITY
         case DPAA2_SEC_IPSEC:
             ret = build_proto_fd(sess, op, fd, bpid);
             break;
@@ -1420,6 +1412,7 @@ build_sec_fd(struct rte_crypto_op *op,
         case DPAA2_SEC_HASH_CIPHER:
         default:
             DPAA2_SEC_ERR("error: Unsupported session");
+            ret = -ENOTSUP;
         }
     }
     return ret;
@@ -1433,7 +1426,7 @@ dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
     uint32_t loop;
     int32_t ret;
     struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
-    uint32_t frames_to_send;
+    uint32_t frames_to_send, retry_count;
     struct qbman_eq_desc eqdesc;
     struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
     struct qbman_swp *swp;
@@ -1459,7 +1452,9 @@ dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
     if (!DPAA2_PER_LCORE_DPIO) {
         ret = dpaa2_affine_qbman_swp();
         if (ret) {
-            DPAA2_SEC_ERR("Failure in affining portal");
+            DPAA2_SEC_ERR(
+                "Failed to allocate IO portal, tid: %d\n",
+                rte_gettid());
             return 0;
         }
     }
@@ -1470,13 +1465,15 @@ dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
             dpaa2_eqcr_size : nb_ops;
 
         for (loop = 0; loop < frames_to_send; loop++) {
-            if ((*ops)->sym->m_src->seqn) {
-             uint8_t dqrr_index = (*ops)->sym->m_src->seqn - 1;
-
-             flags[loop] = QBMAN_ENQUEUE_FLAG_DCA | dqrr_index;
-             DPAA2_PER_LCORE_DQRR_SIZE--;
-             DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dqrr_index);
-             (*ops)->sym->m_src->seqn = DPAA2_INVALID_MBUF_SEQN;
+            if (*dpaa2_seqn((*ops)->sym->m_src)) {
+                uint8_t dqrr_index =
+                    *dpaa2_seqn((*ops)->sym->m_src) - 1;
+
+                flags[loop] = QBMAN_ENQUEUE_FLAG_DCA | dqrr_index;
+                DPAA2_PER_LCORE_DQRR_SIZE--;
+                DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dqrr_index);
+                *dpaa2_seqn((*ops)->sym->m_src) =
+                    DPAA2_INVALID_MBUF_SEQN;
             }
 
             /*Clear the unused FD fields before sending*/
@@ -1491,16 +1488,29 @@ dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
             }
             ops++;
         }
+
+        loop = 0;
+        retry_count = 0;
         while (loop < frames_to_send) {
-            loop += qbman_swp_enqueue_multiple(swp, &eqdesc,
-                            &fd_arr[loop],
-                            &flags[loop],
-                            frames_to_send - loop);
+            ret = qbman_swp_enqueue_multiple(swp, &eqdesc,
+                            &fd_arr[loop],
+                            &flags[loop],
+                            frames_to_send - loop);
+            if (unlikely(ret < 0)) {
+                retry_count++;
+                if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) {
+                    num_tx += loop;
+                    nb_ops -= loop;
+                    goto skip_tx;
+                }
+            } else {
+                loop += ret;
+                retry_count = 0;
+            }
         }
 
-        num_tx += frames_to_send;
-        nb_ops -= frames_to_send;
+        num_tx += loop;
+        nb_ops -= loop;
     }
 skip_tx:
     dpaa2_qp->tx_vq.tx_pkts += num_tx;
@@ -1508,7 +1518,7 @@ skip_tx:
     return num_tx;
 }
 
-#ifdef RTE_LIBRTE_SECURITY
+#ifdef RTE_LIB_SECURITY
 static inline struct rte_crypto_op *
 sec_simple_fd_to_mbuf(const struct qbman_fd *fd)
 {
@@ -1547,7 +1557,7 @@ sec_fd_to_mbuf(const struct qbman_fd *fd)
     struct ctxt_priv *priv;
     struct rte_mbuf *dst, *src;
 
-#ifdef RTE_LIBRTE_SECURITY
+#ifdef RTE_LIB_SECURITY
     if (DPAA2_FD_GET_FORMAT(fd) == qbman_fd_single)
         return sec_simple_fd_to_mbuf(fd);
 #endif
@@ -1580,7 +1590,7 @@ sec_fd_to_mbuf(const struct qbman_fd *fd)
     } else
         dst = src;
 
-#ifdef RTE_LIBRTE_SECURITY
+#ifdef RTE_LIB_SECURITY
     if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
         uint16_t len = DPAA2_GET_FD_LEN(fd);
         dst->pkt_len = len;
@@ -1628,7 +1638,9 @@ dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
     if (!DPAA2_PER_LCORE_DPIO) {
         ret = dpaa2_affine_qbman_swp();
         if (ret) {
-            DPAA2_SEC_ERR("Failure in affining portal");
+            DPAA2_SEC_ERR(
+                "Failed to allocate IO portal, tid: %d\n",
+                rte_gettid());
             return 0;
         }
     }
@@ -1690,8 +1702,9 @@ dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
 
         if (unlikely(fd->simple.frc)) {
             /* TODO Parse SEC errors */
-            DPAA2_SEC_ERR("SEC returned Error - %x",
+            DPAA2_SEC_DP_ERR("SEC returned Error - %x\n",
                       fd->simple.frc);
+            dpaa2_qp->rx_vq.err_pkts += 1;
             ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_ERROR;
         } else {
             ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
@@ -1703,7 +1716,8 @@ dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
 
     dpaa2_qp->rx_vq.rx_pkts += num_rx;
 
-    DPAA2_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);
+    DPAA2_SEC_DP_DEBUG("SEC RX pkts %d err pkts %" PRIu64 "\n", num_rx,
+                dpaa2_qp->rx_vq.err_pkts);
     /*Return the total number of packets received to DPAA2 app*/
     return num_rx;
 }
@@ -1757,7 +1771,7 @@ dpaa2_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
             RTE_CACHE_LINE_SIZE);
     if (!qp) {
         DPAA2_SEC_ERR("malloc failed for rx/tx queues");
-        return -1;
+        return -ENOMEM;
     }
 
     qp->rx_vq.crypto_data = dev->data;
@@ -1767,13 +1781,13 @@ dpaa2_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
             RTE_CACHE_LINE_SIZE);
     if (!qp->rx_vq.q_storage) {
         DPAA2_SEC_ERR("malloc failed for q_storage");
-        return -1;
+        return -ENOMEM;
     }
     memset(qp->rx_vq.q_storage, 0, sizeof(struct queue_storage_info_t));
 
     if (dpaa2_alloc_dq_storage(qp->rx_vq.q_storage)) {
         DPAA2_SEC_ERR("Unable to allocate dequeue storage");
-        return -1;
+        return -ENOMEM;
     }
 
     dev->data->queue_pairs[qp_id] = qp;
@@ -1785,15 +1799,6 @@ dpaa2_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
     return retcode;
 }
 
-/** Return the number of allocated queue pairs */
-static uint32_t
-dpaa2_sec_queue_pair_count(struct rte_cryptodev *dev)
-{
-    PMD_INIT_FUNC_TRACE();
-
-    return dev->data->nb_queue_pairs;
-}
-
 /** Returns the size of the aesni gcm session structure */
 static unsigned int
 dpaa2_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
@@ -1810,7 +1815,7 @@ dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
 {
     struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
     struct alginfo cipherdata;
-    int bufsize;
+    int bufsize, ret = 0;
     struct ctxt_priv *priv;
     struct sec_flow_context *flc;
@@ -1822,7 +1827,7 @@ dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
             RTE_CACHE_LINE_SIZE);
     if (priv == NULL) {
         DPAA2_SEC_ERR("No Memory for priv CTXT");
-        return -1;
+        return -ENOMEM;
     }
 
     priv->fle_pool = dev_priv->fle_pool;
@@ -1832,10 +1837,10 @@ dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
     session->ctxt_type = DPAA2_SEC_CIPHER;
     session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
             RTE_CACHE_LINE_SIZE);
-    if (session->cipher_key.data == NULL) {
+    if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
         DPAA2_SEC_ERR("No Memory for cipher key");
         rte_free(priv);
-        return -1;
+        return -ENOMEM;
     }
     session->cipher_key.length = xform->cipher.key.length;
 
@@ -1858,7 +1863,7 @@ dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
         cipherdata.algmode = OP_ALG_AAI_CBC;
         session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
         bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
-                        SHR_NEVER, &cipherdata, NULL,
+                        SHR_NEVER, &cipherdata,
                         session->iv.length,
                         session->dir);
         break;
@@ -1867,25 +1872,25 @@ dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
         cipherdata.algmode = OP_ALG_AAI_CBC;
         session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
         bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
-                        SHR_NEVER, &cipherdata, NULL,
+                        SHR_NEVER, &cipherdata,
                         session->iv.length,
                         session->dir);
         break;
-    case RTE_CRYPTO_CIPHER_AES_CTR:
-        cipherdata.algtype = OP_ALG_ALGSEL_AES;
-        cipherdata.algmode = OP_ALG_AAI_CTR;
-        session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
+    case RTE_CRYPTO_CIPHER_DES_CBC:
+        cipherdata.algtype = OP_ALG_ALGSEL_DES;
+        cipherdata.algmode = OP_ALG_AAI_CBC;
+        session->cipher_alg = RTE_CRYPTO_CIPHER_DES_CBC;
         bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
-                        SHR_NEVER, &cipherdata, NULL,
+                        SHR_NEVER, &cipherdata,
                        session->iv.length,
                         session->dir);
         break;
-    case RTE_CRYPTO_CIPHER_3DES_CTR:
-        cipherdata.algtype = OP_ALG_ALGSEL_3DES;
+    case RTE_CRYPTO_CIPHER_AES_CTR:
+        cipherdata.algtype = OP_ALG_ALGSEL_AES;
         cipherdata.algmode = OP_ALG_AAI_CTR;
-        session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CTR;
+        session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
         bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
-                        SHR_NEVER, &cipherdata, NULL,
+                        SHR_NEVER, &cipherdata,
                         session->iv.length,
                         session->dir);
         break;
@@ -1907,20 +1912,24 @@ dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
     case RTE_CRYPTO_CIPHER_AES_F8:
     case RTE_CRYPTO_CIPHER_AES_ECB:
     case RTE_CRYPTO_CIPHER_3DES_ECB:
+    case RTE_CRYPTO_CIPHER_3DES_CTR:
     case RTE_CRYPTO_CIPHER_AES_XTS:
    case RTE_CRYPTO_CIPHER_ARC4:
     case RTE_CRYPTO_CIPHER_NULL:
         DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
             xform->cipher.algo);
+        ret = -ENOTSUP;
         goto error_out;
     default:
         DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
             xform->cipher.algo);
+        ret = -ENOTSUP;
         goto error_out;
     }
 
     if (bufsize < 0) {
         DPAA2_SEC_ERR("Crypto: Descriptor build failed");
+        ret = -EINVAL;
         goto error_out;
     }
 
@@ -1932,12 +1941,12 @@ dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
     for (i = 0; i < bufsize; i++)
         DPAA2_SEC_DEBUG("DESC[%d]:0x%x", i, priv->flc_desc[0].desc[i]);
 #endif
-    return 0;
+    return ret;
 
 error_out:
     rte_free(session->cipher_key.data);
     rte_free(priv);
-    return -1;
+    return ret;
 }
 
 static int
@@ -1947,7 +1956,7 @@ dpaa2_sec_auth_init(struct rte_cryptodev *dev,
 {
     struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
     struct alginfo authdata;
-    int bufsize;
+    int bufsize, ret = 0;
     struct ctxt_priv *priv;
     struct sec_flow_context *flc;
@@ -1960,28 +1969,30 @@ dpaa2_sec_auth_init(struct rte_cryptodev *dev,
             RTE_CACHE_LINE_SIZE);
     if (priv == NULL) {
         DPAA2_SEC_ERR("No Memory for priv CTXT");
-        return -1;
+        return -ENOMEM;
     }
 
     priv->fle_pool = dev_priv->fle_pool;
     flc = &priv->flc_desc[DESC_INITFINAL].flc;
 
     session->ctxt_type = DPAA2_SEC_AUTH;
-    session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
+    session->auth_key.length = xform->auth.key.length;
+    if (xform->auth.key.length) {
+        session->auth_key.data = rte_zmalloc(NULL,
+            xform->auth.key.length,
             RTE_CACHE_LINE_SIZE);
-    if (session->auth_key.data == NULL) {
-        DPAA2_SEC_ERR("Unable to allocate memory for auth key");
-        rte_free(priv);
-        return -1;
+        if (session->auth_key.data == NULL) {
+            DPAA2_SEC_ERR("Unable to allocate memory for auth key");
+            rte_free(priv);
+            return -ENOMEM;
+        }
+        memcpy(session->auth_key.data, xform->auth.key.data,
+               xform->auth.key.length);
+        authdata.key = (size_t)session->auth_key.data;
+        authdata.key_enc_flags = 0;
+        authdata.key_type = RTA_DATA_IMM;
     }
-    session->auth_key.length = xform->auth.key.length;
-
-    memcpy(session->auth_key.data, xform->auth.key.data,
-           xform->auth.key.length);
-    authdata.key = (size_t)session->auth_key.data;
     authdata.keylen = session->auth_key.length;
-    authdata.key_enc_flags = 0;
-    authdata.key_type = RTA_DATA_IMM;
 
     session->digest_length = xform->auth.digest_length;
     session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
@@ -2064,29 +2075,98 @@ dpaa2_sec_auth_init(struct rte_cryptodev *dev,
                        !session->dir,
                        session->digest_length);
         break;
-    case RTE_CRYPTO_AUTH_KASUMI_F9:
-    case RTE_CRYPTO_AUTH_NULL:
     case RTE_CRYPTO_AUTH_SHA1:
+        authdata.algtype = OP_ALG_ALGSEL_SHA1;
+        authdata.algmode = OP_ALG_AAI_HASH;
+        session->auth_alg = RTE_CRYPTO_AUTH_SHA1;
+        bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc,
+                       1, 0, SHR_NEVER, &authdata,
+                       !session->dir,
+                       session->digest_length);
+        break;
+    case RTE_CRYPTO_AUTH_MD5:
+        authdata.algtype = OP_ALG_ALGSEL_MD5;
+        authdata.algmode = OP_ALG_AAI_HASH;
+        session->auth_alg = RTE_CRYPTO_AUTH_MD5;
+        bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc,
+                       1, 0, SHR_NEVER, &authdata,
+                       !session->dir,
+                       session->digest_length);
+        break;
     case RTE_CRYPTO_AUTH_SHA256:
+        authdata.algtype = OP_ALG_ALGSEL_SHA256;
+        authdata.algmode = OP_ALG_AAI_HASH;
+        session->auth_alg = RTE_CRYPTO_AUTH_SHA256;
+        bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc,
+                       1, 0, SHR_NEVER, &authdata,
+                       !session->dir,
+                       session->digest_length);
+        break;
+    case RTE_CRYPTO_AUTH_SHA384:
+        authdata.algtype = OP_ALG_ALGSEL_SHA384;
+        authdata.algmode = OP_ALG_AAI_HASH;
+        session->auth_alg = RTE_CRYPTO_AUTH_SHA384;
+        bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc,
+                       1, 0, SHR_NEVER, &authdata,
+                       !session->dir,
+                       session->digest_length);
+        break;
     case RTE_CRYPTO_AUTH_SHA512:
+        authdata.algtype = OP_ALG_ALGSEL_SHA512;
+        authdata.algmode = OP_ALG_AAI_HASH;
+        session->auth_alg = RTE_CRYPTO_AUTH_SHA512;
+        bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc,
+                       1, 0, SHR_NEVER, &authdata,
+                       !session->dir,
+                       session->digest_length);
+        break;
     case RTE_CRYPTO_AUTH_SHA224:
-    case RTE_CRYPTO_AUTH_SHA384:
-    case RTE_CRYPTO_AUTH_MD5:
-    case RTE_CRYPTO_AUTH_AES_GMAC:
+        authdata.algtype = OP_ALG_ALGSEL_SHA224;
+        authdata.algmode = OP_ALG_AAI_HASH;
+        session->auth_alg = RTE_CRYPTO_AUTH_SHA224;
+        bufsize = cnstr_shdsc_hash(priv->flc_desc[DESC_INITFINAL].desc,
+                       1, 0, SHR_NEVER, &authdata,
+                       !session->dir,
+                       session->digest_length);
+        break;
     case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
+        authdata.algtype = OP_ALG_ALGSEL_AES;
+        authdata.algmode = OP_ALG_AAI_XCBC_MAC;
+        session->auth_alg = RTE_CRYPTO_AUTH_AES_XCBC_MAC;
+        bufsize = cnstr_shdsc_aes_mac(
+                    priv->flc_desc[DESC_INITFINAL].desc,
+                    1, 0, SHR_NEVER, &authdata,
+                    !session->dir,
+                    session->digest_length);
+        break;
     case RTE_CRYPTO_AUTH_AES_CMAC:
+        authdata.algtype = OP_ALG_ALGSEL_AES;
+        authdata.algmode = OP_ALG_AAI_CMAC;
+        session->auth_alg = RTE_CRYPTO_AUTH_AES_CMAC;
+        bufsize = cnstr_shdsc_aes_mac(
+                    priv->flc_desc[DESC_INITFINAL].desc,
+                    1, 0, SHR_NEVER, &authdata,
+                    !session->dir,
+                    session->digest_length);
+        break;
     case RTE_CRYPTO_AUTH_AES_CBC_MAC:
+    case RTE_CRYPTO_AUTH_AES_GMAC:
+    case RTE_CRYPTO_AUTH_KASUMI_F9:
+    case RTE_CRYPTO_AUTH_NULL:
         DPAA2_SEC_ERR("Crypto: Unsupported auth alg %un",
                   xform->auth.algo);
+        ret = -ENOTSUP;
         goto error_out;
     default:
         DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
                  xform->auth.algo);
+        ret = -ENOTSUP;
         goto error_out;
     }
 
     if (bufsize < 0) {
         DPAA2_SEC_ERR("Crypto: Invalid buffer length");
+        ret = -EINVAL;
         goto error_out;
     }
 
@@ -2099,12 +2179,12 @@ dpaa2_sec_auth_init(struct rte_cryptodev *dev,
             i, priv->flc_desc[DESC_INITFINAL].desc[i]);
 #endif
 
-    return 0;
+    return ret;
 
 error_out:
     rte_free(session->auth_key.data);
     rte_free(priv);
-    return -1;
+    return ret;
 }
 
 static int
@@ -2119,7 +2199,7 @@ dpaa2_sec_aead_init(struct rte_cryptodev *dev,
     struct ctxt_priv *priv;
     struct sec_flow_context *flc;
     struct rte_crypto_aead_xform *aead_xform = &xform->aead;
-    int err;
+    int err, ret = 0;
 
     PMD_INIT_FUNC_TRACE();
@@ -2134,7 +2214,7 @@ dpaa2_sec_aead_init(struct rte_cryptodev *dev,
             RTE_CACHE_LINE_SIZE);
     if (priv == NULL) {
         DPAA2_SEC_ERR("No Memory for priv CTXT");
-        return -1;
+        return -ENOMEM;
     }
 
     priv->fle_pool = dev_priv->fle_pool;
@@ -2145,7 +2225,7 @@ dpaa2_sec_aead_init(struct rte_cryptodev *dev,
     if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
         DPAA2_SEC_ERR("No Memory for aead key");
         rte_free(priv);
-        return -1;
+        return -ENOMEM;
     }
     memcpy(session->aead_key.data, aead_xform->key.data,
            aead_xform->key.length);
@@ -2168,10 +2248,12 @@ dpaa2_sec_aead_init(struct rte_cryptodev *dev,
     case RTE_CRYPTO_AEAD_AES_CCM:
         DPAA2_SEC_ERR("Crypto: Unsupported AEAD alg %u",
                   aead_xform->algo);
+        ret = -ENOTSUP;
         goto error_out;
     default:
         DPAA2_SEC_ERR("Crypto: Undefined AEAD specified %u",
                  aead_xform->algo);
+        ret = -ENOTSUP;
         goto error_out;
     }
     session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
@@ -2179,12 +2261,13 @@ dpaa2_sec_aead_init(struct rte_cryptodev *dev,
 
     priv->flc_desc[0].desc[0] = aeaddata.keylen;
     err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
-                   MIN_JOB_DESC_SIZE,
+                   DESC_JOB_IO_LEN,
                    (unsigned int *)priv->flc_desc[0].desc,
                    &priv->flc_desc[0].desc[1], 1);
 
     if (err < 0) {
         DPAA2_SEC_ERR("Crypto: Incorrect key lengths");
+        ret = -EINVAL;
         goto error_out;
     }
     if (priv->flc_desc[0].desc[1] & 1) {
@@ -2208,6 +2291,7 @@ dpaa2_sec_aead_init(struct rte_cryptodev *dev,
             session->digest_length);
     if (bufsize < 0) {
         DPAA2_SEC_ERR("Crypto: Invalid buffer length");
+        ret = -EINVAL;
         goto error_out;
     }
 
@@ -2219,12 +2303,12 @@ dpaa2_sec_aead_init(struct rte_cryptodev *dev,
         DPAA2_SEC_DEBUG("DESC[%d]:0x%x\n",
                 i, priv->flc_desc[0].desc[i]);
 #endif
-    return 0;
+    return ret;
 
 error_out:
     rte_free(session->aead_key.data);
     rte_free(priv);
-    return -1;
+    return ret;
 }
 
 
 static int
@@ -2240,7 +2324,7 @@ dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev,
     struct sec_flow_context *flc;
     struct rte_crypto_cipher_xform *cipher_xform;
     struct rte_crypto_auth_xform *auth_xform;
-    int err;
+    int err, ret = 0;
 
     PMD_INIT_FUNC_TRACE();
@@ -2268,7 +2352,7 @@ dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev,
             RTE_CACHE_LINE_SIZE);
     if (priv == NULL) {
         DPAA2_SEC_ERR("No Memory for priv CTXT");
-        return -1;
+        return -ENOMEM;
     }
 
     priv->fle_pool = dev_priv->fle_pool;
@@ -2279,7 +2363,7 @@ dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev,
     if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) {
         DPAA2_SEC_ERR("No Memory for cipher key");
         rte_free(priv);
-        return -1;
+        return -ENOMEM;
     }
     session->cipher_key.length = cipher_xform->key.length;
     session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length,
@@ -2288,7 +2372,7 @@ dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev,
         DPAA2_SEC_ERR("No Memory for auth key");
         rte_free(session->cipher_key.data);
         rte_free(priv);
-        return -1;
+        return -ENOMEM;
     }
     session->auth_key.length = auth_xform->key.length;
     memcpy(session->cipher_key.data, cipher_xform->key.data,
@@ -2335,6 +2419,17 @@ dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev,
         session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
         break;
     case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
+        authdata.algtype = OP_ALG_ALGSEL_AES;
+        authdata.algmode = OP_ALG_AAI_XCBC_MAC;
+        session->auth_alg = RTE_CRYPTO_AUTH_AES_XCBC_MAC;
+        break;
+    case RTE_CRYPTO_AUTH_AES_CMAC:
+        authdata.algtype = OP_ALG_ALGSEL_AES;
+        authdata.algmode = OP_ALG_AAI_CMAC;
+        session->auth_alg = RTE_CRYPTO_AUTH_AES_CMAC;
+        break;
+    case RTE_CRYPTO_AUTH_AES_CBC_MAC:
+    case RTE_CRYPTO_AUTH_AES_GMAC:
     case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
     case RTE_CRYPTO_AUTH_NULL:
     case RTE_CRYPTO_AUTH_SHA1:
@@ -2343,17 +2438,16 @@ dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev,
     case RTE_CRYPTO_AUTH_SHA224:
     case RTE_CRYPTO_AUTH_SHA384:
     case RTE_CRYPTO_AUTH_MD5:
-    case RTE_CRYPTO_AUTH_AES_GMAC:
     case RTE_CRYPTO_AUTH_KASUMI_F9:
-    case RTE_CRYPTO_AUTH_AES_CMAC:
-    case RTE_CRYPTO_AUTH_AES_CBC_MAC:
     case RTE_CRYPTO_AUTH_ZUC_EIA3:
         DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
                   auth_xform->algo);
+        ret = -ENOTSUP;
         goto error_out;
     default:
         DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
                  auth_xform->algo);
+        ret = -ENOTSUP;
         goto error_out;
     }
     cipherdata.key = (size_t)session->cipher_key.data;
@@ -2372,6 +2466,11 @@ dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev,
         cipherdata.algmode = OP_ALG_AAI_CBC;
         session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
         break;
+    case RTE_CRYPTO_CIPHER_DES_CBC:
+        cipherdata.algtype = OP_ALG_ALGSEL_DES;
+        cipherdata.algmode = OP_ALG_AAI_CBC;
+        session->cipher_alg = RTE_CRYPTO_CIPHER_DES_CBC;
+        break;
     case RTE_CRYPTO_CIPHER_AES_CTR:
         cipherdata.algtype = OP_ALG_ALGSEL_AES;
         cipherdata.algmode = OP_ALG_AAI_CTR;
@@ -2381,14 +2480,17 @@ dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev,
     case RTE_CRYPTO_CIPHER_ZUC_EEA3:
     case RTE_CRYPTO_CIPHER_NULL:
     case RTE_CRYPTO_CIPHER_3DES_ECB:
+    case RTE_CRYPTO_CIPHER_3DES_CTR:
     case RTE_CRYPTO_CIPHER_AES_ECB:
     case RTE_CRYPTO_CIPHER_KASUMI_F8:
         DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
                   cipher_xform->algo);
+        ret = -ENOTSUP;
         goto error_out;
     default:
         DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
                  cipher_xform->algo);
+        ret = -ENOTSUP;
         goto error_out;
     }
     session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
@@ -2397,12 +2499,13 @@ dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev,
     priv->flc_desc[0].desc[0] = cipherdata.keylen;
     priv->flc_desc[0].desc[1] = authdata.keylen;
     err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
-                   MIN_JOB_DESC_SIZE,
+                   DESC_JOB_IO_LEN,
                    (unsigned int *)priv->flc_desc[0].desc,
                    &priv->flc_desc[0].desc[2], 2);
 
     if (err < 0) {
         DPAA2_SEC_ERR("Crypto: Incorrect key lengths");
+        ret = -EINVAL;
         goto error_out;
     }
     if (priv->flc_desc[0].desc[2] & 1) {
@@ -2430,10 +2533,12 @@ dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev,
                           session->dir);
         if (bufsize < 0) {
             DPAA2_SEC_ERR("Crypto: Invalid buffer length");
+            ret = -EINVAL;
             goto error_out;
         }
     } else {
         DPAA2_SEC_ERR("Hash before cipher not supported");
+        ret = -ENOTSUP;
         goto error_out;
     }
 
@@ -2446,13 +2551,13 @@ dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev,
             i, priv->flc_desc[0].desc[i]);
 #endif
 
-    return 0;
+    return ret;
 
 error_out:
     rte_free(session->cipher_key.data);
     rte_free(session->auth_key.data);
     rte_free(priv);
-    return -1;
+    return ret;
 }
 
 static int
@@ -2466,7 +2571,7 @@ dpaa2_sec_set_session_parameters(struct rte_cryptodev *dev,
 
     if (unlikely(sess == NULL)) {
         DPAA2_SEC_ERR("Invalid session struct");
-        return -1;
+        return -EINVAL;
     }
 
     memset(session, 0, sizeof(dpaa2_sec_session));
@@ -2515,7 +2620,7 @@ dpaa2_sec_set_session_parameters(struct rte_cryptodev *dev,
     return ret;
 }
 
-#ifdef RTE_LIBRTE_SECURITY
+#ifdef RTE_LIB_SECURITY
 static int
 dpaa2_sec_ipsec_aead_init(struct rte_crypto_aead_xform *aead_xform,
             dpaa2_sec_session *session,
@@ -2527,7 +2632,7 @@ dpaa2_sec_ipsec_aead_init(struct rte_crypto_aead_xform *aead_xform,
             RTE_CACHE_LINE_SIZE);
     if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
         DPAA2_SEC_ERR("No Memory for aead key");
-        return -1;
+        return -ENOMEM;
     }
     memcpy(session->aead_key.data, aead_xform->key.data,
            aead_xform->key.length);
@@ -2555,7 +2660,7 @@ dpaa2_sec_ipsec_aead_init(struct rte_crypto_aead_xform *aead_xform,
     default:
         DPAA2_SEC_ERR("Crypto: Undefined GCM digest %d",
                   session->digest_length);
-        return -1;
+        return -EINVAL;
     }
     aeaddata->algmode = OP_ALG_AAI_GCM;
     session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM;
@@ -2574,7 +2679,7 @@ dpaa2_sec_ipsec_aead_init(struct rte_crypto_aead_xform *aead_xform,
     default:
         DPAA2_SEC_ERR("Crypto: Undefined CCM digest %d",
                   session->digest_length);
-        return -1;
+        return -EINVAL;
     }
     aeaddata->algmode = OP_ALG_AAI_CCM;
     session->aead_alg = RTE_CRYPTO_AEAD_AES_CCM;
@@ -2582,7 +2687,7 @@ dpaa2_sec_ipsec_aead_init(struct rte_crypto_aead_xform *aead_xform,
     default:
         DPAA2_SEC_ERR("Crypto: Undefined AEAD specified %u",
                   aead_xform->algo);
-        return -1;
+        return -ENOTSUP;
     }
     session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
             DIR_ENC : DIR_DEC;
@@ -2666,14 +2771,18 @@ dpaa2_sec_ipsec_proto_init(struct rte_crypto_cipher_xform *cipher_xform,
         authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_512_256;
         authdata->algmode = OP_ALG_AAI_HMAC;
         break;
+    case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
+        authdata->algtype = OP_PCL_IPSEC_AES_XCBC_MAC_96;
+        authdata->algmode = OP_ALG_AAI_XCBC_MAC;
+        break;
     case RTE_CRYPTO_AUTH_AES_CMAC:
         authdata->algtype = OP_PCL_IPSEC_AES_CMAC_96;
+        authdata->algmode = OP_ALG_AAI_CMAC;
         break;
     case RTE_CRYPTO_AUTH_NULL:
         authdata->algtype = OP_PCL_IPSEC_HMAC_NULL;
         break;
     case RTE_CRYPTO_AUTH_SHA224_HMAC:
-    case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
     case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
     case RTE_CRYPTO_AUTH_SHA1:
     case RTE_CRYPTO_AUTH_SHA256:
@@ -2687,11 +2796,11 @@ dpaa2_sec_ipsec_proto_init(struct rte_crypto_cipher_xform *cipher_xform,
     case RTE_CRYPTO_AUTH_ZUC_EIA3:
         DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
                   session->auth_alg);
-        return -1;
+        return -ENOTSUP;
     default:
         DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
                   session->auth_alg);
-        return -1;
+        return -ENOTSUP;
     }
     cipherdata->key = (size_t)session->cipher_key.data;
     cipherdata->keylen = session->cipher_key.length;
@@ -2707,6 +2816,10 @@ dpaa2_sec_ipsec_proto_init(struct rte_crypto_cipher_xform *cipher_xform,
         cipherdata->algtype = OP_PCL_IPSEC_3DES;
         cipherdata->algmode = OP_ALG_AAI_CBC;
         break;
+    case RTE_CRYPTO_CIPHER_DES_CBC:
+        cipherdata->algtype = OP_PCL_IPSEC_DES;
+        cipherdata->algmode = OP_ALG_AAI_CBC;
+        break;
     case RTE_CRYPTO_CIPHER_AES_CTR:
         cipherdata->algtype = OP_PCL_IPSEC_AES_CTR;
         cipherdata->algmode = OP_ALG_AAI_CTR;
@@ -2717,26 +2830,21 @@ dpaa2_sec_ipsec_proto_init(struct rte_crypto_cipher_xform *cipher_xform,
     case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
     case RTE_CRYPTO_CIPHER_ZUC_EEA3:
     case RTE_CRYPTO_CIPHER_3DES_ECB:
+    case RTE_CRYPTO_CIPHER_3DES_CTR:
     case RTE_CRYPTO_CIPHER_AES_ECB:
     case RTE_CRYPTO_CIPHER_KASUMI_F8:
         DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
                   session->cipher_alg);
-        return -1;
+        return -ENOTSUP;
     default:
         DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
                   session->cipher_alg);
-        return -1;
+        return -ENOTSUP;
     }
 
     return 0;
 }
 
-#ifdef RTE_LIBRTE_SECURITY_TEST
-static uint8_t aes_cbc_iv[] = {
-    0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
-    0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f };
-#endif
-
 static int
 dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev,
                 struct rte_security_session_conf *conf,
@@ -2812,6 +2920,10 @@ dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev,
 
         /* copy algo specific data to PDB */
         switch (cipherdata.algtype) {
+        case OP_PCL_IPSEC_AES_CTR:
+            encap_pdb.ctr.ctr_initial = 0x00000001;
+            encap_pdb.ctr.ctr_nonce = ipsec_xform->salt;
+            break;
         case OP_PCL_IPSEC_AES_GCM8:
         case OP_PCL_IPSEC_AES_GCM12:
         case OP_PCL_IPSEC_AES_GCM16:
@@ -2872,7 +2984,8 @@ dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev,
         }
 
         bufsize = cnstr_shdsc_ipsec_new_encap(priv->flc_desc[0].desc,
-                1, 0, SHR_SERIAL, &encap_pdb,
+                1, 0, (rta_sec_era >= RTA_SEC_ERA_10) ?
+                SHR_WAIT : SHR_SERIAL, &encap_pdb,
                 hdr, &cipherdata, &authdata);
     } else if (ipsec_xform->direction ==
             RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
@@ -2882,6 +2995,10 @@ dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev,
         memset(&decap_pdb, 0, sizeof(struct ipsec_decap_pdb));
         /* copy algo specific data to PDB */
         switch (cipherdata.algtype) {
+        case OP_PCL_IPSEC_AES_CTR:
+            decap_pdb.ctr.ctr_initial = 0x00000001;
+            decap_pdb.ctr.ctr_nonce = ipsec_xform->salt;
+            break;
         case OP_PCL_IPSEC_AES_GCM8:
         case OP_PCL_IPSEC_AES_GCM12:
         case OP_PCL_IPSEC_AES_GCM16:
@@ -2896,9 +3013,45 @@ dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev,
                 sizeof(struct rte_ipv6_hdr) << 16;
         if (ipsec_xform->options.esn)
             decap_pdb.options |= PDBOPTS_ESP_ESN;
+
+        if (ipsec_xform->replay_win_sz) {
+            uint32_t win_sz;
+            win_sz = rte_align32pow2(ipsec_xform->replay_win_sz);
+
+            if (rta_sec_era < RTA_SEC_ERA_10 && win_sz > 128) {
+                DPAA2_SEC_INFO("Max Anti replay Win sz = 128");
+                win_sz = 128;
+            }
+            switch (win_sz) {
+            case 1:
+            case 2:
+            case 4:
+            case 8:
+            case 16:
+            case 32:
+                decap_pdb.options |= PDBOPTS_ESP_ARS32;
+                break;
+            case 64:
+                decap_pdb.options |= PDBOPTS_ESP_ARS64;
+                break;
+            case 256:
+                decap_pdb.options |= PDBOPTS_ESP_ARS256;
+                break;
+            case 512:
+                decap_pdb.options |= PDBOPTS_ESP_ARS512;
+                break;
+            case 1024:
+                decap_pdb.options |= PDBOPTS_ESP_ARS1024;
+                break;
+            case 128:
+            default:
+                decap_pdb.options |= PDBOPTS_ESP_ARS128;
+            }
+        }
         session->dir = DIR_DEC;
         bufsize = cnstr_shdsc_ipsec_new_decap(priv->flc_desc[0].desc,
-                1, 0, SHR_SERIAL,
+                1, 0, (rta_sec_era >= RTA_SEC_ERA_10) ?
+                SHR_WAIT : SHR_SERIAL,
                 &decap_pdb, &cipherdata, &authdata);
     } else
         goto out;
@@ -2944,7 +3097,7 @@ dpaa2_sec_set_pdcp_session(struct rte_cryptodev *dev,
     struct rte_security_pdcp_xform *pdcp_xform = &conf->pdcp;
     struct rte_crypto_sym_xform *xform = conf->crypto_xform;
     struct rte_crypto_auth_xform *auth_xform = NULL;
-    struct rte_crypto_cipher_xform *cipher_xform;
+    struct rte_crypto_cipher_xform *cipher_xform = NULL;
     dpaa2_sec_session *session = (dpaa2_sec_session *)sess;
     struct ctxt_priv *priv;
     struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
@@ -2976,18 +3129,18 @@ dpaa2_sec_set_pdcp_session(struct rte_cryptodev *dev,
     flc = &priv->flc_desc[0].flc;
 
     /* find xfrm types */
-    if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
+    if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
         cipher_xform = &xform->cipher;
-    } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
-           xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
-        session->ext_params.aead_ctxt.auth_cipher_text = true;
-        cipher_xform = &xform->cipher;
-        auth_xform = &xform->next->auth;
-    } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
-           xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
-        session->ext_params.aead_ctxt.auth_cipher_text = false;
-        cipher_xform = &xform->next->cipher;
+        if (xform->next != NULL) {
+            session->ext_params.aead_ctxt.auth_cipher_text = true;
+            auth_xform = &xform->next->auth;
+        }
+    } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
         auth_xform = &xform->auth;
+        if (xform->next != NULL) {
+            session->ext_params.aead_ctxt.auth_cipher_text = false;
+            cipher_xform = &xform->next->cipher;
+        }
     } else {
         DPAA2_SEC_ERR("Invalid crypto type");
         return -EINVAL;
@@ -3026,7 +3179,8 @@ dpaa2_sec_set_pdcp_session(struct rte_cryptodev *dev,
     session->pdcp.hfn_threshold = pdcp_xform->hfn_threshold;
     session->pdcp.hfn_ovd = pdcp_xform->hfn_ovrd;
     /* hfv ovd offset location is stored in iv.offset value*/
-    session->pdcp.hfn_ovd_offset = cipher_xform->iv.offset;
+    if (cipher_xform)
+        session->pdcp.hfn_ovd_offset = cipher_xform->iv.offset;
 
     cipherdata.key = (size_t)session->cipher_key.data;
     cipherdata.keylen = session->cipher_key.length;
@@ -3103,6 +3257,30 @@ dpaa2_sec_set_pdcp_session(struct rte_cryptodev *dev,
         goto out;
     }
 
+    if (pdcp_xform->sdap_enabled) {
+        int nb_keys_to_inline =
+            rta_inline_pdcp_sdap_query(authdata.algtype,
+                    cipherdata.algtype,
+                    session->pdcp.sn_size,
+                    session->pdcp.hfn_ovd);
+        if (nb_keys_to_inline >= 1) {
+            cipherdata.key = DPAA2_VADDR_TO_IOVA(cipherdata.key);
+            cipherdata.key_type = RTA_DATA_PTR;
+        }
+        if (nb_keys_to_inline >= 2) {
+            authdata.key = DPAA2_VADDR_TO_IOVA(authdata.key);
+            authdata.key_type = RTA_DATA_PTR;
+        }
+    } else {
+        if (rta_inline_pdcp_query(authdata.algtype,
+                    cipherdata.algtype,
+                    session->pdcp.sn_size,
+                    session->pdcp.hfn_ovd)) {
+            cipherdata.key = DPAA2_VADDR_TO_IOVA(cipherdata.key);
+            cipherdata.key_type = RTA_DATA_PTR;
+        }
+    }
+
     if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
         if (session->dir == DIR_ENC)
             bufsize = cnstr_shdsc_pdcp_c_plane_encap(
@@ -3124,9 +3302,14 @@ dpaa2_sec_set_pdcp_session(struct rte_cryptodev *dev,
                     pdcp_xform->hfn_threshold,
                     &cipherdata, &authdata,
                     0);
+
+    } else if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_SHORT_MAC) {
+        bufsize = cnstr_shdsc_pdcp_short_mac(priv->flc_desc[0].desc,
+                             1, swap, &authdata);
     } else {
-        if (session->dir == DIR_ENC)
-            bufsize = cnstr_shdsc_pdcp_u_plane_encap(
+        if (session->dir == DIR_ENC) {
+            if (pdcp_xform->sdap_enabled)
+                bufsize = cnstr_shdsc_pdcp_sdap_u_plane_encap(
                     priv->flc_desc[0].desc, 1, swap,
                     session->pdcp.sn_size,
                     pdcp_xform->hfn,
@@ -3134,8 +3317,27 @@ dpaa2_sec_set_pdcp_session(struct rte_cryptodev *dev,
                     pdcp_xform->pkt_dir,
                     pdcp_xform->hfn_threshold,
                     &cipherdata, p_authdata, 0);
-        else if (session->dir == DIR_DEC)
-            bufsize = cnstr_shdsc_pdcp_u_plane_decap(
+            else
+                bufsize = cnstr_shdsc_pdcp_u_plane_encap(
+                    priv->flc_desc[0].desc, 1, swap,
+                    session->pdcp.sn_size,
+                    pdcp_xform->hfn,
+                    pdcp_xform->bearer,
+                    pdcp_xform->pkt_dir,
+                    pdcp_xform->hfn_threshold,
+                    &cipherdata, p_authdata, 0);
+        } else if (session->dir == DIR_DEC) {
+            if (pdcp_xform->sdap_enabled)
+                bufsize = cnstr_shdsc_pdcp_sdap_u_plane_decap(
+                    priv->flc_desc[0].desc, 1, swap,
+                    session->pdcp.sn_size,
+                    pdcp_xform->hfn,
+                    pdcp_xform->bearer,
+                    pdcp_xform->pkt_dir,
+                    pdcp_xform->hfn_threshold,
+                    &cipherdata, p_authdata, 0);
+            else
+                bufsize = cnstr_shdsc_pdcp_u_plane_decap(
                     priv->flc_desc[0].desc, 1, swap,
                     session->pdcp.sn_size,
                     pdcp_xform->hfn,
@@ -3143,6 +3345,7 @@ dpaa2_sec_set_pdcp_session(struct rte_cryptodev *dev,
                     pdcp_xform->pkt_dir,
                     pdcp_xform->hfn_threshold,
                     &cipherdata, p_authdata, 0);
+        }
     }
 
     if (bufsize < 0) {
@@ -3179,7 +3382,7 @@ out:
     rte_free(session->auth_key.data);
     rte_free(session->cipher_key.data);
     rte_free(priv);
-    return -1;
+    return -EINVAL;
 }
 
 static int
@@ -3377,32 +3580,10 @@ dpaa2_sec_dev_stop(struct rte_cryptodev *dev)
 }
 
 static int
-dpaa2_sec_dev_close(struct rte_cryptodev *dev)
+dpaa2_sec_dev_close(struct rte_cryptodev *dev __rte_unused)
 {
-    struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
-    struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
-    int ret;
-
     PMD_INIT_FUNC_TRACE();
 
-    /* Function is reverse of dpaa2_sec_dev_init.
-     * It does the following:
-     * 1. Detach a DPSECI from attached resources i.e. buffer pools, dpbp_id
-     * 2. Close the DPSECI device
-     * 3. Free the allocated resources.
-     */
-
-    /*Close the device at underlying layer*/
-    ret = dpseci_close(dpseci, CMD_PRI_LOW, priv->token);
-    if (ret) {
-        DPAA2_SEC_ERR("Failure closing dpseci device: err(%d)", ret);
-        return -1;
-    }
-
-    /*Free the allocated memory for ethernet private data and dpseci*/
-    priv->hw = NULL;
-    rte_free(dpseci);
-
     return 0;
 }
 
@@ -3428,7 +3609,7 @@ void dpaa2_sec_stats_get(struct rte_cryptodev *dev,
              struct rte_cryptodev_stats *stats)
 {
     struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
-    struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
+    struct fsl_mc_io dpseci;
     struct dpseci_sec_counters counters = {0};
     struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
                     dev->data->queue_pairs;
@@ -3440,7 +3621,7 @@ void dpaa2_sec_stats_get(struct rte_cryptodev *dev,
         return;
     }
     for (i = 0; i < dev->data->nb_queue_pairs; i++) {
-        if (qp[i] == NULL) {
+        if (qp == NULL || qp[i] == NULL) {
            DPAA2_SEC_DEBUG("Uninitialised queue pair");
             continue;
         }
@@ -3451,7 +3632,12 @@ void dpaa2_sec_stats_get(struct rte_cryptodev *dev,
         stats->dequeue_err_count += qp[i]->rx_vq.err_pkts;
     }
 
-    ret = dpseci_get_sec_counters(dpseci, CMD_PRI_LOW, priv->token,
+    /* In case as secondary process access stats, MCP portal in priv-hw
+     * may have primary process address. Need the secondary process
+     * based MCP portal address for this object.
+     */
+    dpseci.regs = dpaa2_get_mcp_ptr(MC_PORTAL_INDEX);
+    ret = dpseci_get_sec_counters(&dpseci, CMD_PRI_LOW, priv->token,
                       &counters);
     if (ret) {
         DPAA2_SEC_ERR("SEC counters failed");
@@ -3497,7 +3683,7 @@ void dpaa2_sec_stats_reset(struct rte_cryptodev *dev)
     }
 }
 
-static void __attribute__((hot))
+static void __rte_hot
 dpaa2_sec_process_parallel_event(struct qbman_swp *swp,
                  const struct qbman_fd *fd,
                  const struct qbman_result *dq,
@@ -3523,7 +3709,7 @@ dpaa2_sec_process_parallel_event(struct qbman_swp *swp,
     qbman_swp_dqrr_consume(swp, dq);
 }
 
 static void
-dpaa2_sec_process_atomic_event(struct qbman_swp *swp __attribute__((unused)),
+dpaa2_sec_process_atomic_event(struct qbman_swp *swp __rte_unused,
                  const struct qbman_fd *fd,
                  const struct qbman_result *dq,
                  struct dpaa2_queue *rxq,
@@ -3548,7 +3734,7 @@ dpaa2_sec_process_atomic_event(struct qbman_swp *swp __attribute__((unused)),
     ev->event_ptr = sec_fd_to_mbuf(fd);
     dqrr_index = qbman_get_dqrr_idx(dq);
-    crypto_op->sym->m_src->seqn = dqrr_index + 1;
+    *dpaa2_seqn(crypto_op->sym->m_src) = dqrr_index + 1;
     DPAA2_PER_LCORE_DQRR_SIZE++;
     DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index;
     DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = crypto_op->sym->m_src;
@@ -3632,13 +3818,15 @@ static struct rte_cryptodev_ops crypto_ops = {
     .stats_reset          = dpaa2_sec_stats_reset,
     .queue_pair_setup     = dpaa2_sec_queue_pair_setup,
     .queue_pair_release   = dpaa2_sec_queue_pair_release,
-    .queue_pair_count     = dpaa2_sec_queue_pair_count,
     .sym_session_get_size = dpaa2_sec_sym_session_get_size,
     .sym_session_configure    = dpaa2_sec_sym_session_configure,
     .sym_session_clear        = dpaa2_sec_sym_session_clear,
+    /* Raw data-path API related operations */
+    .sym_get_raw_dp_ctx_size = dpaa2_sec_get_dp_ctx_size,
+    .sym_configure_raw_dp_ctx = dpaa2_sec_configure_raw_dp_ctx,
 };
 
-#ifdef RTE_LIBRTE_SECURITY
+#ifdef RTE_LIB_SECURITY
 static const struct rte_security_capability *
 dpaa2_sec_capabilities_get(void *device __rte_unused)
 {
@@ -3658,11 +3846,31 @@ static const struct rte_security_ops dpaa2_sec_security_ops = {
 static int
 dpaa2_sec_uninit(const struct rte_cryptodev *dev)
 {
-    struct dpaa2_sec_dev_private *internals = dev->data->dev_private;
+    struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
+    struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
+    int ret;
 
-    rte_free(dev->security_ctx);
+    PMD_INIT_FUNC_TRACE();
+
+    /* Function is reverse of dpaa2_sec_dev_init.
+     * It does the following:
+     * 1. Detach a DPSECI from attached resources i.e. buffer pools, dpbp_id
+     * 2. Close the DPSECI device
+     * 3. Free the allocated resources.
+     */
+
+    /*Close the device at underlying layer*/
+    ret = dpseci_close(dpseci, CMD_PRI_LOW, priv->token);
+    if (ret) {
+        DPAA2_SEC_ERR("Failure closing dpseci device: err(%d)", ret);
+        return -1;
+    }
 
-    rte_mempool_free(internals->fle_pool);
+    /*Free the allocated memory for ethernet private data and dpseci*/
+    priv->hw = NULL;
+    rte_free(dpseci);
+    rte_free(dev->security_ctx);
+    rte_mempool_free(priv->fle_pool);
 
     DPAA2_SEC_INFO("Closing DPAA2_SEC device %s on numa socket %u",
                dev->data->name, rte_socket_id());
@@ -3676,7 +3884,7 @@ dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
     struct dpaa2_sec_dev_private *internals;
     struct rte_device *dev = cryptodev->device;
     struct rte_dpaa2_device *dpaa2_dev;
-#ifdef RTE_LIBRTE_SECURITY
+#ifdef RTE_LIB_SECURITY
     struct rte_security_ctx *security_instance;
 #endif
     struct fsl_mc_io *dpseci;
@@ -3687,10 +3895,6 @@ dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
     PMD_INIT_FUNC_TRACE();
     dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device);
-    if (dpaa2_dev == NULL) {
-        DPAA2_SEC_ERR("DPAA2 SEC device not found");
-        return -1;
-    }
     hw_id = dpaa2_dev->object_id;
 
     cryptodev->driver_id = cryptodev_driver_id;
@@ -3702,6 +3906,7 @@ dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
             RTE_CRYPTODEV_FF_HW_ACCELERATED |
             RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
             RTE_CRYPTODEV_FF_SECURITY |
+            RTE_CRYPTODEV_FF_SYM_RAW_DP |
            RTE_CRYPTODEV_FF_IN_PLACE_SGL |
             RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
             RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
@@ -3719,7 +3924,7 @@ dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
         DPAA2_SEC_DEBUG("Device already init by primary process");
         return 0;
     }
-#ifdef RTE_LIBRTE_SECURITY
+#ifdef RTE_LIB_SECURITY
     /* Initialize security_ctx only for primary process*/
     security_instance = rte_malloc("rte_security_instances_ops",
                 sizeof(struct rte_security_ctx), 0);
@@ -3736,9 +3941,9 @@ dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
     if (!dpseci) {
         DPAA2_SEC_ERR(
             "Error in allocating the memory for dpsec object");
-        return -1;
+        return -ENOMEM;
     }
-    dpseci->regs = rte_mcp_ptr_list[0];
+    dpseci->regs = dpaa2_get_mcp_ptr(MC_PORTAL_INDEX);
 
     retcode = dpseci_open(dpseci, CMD_PRI_LOW, hw_id, &token);
     if (retcode != 0) {
@@ -3820,6 +4025,8 @@ cryptodev_dpaa2_sec_probe(struct rte_dpaa2_driver *dpaa2_drv __rte_unused,
 
     if (dpaa2_svr_family == SVR_LX2160A)
         rta_set_sec_era(RTA_SEC_ERA_10);
+    else
+        rta_set_sec_era(RTA_SEC_ERA_8);
 
     DPAA2_SEC_INFO("2-SEC ERA is %d", rta_get_sec_era());
 
@@ -3868,11 +4075,4 @@ static struct cryptodev_driver dpaa2_sec_crypto_drv;
 RTE_PMD_REGISTER_DPAA2(CRYPTODEV_NAME_DPAA2_SEC_PMD, rte_dpaa2_sec_driver);
 RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa2_sec_crypto_drv,
         rte_dpaa2_sec_driver.driver, cryptodev_driver_id);
-
-RTE_INIT(dpaa2_sec_init_log)
-{
-    /* Bus level logs */
-    dpaa2_logtype_sec = rte_log_register("pmd.crypto.dpaa2");
-    if (dpaa2_logtype_sec >= 0)
-        rte_log_set_level(dpaa2_logtype_sec, RTE_LOG_NOTICE);
-}
+RTE_LOG_REGISTER(dpaa2_logtype_sec, pmd.crypto.dpaa2, NOTICE);