diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c b/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
index 565af6dcba..74f2045637 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c
@@ -24,6 +24,7 @@ struct dpaa2_sec_raw_dp_ctx {
static int
build_raw_dp_chain_fd(uint8_t *drv_ctx,
struct rte_crypto_sgl *sgl,
+ struct rte_crypto_sgl *dest_sgl,
struct rte_crypto_va_iova_ptr *iv,
struct rte_crypto_va_iova_ptr *digest,
struct rte_crypto_va_iova_ptr *auth_iv,
@@ -89,17 +90,33 @@ build_raw_dp_chain_fd(uint8_t *drv_ctx,
(cipher_len + icv_len) :
cipher_len;

- /* Configure Output SGE for Encap/Decap */
- DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
- DPAA2_SET_FLE_OFFSET(sge, ofs.ofs.auth.head);
- sge->length = sgl->vec[0].len - ofs.ofs.auth.head;
+ /* OOP */
+ if (dest_sgl) {
+ /* Configure Output SGE for Encap/Decap */
+ DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[0].iova);
+ DPAA2_SET_FLE_OFFSET(sge, ofs.ofs.cipher.head);
+ sge->length = dest_sgl->vec[0].len - ofs.ofs.cipher.head;

- /* o/p segs */
- for (i = 1; i < sgl->num; i++) {
- sge++;
- DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
- DPAA2_SET_FLE_OFFSET(sge, 0);
- sge->length = sgl->vec[i].len;
+ /* o/p segs */
+ for (i = 1; i < dest_sgl->num; i++) {
+ sge++;
+ DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[i].iova);
+ DPAA2_SET_FLE_OFFSET(sge, 0);
+ sge->length = dest_sgl->vec[i].len;
+ }
+ } else {
+ /* Configure Output SGE for Encap/Decap */
+ DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
+ DPAA2_SET_FLE_OFFSET(sge, ofs.ofs.cipher.head);
+ sge->length = sgl->vec[0].len - ofs.ofs.cipher.head;
+
+ /* o/p segs */
+ for (i = 1; i < sgl->num; i++) {
+ sge++;
+ DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
+ DPAA2_SET_FLE_OFFSET(sge, 0);
+ sge->length = sgl->vec[i].len;
+ }
}

if (sess->dir == DIR_ENC) {
@@ -160,6 +177,7 @@ build_raw_dp_chain_fd(uint8_t *drv_ctx,
static int
build_raw_dp_aead_fd(uint8_t *drv_ctx,
struct rte_crypto_sgl *sgl,
+ struct rte_crypto_sgl *dest_sgl,
struct rte_crypto_va_iova_ptr *iv,
struct rte_crypto_va_iova_ptr *digest,
struct rte_crypto_va_iova_ptr *auth_iv,
@@ -167,14 +185,142 @@ build_raw_dp_aead_fd(uint8_t *drv_ctx,
void *userdata,
struct qbman_fd *fd)
{
- RTE_SET_USED(drv_ctx);
- RTE_SET_USED(sgl);
- RTE_SET_USED(iv);
- RTE_SET_USED(digest);
- RTE_SET_USED(auth_iv);
- RTE_SET_USED(ofs);
- RTE_SET_USED(userdata);
- RTE_SET_USED(fd);
+ dpaa2_sec_session *sess =
+ ((struct dpaa2_sec_raw_dp_ctx *)drv_ctx)->session;
+ struct ctxt_priv *priv = sess->ctxt;
+ struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
+ struct sec_flow_context *flc;
+ uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
+ int icv_len = sess->digest_length;
+ uint8_t *old_icv;
+ uint8_t *IV_ptr = iv->va;
+ unsigned int i = 0;
+ int data_len = 0, aead_len = 0;
+
+ for (i = 0; i < sgl->num; i++)
+ data_len += sgl->vec[i].len;
+
+ aead_len = data_len - ofs.ofs.cipher.head - ofs.ofs.cipher.tail;
+
+ /* first FLE entry used to store mbuf and session ctxt */
+ fle = (struct qbman_fle *)rte_malloc(NULL,
+ FLE_SG_MEM_SIZE(2 * sgl->num),
+ RTE_CACHE_LINE_SIZE);
+ if (unlikely(!fle)) {
+ DPAA2_SEC_ERR("GCM SG: Memory alloc failed for SGE");
+ return -ENOMEM;
+ }
+ memset(fle, 0, FLE_SG_MEM_SIZE(2 * sgl->num));
+ DPAA2_SET_FLE_ADDR(fle, (size_t)userdata);
+ DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
+
+ op_fle = fle + 1;
+ ip_fle = fle + 2;
+ sge = fle + 3;
+
+ /* Save the shared descriptor */
+ flc = &priv->flc_desc[0].flc;
+
+ /* Configure FD as a FRAME LIST */
+ DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
+ DPAA2_SET_FD_COMPOUND_FMT(fd);
+ DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
+
+ /* Configure Output FLE with Scatter/Gather Entry */
+ DPAA2_SET_FLE_SG_EXT(op_fle);
+ DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
+
+ if (auth_only_len)
+ DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);
+
+ op_fle->length = (sess->dir == DIR_ENC) ?
+ (aead_len + icv_len) :
+ aead_len;
+
+ /* OOP */
+ if (dest_sgl) {
+ /* Configure Output SGE for Encap/Decap */
+ DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[0].iova);
+ DPAA2_SET_FLE_OFFSET(sge, ofs.ofs.cipher.head);
+ sge->length = dest_sgl->vec[0].len - ofs.ofs.cipher.head;
+
+ /* o/p segs */
+ for (i = 1; i < dest_sgl->num; i++) {
+ sge++;
+ DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[i].iova);
+ DPAA2_SET_FLE_OFFSET(sge, 0);
+ sge->length = dest_sgl->vec[i].len;
+ }
+ } else {
+ /* Configure Output SGE for Encap/Decap */
+ DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
+ DPAA2_SET_FLE_OFFSET(sge, ofs.ofs.cipher.head);
+ sge->length = sgl->vec[0].len - ofs.ofs.cipher.head;
+
+ /* o/p segs */
+ for (i = 1; i < sgl->num; i++) {
+ sge++;
+ DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
+ DPAA2_SET_FLE_OFFSET(sge, 0);
+ sge->length = sgl->vec[i].len;
+ }
+ }
+
+ if (sess->dir == DIR_ENC) {
+ sge++;
+ DPAA2_SET_FLE_ADDR(sge, digest->iova);
+ sge->length = icv_len;
+ }
+ DPAA2_SET_FLE_FIN(sge);
+
+ sge++;
+
+ /* Configure Input FLE with Scatter/Gather Entry */
+ DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
+ DPAA2_SET_FLE_SG_EXT(ip_fle);
+ DPAA2_SET_FLE_FIN(ip_fle);
+ ip_fle->length = (sess->dir == DIR_ENC) ?
+ (aead_len + sess->iv.length + auth_only_len) :
+ (aead_len + sess->iv.length + auth_only_len +
+ icv_len);
+
+ /* Configure Input SGE for Encap/Decap */
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr));
+ sge->length = sess->iv.length;
+
+ sge++;
+ if (auth_only_len) {
+ DPAA2_SET_FLE_ADDR(sge, auth_iv->iova);
+ sge->length = auth_only_len;
+ sge++;
+ }
+
+ DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
+ DPAA2_SET_FLE_OFFSET(sge, ofs.ofs.cipher.head);
+ sge->length = sgl->vec[0].len - ofs.ofs.cipher.head;
+
+ /* i/p segs */
+ for (i = 1; i < sgl->num; i++) {
+ sge++;
+ DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
+ DPAA2_SET_FLE_OFFSET(sge, 0);
+ sge->length = sgl->vec[i].len;
+ }
+
+ if (sess->dir == DIR_DEC) {
+ sge++;
+ old_icv = (uint8_t *)(sge + 1);
+ memcpy(old_icv, digest->va, icv_len);
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
+ sge->length = icv_len;
+ }
+
+ DPAA2_SET_FLE_FIN(sge);
+ if (auth_only_len) {
+ DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
+ DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
+ }
+ DPAA2_SET_FD_LEN(fd, ip_fle->length);

return 0;
}

@@ -182,6 +328,7 @@ build_raw_dp_aead_fd(uint8_t *drv_ctx,
static int
build_raw_dp_auth_fd(uint8_t *drv_ctx,
struct rte_crypto_sgl *sgl,
+ struct rte_crypto_sgl *dest_sgl,
struct rte_crypto_va_iova_ptr *iv,
struct rte_crypto_va_iova_ptr *digest,
struct rte_crypto_va_iova_ptr *auth_iv,
@@ -191,6 +338,7 @@ build_raw_dp_auth_fd(uint8_t *drv_ctx,
{
RTE_SET_USED(iv);
RTE_SET_USED(auth_iv);
+ RTE_SET_USED(dest_sgl);

dpaa2_sec_session *sess =
((struct dpaa2_sec_raw_dp_ctx *)drv_ctx)->session;
@@ -207,16 +355,7 @@ build_raw_dp_auth_fd(uint8_t *drv_ctx,
data_len = total_len - ofs.ofs.auth.head - ofs.ofs.auth.tail;
data_offset = ofs.ofs.auth.head;

- if (sess->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
- sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
- if ((data_len & 7) || (data_offset & 7)) {
- DPAA2_SEC_ERR("AUTH: len/offset must be full bytes");
- return -ENOTSUP;
- }
-
- data_len = data_len >> 3;
- data_offset = data_offset >> 3;
- }
+ /* For SNOW3G and ZUC, only lengths in bits are supported */

fle = (struct qbman_fle *)rte_malloc(NULL,
FLE_SG_MEM_SIZE(2 * sgl->num),
RTE_CACHE_LINE_SIZE);
@@ -304,6 +443,7 @@ build_raw_dp_auth_fd(uint8_t *drv_ctx,
static int
build_raw_dp_proto_fd(uint8_t *drv_ctx,
struct rte_crypto_sgl *sgl,
+ struct rte_crypto_sgl *dest_sgl,
struct rte_crypto_va_iova_ptr *iv,
struct rte_crypto_va_iova_ptr *digest,
struct rte_crypto_va_iova_ptr *auth_iv,
@@ -311,36 +451,123 @@ build_raw_dp_proto_fd(uint8_t *drv_ctx,
void *userdata,
struct qbman_fd *fd)
{
- RTE_SET_USED(drv_ctx);
- RTE_SET_USED(sgl);
RTE_SET_USED(iv);
RTE_SET_USED(digest);
RTE_SET_USED(auth_iv);
RTE_SET_USED(ofs);
- RTE_SET_USED(userdata);
- RTE_SET_USED(fd);
- return 0;
-}
+ dpaa2_sec_session *sess =
+ ((struct dpaa2_sec_raw_dp_ctx *)drv_ctx)->session;
+ struct ctxt_priv *priv = sess->ctxt;
+ struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
+ struct sec_flow_context *flc;
+ uint32_t in_len = 0, out_len = 0, i;

-static int
-build_raw_dp_proto_compound_fd(uint8_t *drv_ctx,
- struct rte_crypto_sgl *sgl,
- struct rte_crypto_va_iova_ptr *iv,
- struct rte_crypto_va_iova_ptr *digest,
- struct rte_crypto_va_iova_ptr *auth_iv,
- union rte_crypto_sym_ofs ofs,
- void *userdata,
- struct qbman_fd *fd)
-{
- RTE_SET_USED(drv_ctx);
- RTE_SET_USED(sgl);
- RTE_SET_USED(iv);
- RTE_SET_USED(digest);
- RTE_SET_USED(auth_iv);
- RTE_SET_USED(ofs);
- RTE_SET_USED(userdata);
- RTE_SET_USED(fd);
+ /* first FLE entry used to store mbuf and session ctxt */
+ fle = (struct qbman_fle *)rte_malloc(NULL,
+ FLE_SG_MEM_SIZE(2 * sgl->num),
+ RTE_CACHE_LINE_SIZE);
+ if (unlikely(!fle)) {
+ DPAA2_SEC_DP_ERR("Proto:SG: Memory alloc failed for SGE");
+ return -ENOMEM;
+ }
+ memset(fle, 0, FLE_SG_MEM_SIZE(2 * sgl->num));
+ DPAA2_SET_FLE_ADDR(fle, (size_t)userdata);
+ DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
+
+ /* Save the shared descriptor */
+ flc = &priv->flc_desc[0].flc;
+ op_fle = fle + 1;
+ ip_fle = fle + 2;
+ sge = fle + 3;
+
+ DPAA2_SET_FD_IVP(fd);
+ DPAA2_SET_FLE_IVP(op_fle);
+ DPAA2_SET_FLE_IVP(ip_fle);
+
+ /* Configure FD as a FRAME LIST */
+ DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
+ DPAA2_SET_FD_COMPOUND_FMT(fd);
+ DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
+
+ /* Configure Output FLE with Scatter/Gather Entry */
+ DPAA2_SET_FLE_SG_EXT(op_fle);
+ DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
+
+ /* OOP */
+ if (dest_sgl) {
+ /* Configure Output SGE for Encap/Decap */
+ DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[0].iova);
+ DPAA2_SET_FLE_OFFSET(sge, 0);
+ sge->length = dest_sgl->vec[0].len;
+ out_len += sge->length;
+ /* o/p segs */
+ for (i = 1; i < dest_sgl->num; i++) {
+ sge++;
+ DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[i].iova);
+ DPAA2_SET_FLE_OFFSET(sge, 0);
+ sge->length = dest_sgl->vec[i].len;
+ out_len += sge->length;
+ }
+ sge->length = dest_sgl->vec[i - 1].tot_len;
+
+ } else {
+ /* Configure Output SGE for Encap/Decap */
+ DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
+ DPAA2_SET_FLE_OFFSET(sge, 0);
+ sge->length = sgl->vec[0].len;
+ out_len += sge->length;
+ /* o/p segs */
+ for (i = 1; i < sgl->num; i++) {
+ sge++;
+ DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
+ DPAA2_SET_FLE_OFFSET(sge, 0);
+ sge->length = sgl->vec[i].len;
+ out_len += sge->length;
+ }
+ sge->length = sgl->vec[i - 1].tot_len;
+ }
+ out_len += sge->length;
+
+ DPAA2_SET_FLE_FIN(sge);
+ op_fle->length = out_len;
+
+ sge++;
+
+ /* Configure Input FLE with Scatter/Gather Entry */
+ DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
+ DPAA2_SET_FLE_SG_EXT(ip_fle);
+ DPAA2_SET_FLE_FIN(ip_fle);
+
+ /* Configure input SGE for Encap/Decap */
+ DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
+ DPAA2_SET_FLE_OFFSET(sge, 0);
+ sge->length = sgl->vec[0].len;
+ in_len += sge->length;
+ /* i/p segs */
+ for (i = 1; i < sgl->num; i++) {
+ sge++;
+ DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
+ DPAA2_SET_FLE_OFFSET(sge, 0);
+ sge->length = sgl->vec[i].len;
+ in_len += sge->length;
+ }
+
+ ip_fle->length = in_len;
+ DPAA2_SET_FLE_FIN(sge);
+
+ /* In case of PDCP, per packet HFN is stored in
+ * mbuf priv after sym_op.
+ */
+ if (sess->ctxt_type == DPAA2_SEC_PDCP && sess->pdcp.hfn_ovd) {
+ uint32_t hfn_ovd = *(uint32_t *)((uint8_t *)userdata +
+ sess->pdcp.hfn_ovd_offset);
+ /* enable HFN override */
+ DPAA2_SET_FLE_INTERNAL_JD(ip_fle, hfn_ovd);
+ DPAA2_SET_FLE_INTERNAL_JD(op_fle, hfn_ovd);
+ DPAA2_SET_FD_INTERNAL_JD(fd, hfn_ovd);
+ }
+ DPAA2_SET_FD_LEN(fd, ip_fle->length);

return 0;
}

@@ -348,6 +575,7 @@ build_raw_dp_proto_compound_fd(uint8_t *drv_ctx,
static int
build_raw_dp_cipher_fd(uint8_t *drv_ctx,
struct rte_crypto_sgl *sgl,
+ struct rte_crypto_sgl *dest_sgl,
struct rte_crypto_va_iova_ptr *iv,
struct rte_crypto_va_iova_ptr *digest,
struct rte_crypto_va_iova_ptr *auth_iv,
@@ -372,17 +600,7 @@ build_raw_dp_cipher_fd(uint8_t *drv_ctx,
data_len = total_len - ofs.ofs.cipher.head - ofs.ofs.cipher.tail;
data_offset = ofs.ofs.cipher.head;

- if (sess->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
- sess->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
- if ((data_len & 7) || (data_offset & 7)) {
- DPAA2_SEC_ERR("CIPHER: len/offset must be full bytes");
- return -ENOTSUP;
- }
-
- data_len = data_len >> 3;
- data_offset = data_offset >> 3;
- }
-
+ /* For SNOW3G and ZUC, only lengths in bits are supported */
/* first FLE entry used to store mbuf and session ctxt */
fle = (struct qbman_fle *)rte_malloc(NULL,
FLE_SG_MEM_SIZE(2*sgl->num),
@@ -413,17 +631,33 @@ build_raw_dp_cipher_fd(uint8_t *drv_ctx,
op_fle->length = data_len;
DPAA2_SET_FLE_SG_EXT(op_fle);

- /* o/p 1st seg */
- DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
- DPAA2_SET_FLE_OFFSET(sge, data_offset);
- sge->length = sgl->vec[0].len - data_offset;
+ /* OOP */
+ if (dest_sgl) {
+ /* o/p 1st seg */
+ DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[0].iova);
+ DPAA2_SET_FLE_OFFSET(sge, data_offset);
+ sge->length = dest_sgl->vec[0].len - data_offset;

- /* o/p segs */
- for (i = 1; i < sgl->num; i++) {
- sge++;
- DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
- DPAA2_SET_FLE_OFFSET(sge, 0);
- sge->length = sgl->vec[i].len;
+ /* o/p segs */
+ for (i = 1; i < dest_sgl->num; i++) {
+ sge++;
+ DPAA2_SET_FLE_ADDR(sge, dest_sgl->vec[i].iova);
+ DPAA2_SET_FLE_OFFSET(sge, 0);
+ sge->length = dest_sgl->vec[i].len;
+ }
+ } else {
+ /* o/p 1st seg */
+ DPAA2_SET_FLE_ADDR(sge, sgl->vec[0].iova);
+ DPAA2_SET_FLE_OFFSET(sge, data_offset);
+ sge->length = sgl->vec[0].len - data_offset;
+
+ /* o/p segs */
+ for (i = 1; i < sgl->num; i++) {
+ sge++;
+ DPAA2_SET_FLE_ADDR(sge, sgl->vec[i].iova);
+ DPAA2_SET_FLE_OFFSET(sge, 0);
+ sge->length = sgl->vec[i].len;
+ }
}
DPAA2_SET_FLE_FIN(sge);

@@ -526,6 +760,7 @@ dpaa2_sec_raw_enqueue_burst(void *qp_data, uint8_t *drv_ctx,
memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
ret = sess->build_raw_dp_fd(drv_ctx,
&vec->src_sgl[loop],
+ &vec->dest_sgl[loop],
&vec->iv[loop],
&vec->digest[loop],
&vec->auth_iv[loop],
@@ -624,7 +859,7 @@ dpaa2_sec_raw_dequeue_burst(void *qp_data, uint8_t *drv_ctx,
struct qbman_result *dq_storage;
uint32_t fqid = dpaa2_qp->rx_vq.fqid;
int ret, num_rx = 0;
- uint8_t is_last = 0, status;
+ uint8_t is_last = 0, status, is_success = 0;
struct qbman_swp *swp;
const struct qbman_fd *fd;
struct qbman_pull_desc pulldesc;
@@ -703,11 +938,11 @@ dpaa2_sec_raw_dequeue_burst(void *qp_data, uint8_t *drv_ctx,
/* TODO Parse SEC errors */
DPAA2_SEC_ERR("SEC returned Error - %x",
fd->simple.frc);
- status = RTE_CRYPTO_OP_STATUS_ERROR;
+ is_success = false;
} else {
- status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ is_success = true;
}
- post_dequeue(user_data, num_rx, status);
+ post_dequeue(user_data, num_rx, is_success);

num_rx++;
dq_storage++;
@@ -792,10 +1027,9 @@ dpaa2_sec_configure_raw_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
sess->build_raw_dp_fd = build_raw_dp_auth_fd;
else if (sess->ctxt_type == DPAA2_SEC_CIPHER)
sess->build_raw_dp_fd = build_raw_dp_cipher_fd;
- else if (sess->ctxt_type == DPAA2_SEC_IPSEC)
+ else if (sess->ctxt_type == DPAA2_SEC_IPSEC ||
+ sess->ctxt_type == DPAA2_SEC_PDCP)
sess->build_raw_dp_fd = build_raw_dp_proto_fd;
- else if (sess->ctxt_type == DPAA2_SEC_PDCP)
- sess->build_raw_dp_fd = build_raw_dp_proto_compound_fd;
else
return -ENOTSUP;
dp_ctx = (struct dpaa2_sec_raw_dp_ctx *)raw_dp_ctx->drv_ctx_data;
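
Usage note (not part of the patch): the new dest_sgl argument threaded through the FD builders comes from the rte_crypto_sym_vec::dest_sgl field added in DPDK 21.11. When it is non-NULL, the builders program the output FLEs from the destination SGL (out-of-place); when it is NULL, they fall back to the in-place path, matching the if (dest_sgl) / else split in each function. A minimal caller-side sketch follows, assuming the DPDK 21.11 raw datapath API and a ctx already set up with rte_cryptodev_configure_raw_dp_ctx(); the helper name enqueue_oop, its parameters, and the zeroed offsets are illustrative only.

#include <rte_cryptodev.h>

/* Illustrative helper (hypothetical name): enqueue one symmetric job
 * out of place through an already-configured raw datapath context.
 */
static int
enqueue_oop(struct rte_crypto_raw_dp_ctx *ctx,
        struct rte_crypto_vec *src_vec, uint16_t src_segs,
        struct rte_crypto_vec *dst_vec, uint16_t dst_segs,
        struct rte_crypto_va_iova_ptr *iv,
        struct rte_crypto_va_iova_ptr *digest,
        void *user_data)
{
        struct rte_crypto_sgl src_sgl = { .vec = src_vec, .num = src_segs };
        struct rte_crypto_sgl dst_sgl = { .vec = dst_vec, .num = dst_segs };
        int32_t op_status = 0;
        struct rte_crypto_sym_vec vec = {
                .num = 1,
                .src_sgl = &src_sgl,
                .dest_sgl = &dst_sgl, /* non-NULL selects the OOP branches */
                .iv = iv,
                .digest = digest,
                .status = &op_status,
        };
        union rte_crypto_sym_ofs ofs = { .raw = 0 }; /* no head/tail trim */
        int enq_status = 0;

        if (rte_cryptodev_raw_enqueue_burst(ctx, &vec, ofs, &user_data,
                        &enq_status) != 1 || enq_status < 0)
                return -1;
        /* Flush the queued descriptor(s) to hardware. */
        return rte_cryptodev_raw_enqueue_done(ctx, 1);
}

Passing dest_sgl = NULL in rte_crypto_sym_vec keeps the pre-patch in-place behaviour unchanged.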