diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.c b/drivers/crypto/dpaa_sec/dpaa_sec.c
index 69965cd878..122c80a072 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec.c
+++ b/drivers/crypto/dpaa_sec/dpaa_sec.c
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
- * Copyright 2017-2018 NXP
+ * Copyright 2017-2019 NXP
 *
 */
@@ -23,6 +23,7 @@
 #include <rte_mbuf.h>
 #include <rte_memcpy.h>
 #include <rte_string_fns.h>
+#include <rte_spinlock.h>
 
 #include <fsl_usd.h>
 #include <fsl_qman.h>
@@ -32,6 +33,7 @@
 #include <hw/desc/common.h>
 #include <hw/desc/algo.h>
 #include <hw/desc/ipsec.h>
+#include <hw/desc/pdcp.h>
 
 #include <rte_dpaa_bus.h>
 #include <dpaa_sec.h>
@@ -59,7 +61,7 @@ dpaa_sec_op_ending(struct dpaa_sec_op_ctx *ctx)
 ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
 }
- /* report op status to sym->op and then free the ctx memeory */
+ /* report op status to sym->op and then free the ctx memory */
 rte_mempool_put(ctx->ctx_pool, (void *)ctx);
 }
@@ -106,6 +108,12 @@ dpaa_mem_vtop(void *vaddr)
 static inline void *
 dpaa_mem_ptov(rte_iova_t paddr)
 {
+ void *va;
+
+ va = (void *)dpaax_iova_table_get_va(paddr);
+ if (likely(va))
+ return va;
+
 return rte_mem_iova2virt(paddr);
 }
@@ -259,6 +267,11 @@ static inline int is_proto_ipsec(dpaa_sec_session *ses)
 return (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC);
 }
+static inline int is_proto_pdcp(dpaa_sec_session *ses)
+{
+ return (ses->proto_alg == RTE_SECURITY_PROTOCOL_PDCP);
+}
+
 static inline int is_encode(dpaa_sec_session *ses)
 {
 return ses->dir == DIR_ENC;
@@ -274,6 +287,9 @@ caam_auth_alg(dpaa_sec_session *ses, struct alginfo *alginfo_a)
 {
 switch (ses->auth_alg) {
 case RTE_CRYPTO_AUTH_NULL:
+ alginfo_a->algtype =
+ (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
+ OP_PCL_IPSEC_HMAC_NULL : 0;
 ses->digest_length = 0;
 break;
 case RTE_CRYPTO_AUTH_MD5_HMAC:
@@ -322,6 +338,9 @@ caam_cipher_alg(dpaa_sec_session *ses, struct alginfo *alginfo_c)
 {
 switch (ses->cipher_alg) {
 case RTE_CRYPTO_CIPHER_NULL:
+ alginfo_c->algtype =
+ (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
+ OP_PCL_IPSEC_NULL : 0; break; case RTE_CRYPTO_CIPHER_AES_CBC: alginfo_c->algtype = @@ -359,6 +378,236 @@ caam_aead_alg(dpaa_sec_session *ses, struct alginfo *alginfo) } } +static int +dpaa_sec_prep_pdcp_cdb(dpaa_sec_session *ses) +{ + struct alginfo authdata = {0}, cipherdata = {0}; + struct sec_cdb *cdb = &ses->cdb; + int32_t shared_desc_len = 0; + int err; +#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN + int swap = false; +#else + int swap = true; +#endif + + switch (ses->cipher_alg) { + case RTE_CRYPTO_CIPHER_SNOW3G_UEA2: + cipherdata.algtype = PDCP_CIPHER_TYPE_SNOW; + break; + case RTE_CRYPTO_CIPHER_ZUC_EEA3: + cipherdata.algtype = PDCP_CIPHER_TYPE_ZUC; + break; + case RTE_CRYPTO_CIPHER_AES_CTR: + cipherdata.algtype = PDCP_CIPHER_TYPE_AES; + break; + case RTE_CRYPTO_CIPHER_NULL: + cipherdata.algtype = PDCP_CIPHER_TYPE_NULL; + break; + default: + DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u", + ses->cipher_alg); + return -1; + } + + cipherdata.key = (size_t)ses->cipher_key.data; + cipherdata.keylen = ses->cipher_key.length; + cipherdata.key_enc_flags = 0; + cipherdata.key_type = RTA_DATA_IMM; + + if (ses->pdcp.domain == RTE_SECURITY_PDCP_MODE_CONTROL) { + switch (ses->auth_alg) { + case RTE_CRYPTO_AUTH_SNOW3G_UIA2: + authdata.algtype = PDCP_AUTH_TYPE_SNOW; + break; + case RTE_CRYPTO_AUTH_ZUC_EIA3: + authdata.algtype = PDCP_AUTH_TYPE_ZUC; + break; + case RTE_CRYPTO_AUTH_AES_CMAC: + authdata.algtype = PDCP_AUTH_TYPE_AES; + break; + case RTE_CRYPTO_AUTH_NULL: + authdata.algtype = PDCP_AUTH_TYPE_NULL; + break; + default: + DPAA_SEC_ERR("Crypto: Unsupported auth alg %u", + ses->auth_alg); + return -1; + } + + authdata.key = (size_t)ses->auth_key.data; + authdata.keylen = ses->auth_key.length; + authdata.key_enc_flags = 0; + authdata.key_type = RTA_DATA_IMM; + + cdb->sh_desc[0] = cipherdata.keylen; + cdb->sh_desc[1] = authdata.keylen; + err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN, + MIN_JOB_DESC_SIZE, + (unsigned int *)cdb->sh_desc, + &cdb->sh_desc[2], 2); + + if (err < 0) { + DPAA_SEC_ERR("Crypto: Incorrect key lengths"); + return err; + } + if (!(cdb->sh_desc[2] & 1) && cipherdata.keylen) { + cipherdata.key = (size_t)dpaa_mem_vtop( + (void *)(size_t)cipherdata.key); + cipherdata.key_type = RTA_DATA_PTR; + } + if (!(cdb->sh_desc[2] & (1<<1)) && authdata.keylen) { + authdata.key = (size_t)dpaa_mem_vtop( + (void *)(size_t)authdata.key); + authdata.key_type = RTA_DATA_PTR; + } + + cdb->sh_desc[0] = 0; + cdb->sh_desc[1] = 0; + cdb->sh_desc[2] = 0; + + if (ses->dir == DIR_ENC) + shared_desc_len = cnstr_shdsc_pdcp_c_plane_encap( + cdb->sh_desc, 1, swap, + ses->pdcp.hfn, + ses->pdcp.bearer, + ses->pdcp.pkt_dir, + ses->pdcp.hfn_threshold, + &cipherdata, &authdata, + 0); + else if (ses->dir == DIR_DEC) + shared_desc_len = cnstr_shdsc_pdcp_c_plane_decap( + cdb->sh_desc, 1, swap, + ses->pdcp.hfn, + ses->pdcp.bearer, + ses->pdcp.pkt_dir, + ses->pdcp.hfn_threshold, + &cipherdata, &authdata, + 0); + } else { + cdb->sh_desc[0] = cipherdata.keylen; + err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN, + MIN_JOB_DESC_SIZE, + (unsigned int *)cdb->sh_desc, + &cdb->sh_desc[2], 1); + + if (err < 0) { + DPAA_SEC_ERR("Crypto: Incorrect key lengths"); + return err; + } + if (!(cdb->sh_desc[2] & 1) && cipherdata.keylen) { + cipherdata.key = (size_t)dpaa_mem_vtop( + (void *)(size_t)cipherdata.key); + cipherdata.key_type = RTA_DATA_PTR; + } + cdb->sh_desc[0] = 0; + cdb->sh_desc[1] = 0; + cdb->sh_desc[2] = 0; + + if (ses->dir == DIR_ENC) + shared_desc_len = cnstr_shdsc_pdcp_u_plane_encap( + 
cdb->sh_desc, 1, swap, + ses->pdcp.sn_size, + ses->pdcp.hfn, + ses->pdcp.bearer, + ses->pdcp.pkt_dir, + ses->pdcp.hfn_threshold, + &cipherdata, 0); + else if (ses->dir == DIR_DEC) + shared_desc_len = cnstr_shdsc_pdcp_u_plane_decap( + cdb->sh_desc, 1, swap, + ses->pdcp.sn_size, + ses->pdcp.hfn, + ses->pdcp.bearer, + ses->pdcp.pkt_dir, + ses->pdcp.hfn_threshold, + &cipherdata, 0); + } + + return shared_desc_len; +} + +/* prepare ipsec proto command block of the session */ +static int +dpaa_sec_prep_ipsec_cdb(dpaa_sec_session *ses) +{ + struct alginfo cipherdata = {0}, authdata = {0}; + struct sec_cdb *cdb = &ses->cdb; + int32_t shared_desc_len = 0; + int err; +#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN + int swap = false; +#else + int swap = true; +#endif + + caam_cipher_alg(ses, &cipherdata); + if (cipherdata.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) { + DPAA_SEC_ERR("not supported cipher alg"); + return -ENOTSUP; + } + + cipherdata.key = (size_t)ses->cipher_key.data; + cipherdata.keylen = ses->cipher_key.length; + cipherdata.key_enc_flags = 0; + cipherdata.key_type = RTA_DATA_IMM; + + caam_auth_alg(ses, &authdata); + if (authdata.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) { + DPAA_SEC_ERR("not supported auth alg"); + return -ENOTSUP; + } + + authdata.key = (size_t)ses->auth_key.data; + authdata.keylen = ses->auth_key.length; + authdata.key_enc_flags = 0; + authdata.key_type = RTA_DATA_IMM; + + cdb->sh_desc[0] = cipherdata.keylen; + cdb->sh_desc[1] = authdata.keylen; + err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN, + MIN_JOB_DESC_SIZE, + (unsigned int *)cdb->sh_desc, + &cdb->sh_desc[2], 2); + + if (err < 0) { + DPAA_SEC_ERR("Crypto: Incorrect key lengths"); + return err; + } + if (cdb->sh_desc[2] & 1) + cipherdata.key_type = RTA_DATA_IMM; + else { + cipherdata.key = (size_t)dpaa_mem_vtop( + (void *)(size_t)cipherdata.key); + cipherdata.key_type = RTA_DATA_PTR; + } + if (cdb->sh_desc[2] & (1<<1)) + authdata.key_type = RTA_DATA_IMM; + else { + authdata.key = (size_t)dpaa_mem_vtop( + (void *)(size_t)authdata.key); + authdata.key_type = RTA_DATA_PTR; + } + + cdb->sh_desc[0] = 0; + cdb->sh_desc[1] = 0; + cdb->sh_desc[2] = 0; + if (ses->dir == DIR_ENC) { + shared_desc_len = cnstr_shdsc_ipsec_new_encap( + cdb->sh_desc, + true, swap, SHR_SERIAL, + &ses->encap_pdb, + (uint8_t *)&ses->ip4_hdr, + &cipherdata, &authdata); + } else if (ses->dir == DIR_DEC) { + shared_desc_len = cnstr_shdsc_ipsec_new_decap( + cdb->sh_desc, + true, swap, SHR_SERIAL, + &ses->decap_pdb, + &cipherdata, &authdata); + } + return shared_desc_len; +} /* prepare command block of the session */ static int @@ -376,7 +625,11 @@ dpaa_sec_prep_cdb(dpaa_sec_session *ses) memset(cdb, 0, sizeof(struct sec_cdb)); - if (is_cipher_only(ses)) { + if (is_proto_ipsec(ses)) { + shared_desc_len = dpaa_sec_prep_ipsec_cdb(ses); + } else if (is_proto_pdcp(ses)) { + shared_desc_len = dpaa_sec_prep_pdcp_cdb(ses); + } else if (is_cipher_only(ses)) { caam_cipher_alg(ses, &alginfo_c); if (alginfo_c.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) { DPAA_SEC_ERR("not supported cipher alg"); @@ -390,7 +643,7 @@ dpaa_sec_prep_cdb(dpaa_sec_session *ses) shared_desc_len = cnstr_shdsc_blkcipher( cdb->sh_desc, true, - swap, &alginfo_c, + swap, SHR_NEVER, &alginfo_c, NULL, ses->iv.length, ses->dir); @@ -407,7 +660,7 @@ dpaa_sec_prep_cdb(dpaa_sec_session *ses) alginfo_a.key_type = RTA_DATA_IMM; shared_desc_len = cnstr_shdsc_hmac(cdb->sh_desc, true, - swap, &alginfo_a, + swap, SHR_NEVER, &alginfo_a, !ses->dir, ses->digest_length); } else if 
(is_aead(ses)) {
@@ -423,13 +676,13 @@ dpaa_sec_prep_cdb(dpaa_sec_session *ses)
 if (ses->dir == DIR_ENC)
 shared_desc_len = cnstr_shdsc_gcm_encap(
- cdb->sh_desc, true, swap,
+ cdb->sh_desc, true, swap, SHR_NEVER,
 &alginfo,
 ses->iv.length,
 ses->digest_length);
 else
 shared_desc_len = cnstr_shdsc_gcm_decap(
- cdb->sh_desc, true, swap,
+ cdb->sh_desc, true, swap, SHR_NEVER,
 &alginfo,
 ses->iv.length,
 ses->digest_length);
@@ -484,28 +737,13 @@ dpaa_sec_prep_cdb(dpaa_sec_session *ses)
 cdb->sh_desc[0] = 0;
 cdb->sh_desc[1] = 0;
 cdb->sh_desc[2] = 0;
- if (is_proto_ipsec(ses)) {
- if (ses->dir == DIR_ENC) {
- shared_desc_len = cnstr_shdsc_ipsec_new_encap(
- cdb->sh_desc,
- true, swap, &ses->encap_pdb,
- (uint8_t *)&ses->ip4_hdr,
- &alginfo_c, &alginfo_a);
- } else if (ses->dir == DIR_DEC) {
- shared_desc_len = cnstr_shdsc_ipsec_new_decap(
- cdb->sh_desc,
- true, swap, &ses->decap_pdb,
- &alginfo_c, &alginfo_a);
- }
- } else {
- /* Auth_only_len is set as 0 here and it will be
- * overwritten in fd for each packet.
- */
- shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
- true, swap, &alginfo_c, &alginfo_a,
- ses->iv.length, 0,
- ses->digest_length, ses->dir);
- }
+ /* Auth_only_len is set to 0 here and will be
+ * overwritten in the fd for each packet.
+ */
+ shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
+ true, swap, SHR_SERIAL, &alginfo_c, &alginfo_a,
+ ses->iv.length, 0,
+ ses->digest_length, ses->dir);
 }
 if (shared_desc_len < 0) {
@@ -526,12 +764,25 @@ dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
 {
 struct qman_fq *fq;
 unsigned int pkts = 0;
- int ret;
+ int num_rx_bufs, ret;
 struct qm_dqrr_entry *dq;
+ uint32_t vdqcr_flags = 0;
 fq = &qp->outq;
- ret = qman_set_vdq(fq, (nb_ops > DPAA_MAX_DEQUEUE_NUM_FRAMES) ?
- DPAA_MAX_DEQUEUE_NUM_FRAMES : nb_ops);
+ /*
+ * For requests of fewer than four buffers, we provide the exact
+ * number of buffers. Otherwise we do not set the QM_VDQCR_EXACT
+ * flag; without it, the hardware can return up to two buffers more
+ * than requested, so we request two fewer in that case.
+ */
+ if (nb_ops < 4) {
+ vdqcr_flags = QM_VDQCR_EXACT;
+ num_rx_bufs = nb_ops;
+ } else {
+ num_rx_bufs = nb_ops > DPAA_MAX_DEQUEUE_NUM_FRAMES ?
+ (DPAA_MAX_DEQUEUE_NUM_FRAMES - 2) : (nb_ops - 2);
+ }
+ ret = qman_set_vdq(fq, num_rx_bufs, vdqcr_flags);
 if (ret)
 return 0;
@@ -1416,7 +1667,7 @@ dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
 switch (op->sess_type) {
 case RTE_CRYPTO_OP_WITH_SESSION:
 ses = (dpaa_sec_session *)
- get_session_private_data(
+ get_sym_session_private_data(
 op->sym->session,
 cryptodev_driver_id);
 break;
@@ -1432,20 +1683,31 @@ dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
 nb_ops = loop;
 goto send_pkts;
 }
- if (unlikely(!ses->qp || ses->qp != qp)) {
- DPAA_SEC_DP_ERR("sess->qp - %p qp %p",
- ses->qp, qp);
+ if (unlikely(!ses->qp[rte_lcore_id() % MAX_DPAA_CORES])) {
 if (dpaa_sec_attach_sess_q(qp, ses)) {
 frames_to_send = loop;
 nb_ops = loop;
 goto send_pkts;
 }
+ } else if (unlikely(ses->qp[rte_lcore_id() %
+ MAX_DPAA_CORES] != qp)) {
+ DPAA_SEC_DP_ERR("Old:sess->qp = %p"
+ " New qp = %p\n",
+ ses->qp[rte_lcore_id() %
+ MAX_DPAA_CORES], qp);
+ frames_to_send = loop;
+ nb_ops = loop;
+ goto send_pkts;
 }
 auth_only_len = op->sym->auth.data.length -
 op->sym->cipher.data.length;
 if (rte_pktmbuf_is_contiguous(op->sym->m_src)) {
- if (is_auth_only(ses)) {
+ if (is_proto_ipsec(ses)) {
+ cf = build_proto(op, ses);
+ } else if (is_proto_pdcp(ses)) {
+ cf = build_proto(op, ses);
+ } else if (is_auth_only(ses)) {
 cf = build_auth_only(op, ses);
 } else if (is_cipher_only(ses)) {
 cf = build_cipher_only(op, ses);
@@ -1454,8 +1716,6 @@ dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
 auth_only_len = ses->auth_only_len;
 } else if (is_auth_cipher(ses)) {
 cf = build_cipher_auth(op, ses);
- } else if (is_proto_ipsec(ses)) {
- cf = build_proto(op, ses);
 } else {
 DPAA_SEC_DP_ERR("not supported ops");
 frames_to_send = loop;
@@ -1486,7 +1746,7 @@ dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
 }
 fd = &fds[loop];
- inq[loop] = ses->inq;
+ inq[loop] = ses->inq[rte_lcore_id() % MAX_DPAA_CORES];
 fd->opaque_addr = 0;
 fd->cmd = 0;
 qm_fd_addr_set64(fd, dpaa_mem_vtop(cf->sg));
@@ -1563,8 +1823,7 @@ dpaa_sec_queue_pair_release(struct rte_cryptodev *dev,
 static int
 dpaa_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
 __rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
- __rte_unused int socket_id,
- __rte_unused struct rte_mempool *session_pool)
+ __rte_unused int socket_id)
 {
 struct dpaa_sec_dev_private *internals;
 struct dpaa_sec_qp *qp = NULL;
@@ -1596,7 +1855,7 @@ dpaa_sec_queue_pair_count(struct rte_cryptodev *dev)
 /** Returns the size of session structure */
 static unsigned int
-dpaa_sec_session_get_size(struct rte_cryptodev *dev __rte_unused)
+dpaa_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
 {
 PMD_INIT_FUNC_TRACE();
@@ -1681,13 +1940,13 @@ dpaa_sec_attach_rxq(struct dpaa_sec_dev_private *qi)
 {
 unsigned int i;
- for (i = 0; i < qi->max_nb_sessions; i++) {
+ for (i = 0; i < qi->max_nb_sessions * MAX_DPAA_CORES; i++) {
 if (qi->inq_attach[i] == 0) {
 qi->inq_attach[i] = 1;
 return &qi->inq[i];
 }
 }
- DPAA_SEC_WARN("All ses session in use %x", qi->max_nb_sessions);
+ DPAA_SEC_WARN("All sessions in use %u", qi->max_nb_sessions);
 return NULL;
 }
@@ -1713,7 +1972,7 @@ dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess)
 {
 int ret;
- sess->qp = qp;
+ sess->qp[rte_lcore_id() % MAX_DPAA_CORES] = qp;
 ret = dpaa_sec_prep_cdb(sess);
 if (ret) {
 DPAA_SEC_ERR("Unable to prepare sec cdb");
@@ -1726,7 +1985,8 @@ dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess)
 return ret;
 }
 }
- ret = dpaa_sec_init_rx(sess->inq,
dpaa_mem_vtop(&sess->cdb), + ret = dpaa_sec_init_rx(sess->inq[rte_lcore_id() % MAX_DPAA_CORES], + dpaa_mem_vtop(&sess->cdb), qman_fq_fqid(&qp->outq)); if (ret) DPAA_SEC_ERR("Unable to init sec queue"); @@ -1734,40 +1994,13 @@ dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess) return ret; } -static int -dpaa_sec_qp_attach_sess(struct rte_cryptodev *dev __rte_unused, - uint16_t qp_id __rte_unused, - void *ses __rte_unused) -{ - PMD_INIT_FUNC_TRACE(); - return 0; -} - -static int -dpaa_sec_qp_detach_sess(struct rte_cryptodev *dev, - uint16_t qp_id __rte_unused, - void *ses) -{ - dpaa_sec_session *sess = ses; - struct dpaa_sec_dev_private *qi = dev->data->dev_private; - - PMD_INIT_FUNC_TRACE(); - - if (sess->inq) - dpaa_sec_detach_rxq(qi, sess->inq); - sess->inq = NULL; - - sess->qp = NULL; - - return 0; -} - static int dpaa_sec_set_session_parameters(struct rte_cryptodev *dev, struct rte_crypto_sym_xform *xform, void *sess) { struct dpaa_sec_dev_private *internals = dev->data->dev_private; dpaa_sec_session *session = sess; + uint32_t i; PMD_INIT_FUNC_TRACE(); @@ -1775,6 +2008,7 @@ dpaa_sec_set_session_parameters(struct rte_cryptodev *dev, DPAA_SEC_ERR("invalid session struct"); return -EINVAL; } + memset(session, 0, sizeof(dpaa_sec_session)); /* Default IV length = 0 */ session->iv.length = 0; @@ -1822,11 +2056,16 @@ dpaa_sec_set_session_parameters(struct rte_cryptodev *dev, return -EINVAL; } session->ctx_pool = internals->ctx_pool; - session->inq = dpaa_sec_attach_rxq(internals); - if (session->inq == NULL) { - DPAA_SEC_ERR("unable to attach sec queue"); - goto err1; + rte_spinlock_lock(&internals->lock); + for (i = 0; i < MAX_DPAA_CORES; i++) { + session->inq[i] = dpaa_sec_attach_rxq(internals); + if (session->inq[i] == NULL) { + DPAA_SEC_ERR("unable to attach sec queue"); + rte_spinlock_unlock(&internals->lock); + goto err1; + } } + rte_spinlock_unlock(&internals->lock); return 0; @@ -1839,7 +2078,7 @@ err1: } static int -dpaa_sec_session_configure(struct rte_cryptodev *dev, +dpaa_sec_sym_session_configure(struct rte_cryptodev *dev, struct rte_crypto_sym_xform *xform, struct rte_cryptodev_sym_session *sess, struct rte_mempool *mempool) @@ -1863,7 +2102,7 @@ dpaa_sec_session_configure(struct rte_cryptodev *dev, return ret; } - set_session_private_data(sess, dev->driver_id, + set_sym_session_private_data(sess, dev->driver_id, sess_private_data); @@ -1872,12 +2111,12 @@ dpaa_sec_session_configure(struct rte_cryptodev *dev, /** Clear the memory of session so it doesn't leave key material behind */ static void -dpaa_sec_session_clear(struct rte_cryptodev *dev, +dpaa_sec_sym_session_clear(struct rte_cryptodev *dev, struct rte_cryptodev_sym_session *sess) { struct dpaa_sec_dev_private *qi = dev->data->dev_private; - uint8_t index = dev->driver_id; - void *sess_priv = get_session_private_data(sess, index); + uint8_t index = dev->driver_id, i; + void *sess_priv = get_sym_session_private_data(sess, index); PMD_INIT_FUNC_TRACE(); @@ -1886,12 +2125,16 @@ dpaa_sec_session_clear(struct rte_cryptodev *dev, if (sess_priv) { struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv); - if (s->inq) - dpaa_sec_detach_rxq(qi, s->inq); + for (i = 0; i < MAX_DPAA_CORES; i++) { + if (s->inq[i]) + dpaa_sec_detach_rxq(qi, s->inq[i]); + s->inq[i] = NULL; + s->qp[i] = NULL; + } rte_free(s->cipher_key.data); rte_free(s->auth_key.data); memset(s, 0, sizeof(dpaa_sec_session)); - set_session_private_data(sess, index, NULL); + set_sym_session_private_data(sess, index, NULL); 
rte_mempool_put(sess_mp, sess_priv); } } @@ -1903,111 +2146,87 @@ dpaa_sec_set_ipsec_session(__rte_unused struct rte_cryptodev *dev, { struct dpaa_sec_dev_private *internals = dev->data->dev_private; struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec; - struct rte_crypto_auth_xform *auth_xform; - struct rte_crypto_cipher_xform *cipher_xform; + struct rte_crypto_auth_xform *auth_xform = NULL; + struct rte_crypto_cipher_xform *cipher_xform = NULL; dpaa_sec_session *session = (dpaa_sec_session *)sess; + uint32_t i; PMD_INIT_FUNC_TRACE(); + memset(session, 0, sizeof(dpaa_sec_session)); if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) { cipher_xform = &conf->crypto_xform->cipher; - auth_xform = &conf->crypto_xform->next->auth; + if (conf->crypto_xform->next) + auth_xform = &conf->crypto_xform->next->auth; } else { auth_xform = &conf->crypto_xform->auth; - cipher_xform = &conf->crypto_xform->next->cipher; + if (conf->crypto_xform->next) + cipher_xform = &conf->crypto_xform->next->cipher; } session->proto_alg = conf->protocol; - session->cipher_key.data = rte_zmalloc(NULL, - cipher_xform->key.length, - RTE_CACHE_LINE_SIZE); - if (session->cipher_key.data == NULL && - cipher_xform->key.length > 0) { - DPAA_SEC_ERR("No Memory for cipher key"); - return -ENOMEM; - } - session->cipher_key.length = cipher_xform->key.length; - session->auth_key.data = rte_zmalloc(NULL, - auth_xform->key.length, - RTE_CACHE_LINE_SIZE); - if (session->auth_key.data == NULL && - auth_xform->key.length > 0) { - DPAA_SEC_ERR("No Memory for auth key"); - rte_free(session->cipher_key.data); - return -ENOMEM; + if (cipher_xform && cipher_xform->algo != RTE_CRYPTO_CIPHER_NULL) { + session->cipher_key.data = rte_zmalloc(NULL, + cipher_xform->key.length, + RTE_CACHE_LINE_SIZE); + if (session->cipher_key.data == NULL && + cipher_xform->key.length > 0) { + DPAA_SEC_ERR("No Memory for cipher key"); + return -ENOMEM; + } + memcpy(session->cipher_key.data, cipher_xform->key.data, + cipher_xform->key.length); + session->cipher_key.length = cipher_xform->key.length; + + switch (cipher_xform->algo) { + case RTE_CRYPTO_CIPHER_AES_CBC: + case RTE_CRYPTO_CIPHER_3DES_CBC: + case RTE_CRYPTO_CIPHER_AES_CTR: + break; + default: + DPAA_SEC_ERR("Crypto: Unsupported Cipher alg %u", + cipher_xform->algo); + goto out; + } + session->cipher_alg = cipher_xform->algo; + } else { + session->cipher_key.data = NULL; + session->cipher_key.length = 0; + session->cipher_alg = RTE_CRYPTO_CIPHER_NULL; } - session->auth_key.length = auth_xform->key.length; - memcpy(session->cipher_key.data, cipher_xform->key.data, - cipher_xform->key.length); - memcpy(session->auth_key.data, auth_xform->key.data, - auth_xform->key.length); - switch (auth_xform->algo) { - case RTE_CRYPTO_AUTH_SHA1_HMAC: - session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC; - break; - case RTE_CRYPTO_AUTH_MD5_HMAC: - session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC; - break; - case RTE_CRYPTO_AUTH_SHA256_HMAC: - session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC; - break; - case RTE_CRYPTO_AUTH_SHA384_HMAC: - session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC; - break; - case RTE_CRYPTO_AUTH_SHA512_HMAC: - session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC; - break; - case RTE_CRYPTO_AUTH_AES_CMAC: - session->auth_alg = RTE_CRYPTO_AUTH_AES_CMAC; - break; - case RTE_CRYPTO_AUTH_NULL: + if (auth_xform && auth_xform->algo != RTE_CRYPTO_AUTH_NULL) { + session->auth_key.data = rte_zmalloc(NULL, + auth_xform->key.length, + RTE_CACHE_LINE_SIZE); + if (session->auth_key.data == NULL && + 
auth_xform->key.length > 0) { + DPAA_SEC_ERR("No Memory for auth key"); + rte_free(session->cipher_key.data); + return -ENOMEM; + } + memcpy(session->auth_key.data, auth_xform->key.data, + auth_xform->key.length); + session->auth_key.length = auth_xform->key.length; + + switch (auth_xform->algo) { + case RTE_CRYPTO_AUTH_SHA1_HMAC: + case RTE_CRYPTO_AUTH_MD5_HMAC: + case RTE_CRYPTO_AUTH_SHA256_HMAC: + case RTE_CRYPTO_AUTH_SHA384_HMAC: + case RTE_CRYPTO_AUTH_SHA512_HMAC: + case RTE_CRYPTO_AUTH_AES_CMAC: + break; + default: + DPAA_SEC_ERR("Crypto: Unsupported auth alg %u", + auth_xform->algo); + goto out; + } + session->auth_alg = auth_xform->algo; + } else { + session->auth_key.data = NULL; + session->auth_key.length = 0; session->auth_alg = RTE_CRYPTO_AUTH_NULL; - break; - case RTE_CRYPTO_AUTH_SHA224_HMAC: - case RTE_CRYPTO_AUTH_AES_XCBC_MAC: - case RTE_CRYPTO_AUTH_SNOW3G_UIA2: - case RTE_CRYPTO_AUTH_SHA1: - case RTE_CRYPTO_AUTH_SHA256: - case RTE_CRYPTO_AUTH_SHA512: - case RTE_CRYPTO_AUTH_SHA224: - case RTE_CRYPTO_AUTH_SHA384: - case RTE_CRYPTO_AUTH_MD5: - case RTE_CRYPTO_AUTH_AES_GMAC: - case RTE_CRYPTO_AUTH_KASUMI_F9: - case RTE_CRYPTO_AUTH_AES_CBC_MAC: - case RTE_CRYPTO_AUTH_ZUC_EIA3: - DPAA_SEC_ERR("Crypto: Unsupported auth alg %u", - auth_xform->algo); - goto out; - default: - DPAA_SEC_ERR("Crypto: Undefined Auth specified %u", - auth_xform->algo); - goto out; - } - - switch (cipher_xform->algo) { - case RTE_CRYPTO_CIPHER_AES_CBC: - session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC; - break; - case RTE_CRYPTO_CIPHER_3DES_CBC: - session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC; - break; - case RTE_CRYPTO_CIPHER_AES_CTR: - session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR; - break; - case RTE_CRYPTO_CIPHER_NULL: - case RTE_CRYPTO_CIPHER_SNOW3G_UEA2: - case RTE_CRYPTO_CIPHER_3DES_ECB: - case RTE_CRYPTO_CIPHER_AES_ECB: - case RTE_CRYPTO_CIPHER_KASUMI_F8: - DPAA_SEC_ERR("Crypto: Unsupported Cipher alg %u", - cipher_xform->algo); - goto out; - default: - DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u", - cipher_xform->algo); - goto out; } if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) { @@ -2035,7 +2254,8 @@ dpaa_sec_set_ipsec_session(__rte_unused struct rte_cryptodev *dev, (IPVERSION << PDBNH_ESP_ENCAP_SHIFT) | PDBOPTS_ESP_OIHI_PDB_INL | PDBOPTS_ESP_IVSRC | - PDBHMO_ESP_ENCAP_DTTL; + PDBHMO_ESP_ENCAP_DTTL | + PDBHMO_ESP_SNR; session->encap_pdb.spi = ipsec_xform->spi; session->encap_pdb.ip_hdr_len = sizeof(struct ip); @@ -2048,13 +2268,127 @@ dpaa_sec_set_ipsec_session(__rte_unused struct rte_cryptodev *dev, } else goto out; session->ctx_pool = internals->ctx_pool; - session->inq = dpaa_sec_attach_rxq(internals); - if (session->inq == NULL) { - DPAA_SEC_ERR("unable to attach sec queue"); - goto out; + rte_spinlock_lock(&internals->lock); + for (i = 0; i < MAX_DPAA_CORES; i++) { + session->inq[i] = dpaa_sec_attach_rxq(internals); + if (session->inq[i] == NULL) { + DPAA_SEC_ERR("unable to attach sec queue"); + rte_spinlock_unlock(&internals->lock); + goto out; + } } + rte_spinlock_unlock(&internals->lock); + + return 0; +out: + rte_free(session->auth_key.data); + rte_free(session->cipher_key.data); + memset(session, 0, sizeof(dpaa_sec_session)); + return -1; +} + +static int +dpaa_sec_set_pdcp_session(struct rte_cryptodev *dev, + struct rte_security_session_conf *conf, + void *sess) +{ + struct rte_security_pdcp_xform *pdcp_xform = &conf->pdcp; + struct rte_crypto_sym_xform *xform = conf->crypto_xform; + struct rte_crypto_auth_xform *auth_xform = NULL; + struct 
rte_crypto_cipher_xform *cipher_xform = NULL; + dpaa_sec_session *session = (dpaa_sec_session *)sess; + struct dpaa_sec_dev_private *dev_priv = dev->data->dev_private; + uint32_t i; + PMD_INIT_FUNC_TRACE(); + memset(session, 0, sizeof(dpaa_sec_session)); + + /* find xfrm types */ + if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) { + cipher_xform = &xform->cipher; + if (xform->next != NULL) + auth_xform = &xform->next->auth; + } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) { + auth_xform = &xform->auth; + if (xform->next != NULL) + cipher_xform = &xform->next->cipher; + } else { + DPAA_SEC_ERR("Invalid crypto type"); + return -EINVAL; + } + + session->proto_alg = conf->protocol; + if (cipher_xform) { + session->cipher_key.data = rte_zmalloc(NULL, + cipher_xform->key.length, + RTE_CACHE_LINE_SIZE); + if (session->cipher_key.data == NULL && + cipher_xform->key.length > 0) { + DPAA_SEC_ERR("No Memory for cipher key"); + return -ENOMEM; + } + session->cipher_key.length = cipher_xform->key.length; + memcpy(session->cipher_key.data, cipher_xform->key.data, + cipher_xform->key.length); + session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ? + DIR_ENC : DIR_DEC; + session->cipher_alg = cipher_xform->algo; + } else { + session->cipher_key.data = NULL; + session->cipher_key.length = 0; + session->cipher_alg = RTE_CRYPTO_CIPHER_NULL; + session->dir = DIR_ENC; + } + + /* Auth is only applicable for control mode operation. */ + if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) { + if (pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_5) { + DPAA_SEC_ERR( + "PDCP Seq Num size should be 5 bits for cmode"); + goto out; + } + if (auth_xform) { + session->auth_key.data = rte_zmalloc(NULL, + auth_xform->key.length, + RTE_CACHE_LINE_SIZE); + if (session->auth_key.data == NULL && + auth_xform->key.length > 0) { + DPAA_SEC_ERR("No Memory for auth key"); + rte_free(session->cipher_key.data); + return -ENOMEM; + } + session->auth_key.length = auth_xform->key.length; + memcpy(session->auth_key.data, auth_xform->key.data, + auth_xform->key.length); + session->auth_alg = auth_xform->algo; + } else { + session->auth_key.data = NULL; + session->auth_key.length = 0; + session->auth_alg = RTE_CRYPTO_AUTH_NULL; + } + } + session->pdcp.domain = pdcp_xform->domain; + session->pdcp.bearer = pdcp_xform->bearer; + session->pdcp.pkt_dir = pdcp_xform->pkt_dir; + session->pdcp.sn_size = pdcp_xform->sn_size; +#ifdef ENABLE_HFN_OVERRIDE + session->pdcp.hfn_ovd = pdcp_xform->hfn_ovd; +#endif + session->pdcp.hfn = pdcp_xform->hfn; + session->pdcp.hfn_threshold = pdcp_xform->hfn_threshold; + + session->ctx_pool = dev_priv->ctx_pool; + rte_spinlock_lock(&dev_priv->lock); + for (i = 0; i < MAX_DPAA_CORES; i++) { + session->inq[i] = dpaa_sec_attach_rxq(dev_priv); + if (session->inq[i] == NULL) { + DPAA_SEC_ERR("unable to attach sec queue"); + rte_spinlock_unlock(&dev_priv->lock); + goto out; + } + } + rte_spinlock_unlock(&dev_priv->lock); return 0; out: rte_free(session->auth_key.data); @@ -2083,6 +2417,10 @@ dpaa_sec_security_session_create(void *dev, ret = dpaa_sec_set_ipsec_session(cdev, conf, sess_private_data); break; + case RTE_SECURITY_PROTOCOL_PDCP: + ret = dpaa_sec_set_pdcp_session(cdev, conf, + sess_private_data); + break; case RTE_SECURITY_PROTOCOL_MACSEC: return -ENOTSUP; default: @@ -2115,7 +2453,7 @@ dpaa_sec_security_session_destroy(void *dev __rte_unused, rte_free(s->cipher_key.data); rte_free(s->auth_key.data); - memset(sess, 0, sizeof(dpaa_sec_session)); + memset(s, 0, 
sizeof(dpaa_sec_session)); set_sec_session_private_data(sess, NULL); rte_mempool_put(sess_mp, sess_priv); } @@ -2134,7 +2472,7 @@ dpaa_sec_dev_configure(struct rte_cryptodev *dev, PMD_INIT_FUNC_TRACE(); internals = dev->data->dev_private; - sprintf(str, "ctx_pool_%d", dev->data->dev_id); + snprintf(str, sizeof(str), "ctx_pool_%d", dev->data->dev_id); if (!internals->ctx_pool) { internals->ctx_pool = rte_mempool_create((const char *)str, CTX_POOL_NUM_BUFS, @@ -2208,11 +2546,9 @@ static struct rte_cryptodev_ops crypto_ops = { .queue_pair_setup = dpaa_sec_queue_pair_setup, .queue_pair_release = dpaa_sec_queue_pair_release, .queue_pair_count = dpaa_sec_queue_pair_count, - .session_get_size = dpaa_sec_session_get_size, - .session_configure = dpaa_sec_session_configure, - .session_clear = dpaa_sec_session_clear, - .qp_attach_session = dpaa_sec_qp_attach_sess, - .qp_detach_session = dpaa_sec_qp_detach_sess, + .sym_session_get_size = dpaa_sec_sym_session_get_size, + .sym_session_configure = dpaa_sec_sym_session_configure, + .sym_session_clear = dpaa_sec_sym_session_clear }; static const struct rte_security_capability * @@ -2221,7 +2557,7 @@ dpaa_sec_capabilities_get(void *device __rte_unused) return dpaa_sec_security_cap; } -struct rte_security_ops dpaa_sec_security_ops = { +static const struct rte_security_ops dpaa_sec_security_ops = { .session_create = dpaa_sec_security_session_create, .session_update = NULL, .session_stats_get = NULL, @@ -2301,6 +2637,7 @@ dpaa_sec_dev_init(struct rte_cryptodev *cryptodev) security_instance->sess_cnt = 0; cryptodev->security_ctx = security_instance; + rte_spinlock_init(&internals->lock); for (i = 0; i < internals->max_nb_queue_pairs; i++) { /* init qman fq for queue pair */ qp = &internals->qps[i]; @@ -2313,7 +2650,7 @@ dpaa_sec_dev_init(struct rte_cryptodev *cryptodev) flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_DYNAMIC_FQID | QMAN_FQ_FLAG_TO_DCPORTAL; - for (i = 0; i < internals->max_nb_sessions; i++) { + for (i = 0; i < MAX_DPAA_CORES * internals->max_nb_sessions; i++) { /* create rx qman fq for sessions*/ ret = qman_create_fq(0, flags, &internals->inq[i]); if (unlikely(ret != 0)) { @@ -2333,7 +2670,7 @@ init_error: } static int -cryptodev_dpaa_sec_probe(struct rte_dpaa_driver *dpaa_drv, +cryptodev_dpaa_sec_probe(struct rte_dpaa_driver *dpaa_drv __rte_unused, struct rte_dpaa_device *dpaa_dev) { struct rte_cryptodev *cryptodev; @@ -2341,7 +2678,8 @@ cryptodev_dpaa_sec_probe(struct rte_dpaa_driver *dpaa_drv, int retval; - sprintf(cryptodev_name, "dpaa_sec-%d", dpaa_dev->id.dev_id); + snprintf(cryptodev_name, sizeof(cryptodev_name), "dpaa_sec-%d", + dpaa_dev->id.dev_id); cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id()); if (cryptodev == NULL) @@ -2361,7 +2699,6 @@ cryptodev_dpaa_sec_probe(struct rte_dpaa_driver *dpaa_drv, dpaa_dev->crypto_dev = cryptodev; cryptodev->device = &dpaa_dev->device; - cryptodev->device->driver = &dpaa_drv->driver; /* init user callbacks */ TAILQ_INIT(&(cryptodev->link_intr_cbs)); @@ -2428,9 +2765,7 @@ RTE_PMD_REGISTER_DPAA(CRYPTODEV_NAME_DPAA_SEC_PMD, rte_dpaa_sec_driver); RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver.driver, cryptodev_driver_id); -RTE_INIT(dpaa_sec_init_log); -static void -dpaa_sec_init_log(void) +RTE_INIT(dpaa_sec_init_log) { dpaa_logtype_sec = rte_log_register("pmd.crypto.dpaa"); if (dpaa_logtype_sec >= 0)
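 rte_log_set_level(dpaa_logtype_sec, RTE_LOG_NOTICE);
 }

Two of the constructs this patch introduces are easy to misread, so brief standalone sketches follow. They are illustrative reconstructions, not part of the patch; the constant values and helper names are assumptions modelled on the DPAA headers.

First, the volatile-dequeue sizing added to dpaa_sec_deq(): for fewer than four frames the driver asks QMan for the exact count, otherwise it omits QM_VDQCR_EXACT and compensates for the hardware's possible two-frame overshoot. A minimal sketch of just that mapping:

#include <stdint.h>
#include <stdio.h>

#define DPAA_MAX_DEQUEUE_NUM_FRAMES 63	/* assumed value, per the DPAA driver headers */
#define QM_VDQCR_EXACT 0x40000000	/* assumed value, per fsl_qman.h */

/* Mirrors the nb_ops -> (frame count, VDQCR flags) mapping in dpaa_sec_deq(). */
static int vdq_frame_count(int nb_ops, uint32_t *vdqcr_flags)
{
	if (nb_ops < 4) {
		*vdqcr_flags = QM_VDQCR_EXACT;	/* exact count, no overshoot */
		return nb_ops;
	}
	*vdqcr_flags = 0;	/* QMan may return up to two extra frames */
	return nb_ops > DPAA_MAX_DEQUEUE_NUM_FRAMES ?
		DPAA_MAX_DEQUEUE_NUM_FRAMES - 2 : nb_ops - 2;
}

int main(void)
{
	uint32_t flags;

	printf("%d\n", vdq_frame_count(2, &flags));	/* 2, with the EXACT flag */
	printf("%d\n", vdq_frame_count(16, &flags));	/* 14, may yield up to 16 */
	return 0;
}

Second, the rta_inline_query() handshake used by the new dpaa_sec_prep_ipsec_cdb() and dpaa_sec_prep_pdcp_cdb() helpers: the key lengths are staged in sh_desc[0] and sh_desc[1], rta_inline_query() leaves a bitmask in sh_desc[2], and bit i of that mask says whether key i (0 = cipher, 1 = auth) still fits inline in the shared descriptor. A key whose bit is clear is converted from an immediate value (RTA_DATA_IMM) to a physical-address reference (RTA_DATA_PTR) via dpaa_mem_vtop(), after which all three scratch words are zeroed before the real descriptor is built. A sketch of only the bitmask test, with a helper name of my own choosing:

#include <stdbool.h>
#include <stdint.h>

/* True if key key_idx (0 = cipher, 1 = auth) may stay immediate in the
 * shared descriptor, given the mask rta_inline_query() left in sh_desc[2].
 * A zero-length key never needs conversion.
 */
static bool key_fits_inline(uint32_t inl_mask, unsigned int key_idx,
			    unsigned int keylen)
{
	return keylen == 0 || (inl_mask & (1u << key_idx));
}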