#include <rte_cryptodev_pmd.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>
+#ifdef RTE_LIBRTE_SECURITY
#include <rte_security_driver.h>
+#endif
#include <rte_cycles.h>
#include <rte_dev.h>
#include <rte_kvargs.h>
#include <fsl_usd.h>
#include <fsl_qman.h>
-#include <of.h>
+#include <dpaa_of.h>
/* RTA header files */
#include <desc/common.h>
return ctx;
}
-static inline rte_iova_t
-dpaa_mem_vtop(void *vaddr)
-{
- const struct rte_memseg *ms;
-
- ms = rte_mem_virt2memseg(vaddr, NULL);
- if (ms) {
- dpaax_iova_table_update(ms->iova, ms->addr, ms->len);
- return ms->iova + RTE_PTR_DIFF(vaddr, ms->addr);
- }
- return (size_t)NULL;
-}
-
-static inline void *
-dpaa_mem_ptov(rte_iova_t paddr)
-{
- void *va;
-
- va = (void *)dpaax_iova_table_get_va(paddr);
- if (likely(va))
- return va;
-
- return rte_mem_iova2virt(paddr);
-}
-
static void
ern_sec_fq_handler(struct qman_portal *qm __rte_unused,
struct qman_fq *fq,
* sg[0] is for output
* sg[1] for input
*/
- job = dpaa_mem_ptov(qm_fd_addr_get64(fd));
+ job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));
ctx = container_of(job, struct dpaa_sec_op_ctx, job);
ctx->fd_status = fd->status;
return ses->dir == DIR_DEC;
}
+#ifdef RTE_LIBRTE_SECURITY
static int
dpaa_sec_prep_pdcp_cdb(dpaa_sec_session *ses)
{
if (!(cdb->sh_desc[2] & 1) && cipherdata.keylen) {
cipherdata.key =
- (size_t)dpaa_mem_vtop((void *)(size_t)cipherdata.key);
+ (size_t)rte_dpaa_mem_vtop((void *)(size_t)cipherdata.key);
cipherdata.key_type = RTA_DATA_PTR;
}
if (!(cdb->sh_desc[2] & (1 << 1)) && authdata.keylen) {
authdata.key =
- (size_t)dpaa_mem_vtop((void *)(size_t)authdata.key);
+ (size_t)rte_dpaa_mem_vtop((void *)(size_t)authdata.key);
authdata.key_type = RTA_DATA_PTR;
}
ses->pdcp.hfn_threshold,
&cipherdata, p_authdata, 0);
}
-
return shared_desc_len;
}
cipherdata.algtype = ses->cipher_key.alg;
cipherdata.algmode = ses->cipher_key.algmode;
- authdata.key = (size_t)ses->auth_key.data;
- authdata.keylen = ses->auth_key.length;
- authdata.key_enc_flags = 0;
- authdata.key_type = RTA_DATA_IMM;
- authdata.algtype = ses->auth_key.alg;
- authdata.algmode = ses->auth_key.algmode;
+ if (ses->auth_key.length) {
+ authdata.key = (size_t)ses->auth_key.data;
+ authdata.keylen = ses->auth_key.length;
+ authdata.key_enc_flags = 0;
+ authdata.key_type = RTA_DATA_IMM;
+ authdata.algtype = ses->auth_key.alg;
+ authdata.algmode = ses->auth_key.algmode;
+ }
cdb->sh_desc[0] = cipherdata.keylen;
cdb->sh_desc[1] = authdata.keylen;
if (cdb->sh_desc[2] & 1)
cipherdata.key_type = RTA_DATA_IMM;
else {
- cipherdata.key = (size_t)dpaa_mem_vtop(
+ cipherdata.key = (size_t)rte_dpaa_mem_vtop(
(void *)(size_t)cipherdata.key);
cipherdata.key_type = RTA_DATA_PTR;
}
if (cdb->sh_desc[2] & (1<<1))
authdata.key_type = RTA_DATA_IMM;
else {
- authdata.key = (size_t)dpaa_mem_vtop(
+ authdata.key = (size_t)rte_dpaa_mem_vtop(
(void *)(size_t)authdata.key);
authdata.key_type = RTA_DATA_PTR;
}
}
return shared_desc_len;
}
-
+#endif
/* prepare command block of the session */
static int
dpaa_sec_prep_cdb(dpaa_sec_session *ses)
memset(cdb, 0, sizeof(struct sec_cdb));
switch (ses->ctxt) {
+#ifdef RTE_LIBRTE_SECURITY
case DPAA_SEC_IPSEC:
shared_desc_len = dpaa_sec_prep_ipsec_cdb(ses);
break;
case DPAA_SEC_PDCP:
shared_desc_len = dpaa_sec_prep_pdcp_cdb(ses);
break;
+#endif
case DPAA_SEC_CIPHER:
alginfo_c.key = (size_t)ses->cipher_key.data;
alginfo_c.keylen = ses->cipher_key.length;
shared_desc_len = cnstr_shdsc_blkcipher(
cdb->sh_desc, true,
swap, SHR_NEVER, &alginfo_c,
- NULL,
ses->iv.length,
ses->dir);
break;
if (cdb->sh_desc[2] & 1)
alginfo_c.key_type = RTA_DATA_IMM;
else {
- alginfo_c.key = (size_t)dpaa_mem_vtop(
+ alginfo_c.key = (size_t)rte_dpaa_mem_vtop(
(void *)(size_t)alginfo_c.key);
alginfo_c.key_type = RTA_DATA_PTR;
}
if (cdb->sh_desc[2] & (1<<1))
alginfo_a.key_type = RTA_DATA_IMM;
else {
- alginfo_a.key = (size_t)dpaa_mem_vtop(
+ alginfo_a.key = (size_t)rte_dpaa_mem_vtop(
(void *)(size_t)alginfo_a.key);
alginfo_a.key_type = RTA_DATA_PTR;
}
* sg[0] is for output
* sg[1] for input
*/
- job = dpaa_mem_ptov(qm_fd_addr_get64(fd));
+ job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));
ctx = container_of(job, struct dpaa_sec_op_ctx, job);
ctx->fd_status = fd->status;
in_sg->extension = 1;
in_sg->final = 1;
in_sg->length = data_len;
- qm_sg_entry_set64(in_sg, dpaa_mem_vtop(&cf->sg[2]));
+ qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
/* 1st seg */
sg = in_sg + 1;
} else {
sg->length = ses->iv.length;
}
- qm_sg_entry_set64(sg, dpaa_mem_vtop(iv_ptr));
+ qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(iv_ptr));
in_sg->length += sg->length;
cpu_to_hw_sg(sg);
sg++;
sg++;
rte_memcpy(old_digest, sym->auth.digest.data,
ses->digest_length);
- start_addr = dpaa_mem_vtop(old_digest);
+ start_addr = rte_dpaa_mem_vtop(old_digest);
qm_sg_entry_set64(sg, start_addr);
sg->length = ses->digest_length;
in_sg->length += ses->digest_length;
in_sg->extension = 1;
in_sg->final = 1;
in_sg->length = data_len;
- qm_sg_entry_set64(in_sg, dpaa_mem_vtop(&cf->sg[2]));
+ qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
sg = &cf->sg[2];
if (ses->iv.length) {
} else {
sg->length = ses->iv.length;
}
- qm_sg_entry_set64(sg, dpaa_mem_vtop(iv_ptr));
+ qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(iv_ptr));
in_sg->length += sg->length;
cpu_to_hw_sg(sg);
sg++;
rte_memcpy(old_digest, sym->auth.digest.data,
ses->digest_length);
/* let's check digest by hw */
- start_addr = dpaa_mem_vtop(old_digest);
+ start_addr = rte_dpaa_mem_vtop(old_digest);
sg++;
qm_sg_entry_set64(sg, start_addr);
sg->length = ses->digest_length;
out_sg = &cf->sg[0];
out_sg->extension = 1;
out_sg->length = data_len;
- qm_sg_entry_set64(out_sg, dpaa_mem_vtop(&cf->sg[2]));
+ qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
cpu_to_hw_sg(out_sg);
/* 1st seg */
in_sg->length = data_len + ses->iv.length;
sg++;
- qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
+ qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
cpu_to_hw_sg(in_sg);
/* IV */
- qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
+ qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
sg->length = ses->iv.length;
cpu_to_hw_sg(sg);
sg->extension = 1;
sg->final = 1;
sg->length = data_len + ses->iv.length;
- qm_sg_entry_set64(sg, dpaa_mem_vtop(&cf->sg[2]));
+ qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(&cf->sg[2]));
cpu_to_hw_sg(sg);
sg = &cf->sg[2];
- qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
+ qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
sg->length = ses->iv.length;
cpu_to_hw_sg(sg);
/* output sg entries */
sg = &cf->sg[2];
- qm_sg_entry_set64(out_sg, dpaa_mem_vtop(sg));
+ qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(sg));
cpu_to_hw_sg(out_sg);
/* 1st seg */
/* input sg entries */
sg++;
- qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
+ qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
cpu_to_hw_sg(in_sg);
/* 1st seg IV */
- qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
+ qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
sg->length = ses->iv.length;
cpu_to_hw_sg(sg);
/* 2nd seg auth only */
if (ses->auth_only_len) {
sg++;
- qm_sg_entry_set64(sg, dpaa_mem_vtop(sym->aead.aad.data));
+ qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(sym->aead.aad.data));
sg->length = ses->auth_only_len;
cpu_to_hw_sg(sg);
}
sg++;
memcpy(ctx->digest, sym->aead.digest.data,
ses->digest_length);
- qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
+ qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
sg->length = ses->digest_length;
}
sg->final = 1;
/* input */
rte_prefetch0(cf->sg);
sg = &cf->sg[2];
- qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop(sg));
+ qm_sg_entry_set64(&cf->sg[1], rte_dpaa_mem_vtop(sg));
if (is_encode(ses)) {
- qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
+ qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
sg->length = ses->iv.length;
length += sg->length;
cpu_to_hw_sg(sg);
sg++;
if (ses->auth_only_len) {
qm_sg_entry_set64(sg,
- dpaa_mem_vtop(sym->aead.aad.data));
+ rte_dpaa_mem_vtop(sym->aead.aad.data));
sg->length = ses->auth_only_len;
length += sg->length;
cpu_to_hw_sg(sg);
sg->final = 1;
cpu_to_hw_sg(sg);
} else {
- qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
+ qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
sg->length = ses->iv.length;
length += sg->length;
cpu_to_hw_sg(sg);
sg++;
if (ses->auth_only_len) {
qm_sg_entry_set64(sg,
- dpaa_mem_vtop(sym->aead.aad.data));
+ rte_dpaa_mem_vtop(sym->aead.aad.data));
sg->length = ses->auth_only_len;
length += sg->length;
cpu_to_hw_sg(sg);
ses->digest_length);
sg++;
- qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
+ qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
sg->length = ses->digest_length;
length += sg->length;
sg->final = 1;
/* output */
sg++;
- qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg));
+ qm_sg_entry_set64(&cf->sg[0], rte_dpaa_mem_vtop(sg));
qm_sg_entry_set64(sg,
dst_start_addr + sym->aead.data.offset);
sg->length = sym->aead.data.length;
/* output sg entries */
sg = &cf->sg[2];
- qm_sg_entry_set64(out_sg, dpaa_mem_vtop(sg));
+ qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(sg));
cpu_to_hw_sg(out_sg);
/* 1st seg */
/* input sg entries */
sg++;
- qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
+ qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
cpu_to_hw_sg(in_sg);
/* 1st seg IV */
- qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
+ qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
sg->length = ses->iv.length;
cpu_to_hw_sg(sg);
sg++;
memcpy(ctx->digest, sym->auth.digest.data,
ses->digest_length);
- qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
+ qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
sg->length = ses->digest_length;
}
sg->final = 1;
/* input */
rte_prefetch0(cf->sg);
sg = &cf->sg[2];
- qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop(sg));
+ qm_sg_entry_set64(&cf->sg[1], rte_dpaa_mem_vtop(sg));
if (is_encode(ses)) {
- qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
+ qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
sg->length = ses->iv.length;
length += sg->length;
cpu_to_hw_sg(sg);
sg->final = 1;
cpu_to_hw_sg(sg);
} else {
- qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
+ qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
sg->length = ses->iv.length;
length += sg->length;
cpu_to_hw_sg(sg);
ses->digest_length);
sg++;
- qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
+ qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
sg->length = ses->digest_length;
length += sg->length;
sg->final = 1;
/* output */
sg++;
- qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg));
+ qm_sg_entry_set64(&cf->sg[0], rte_dpaa_mem_vtop(sg));
qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
sg->length = sym->cipher.data.length;
length = sg->length;
return cf;
}
+#ifdef RTE_LIBRTE_SECURITY
static inline struct dpaa_sec_job *
build_proto(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
/* output */
out_sg = &cf->sg[0];
out_sg->extension = 1;
- qm_sg_entry_set64(out_sg, dpaa_mem_vtop(&cf->sg[2]));
+ qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
/* 1st seg */
sg = &cf->sg[2];
in_len = mbuf->data_len;
sg++;
- qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
+ qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
/* 1st seg */
qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
return cf;
}
+#endif
static uint16_t
dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
op->sym->session,
cryptodev_driver_id);
break;
+#ifdef RTE_LIBRTE_SECURITY
case RTE_CRYPTO_OP_SECURITY_SESSION:
ses = (dpaa_sec_session *)
get_sec_session_private_data(
op->sym->sec_session);
break;
+#endif
default:
DPAA_SEC_DP_ERR(
"sessionless crypto op not supported");
nb_ops = loop;
goto send_pkts;
}
+
+ if (!ses) {
+ DPAA_SEC_DP_ERR("session not available");
+ frames_to_send = loop;
+ nb_ops = loop;
+ goto send_pkts;
+ }
+
if (unlikely(!ses->qp[rte_lcore_id() % MAX_DPAA_CORES])) {
if (dpaa_sec_attach_sess_q(qp, ses)) {
frames_to_send = loop;
((op->sym->m_dst == NULL) ||
rte_pktmbuf_is_contiguous(op->sym->m_dst))) {
switch (ses->ctxt) {
+#ifdef RTE_LIBRTE_SECURITY
case DPAA_SEC_PDCP:
case DPAA_SEC_IPSEC:
cf = build_proto(op, ses);
break;
+#endif
case DPAA_SEC_AUTH:
cf = build_auth_only(op, ses);
break;
}
} else {
switch (ses->ctxt) {
+#ifdef RTE_LIBRTE_SECURITY
case DPAA_SEC_PDCP:
case DPAA_SEC_IPSEC:
cf = build_proto_sg(op, ses);
break;
+#endif
case DPAA_SEC_AUTH:
cf = build_auth_only_sg(op, ses);
break;
inq[loop] = ses->inq[rte_lcore_id() % MAX_DPAA_CORES];
fd->opaque_addr = 0;
fd->cmd = 0;
- qm_fd_addr_set64(fd, dpaa_mem_vtop(cf->sg));
+ qm_fd_addr_set64(fd, rte_dpaa_mem_vtop(cf->sg));
fd->_format1 = qm_fd_compound;
fd->length29 = 2 * sizeof(struct qm_sg_entry);
((auth_tail_len << 16) | auth_hdr_len);
}
+#ifdef RTE_LIBRTE_SECURITY
/* In case of PDCP, per packet HFN is stored in
* mbuf priv after sym_op.
*/
ses->pdcp.hfn_ovd_offset)),
ses->pdcp.hfn_ovd);
}
-
+#endif
}
send_pkts:
loop = 0;
struct rte_crypto_sym_xform *xform,
dpaa_sec_session *session)
{
+ session->ctxt = DPAA_SEC_CIPHER;
session->cipher_alg = xform->cipher.algo;
session->iv.length = xform->cipher.iv.length;
session->iv.offset = xform->cipher.iv.offset;
struct rte_crypto_sym_xform *xform,
dpaa_sec_session *session)
{
+ session->ctxt = DPAA_SEC_AUTH;
session->auth_alg = xform->auth.algo;
session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
RTE_CACHE_LINE_SIZE);
struct rte_crypto_cipher_xform *cipher_xform;
struct rte_crypto_auth_xform *auth_xform;
+ session->ctxt = DPAA_SEC_CIPHER_HASH;
if (session->auth_cipher_text) {
cipher_xform = &xform->cipher;
auth_xform = &xform->next->auth;
{
unsigned int i;
- for (i = 0; i < qi->max_nb_sessions * MAX_DPAA_CORES; i++) {
+ for (i = 0; i < RTE_DPAA_MAX_RX_QUEUE; i++) {
if (qi->inq_attach[i] == 0) {
qi->inq_attach[i] = 1;
return &qi->inq[i];
{
unsigned int i;
- for (i = 0; i < qi->max_nb_sessions; i++) {
+ for (i = 0; i < RTE_DPAA_MAX_RX_QUEUE; i++) {
if (&qi->inq[i] == fq) {
- qman_retire_fq(fq, NULL);
+ if (qman_retire_fq(fq, NULL) != 0)
+ DPAA_SEC_WARN("Queue is not retired\n");
qman_oos_fq(fq);
qi->inq_attach[i] = 0;
return 0;
}
}
ret = dpaa_sec_init_rx(sess->inq[rte_lcore_id() % MAX_DPAA_CORES],
- dpaa_mem_vtop(&sess->cdb),
+ rte_dpaa_mem_vtop(&sess->cdb),
qman_fq_fqid(&qp->outq));
if (ret)
DPAA_SEC_ERR("Unable to init sec queue");
struct dpaa_sec_dev_private *internals = dev->data->dev_private;
dpaa_sec_session *session = sess;
uint32_t i;
+ int ret;
PMD_INIT_FUNC_TRACE();
/* Cipher Only */
if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
session->auth_alg = RTE_CRYPTO_AUTH_NULL;
- session->ctxt = DPAA_SEC_CIPHER;
- dpaa_sec_cipher_init(dev, xform, session);
+ ret = dpaa_sec_cipher_init(dev, xform, session);
/* Authentication Only */
} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
xform->next == NULL) {
session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
session->ctxt = DPAA_SEC_AUTH;
- dpaa_sec_auth_init(dev, xform, session);
+ ret = dpaa_sec_auth_init(dev, xform, session);
/* Cipher then Authenticate */
} else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
- session->ctxt = DPAA_SEC_CIPHER_HASH;
session->auth_cipher_text = 1;
- dpaa_sec_chain_init(dev, xform, session);
+ if (xform->cipher.algo == RTE_CRYPTO_CIPHER_NULL)
+ ret = dpaa_sec_auth_init(dev, xform, session);
+ else if (xform->next->auth.algo == RTE_CRYPTO_AUTH_NULL)
+ ret = dpaa_sec_cipher_init(dev, xform, session);
+ else
+ ret = dpaa_sec_chain_init(dev, xform, session);
} else {
DPAA_SEC_ERR("Not supported: Auth then Cipher");
return -EINVAL;
} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
if (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
- session->ctxt = DPAA_SEC_CIPHER_HASH;
session->auth_cipher_text = 0;
- dpaa_sec_chain_init(dev, xform, session);
+ if (xform->auth.algo == RTE_CRYPTO_AUTH_NULL)
+ ret = dpaa_sec_cipher_init(dev, xform, session);
+ else if (xform->next->cipher.algo
+ == RTE_CRYPTO_CIPHER_NULL)
+ ret = dpaa_sec_auth_init(dev, xform, session);
+ else
+ ret = dpaa_sec_chain_init(dev, xform, session);
} else {
DPAA_SEC_ERR("Not supported: Auth then Cipher");
return -EINVAL;
/* AEAD operation for AES-GCM kind of Algorithms */
} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
xform->next == NULL) {
- dpaa_sec_aead_init(dev, xform, session);
+ ret = dpaa_sec_aead_init(dev, xform, session);
} else {
DPAA_SEC_ERR("Invalid crypto type");
return -EINVAL;
}
+ if (ret) {
+ DPAA_SEC_ERR("unable to init session");
+ goto err1;
+ }
+
rte_spinlock_lock(&internals->lock);
for (i = 0; i < MAX_DPAA_CORES; i++) {
session->inq[i] = dpaa_sec_attach_rxq(internals);
}
}
+#ifdef RTE_LIBRTE_SECURITY
static int
-dpaa_sec_set_ipsec_session(__rte_unused struct rte_cryptodev *dev,
- struct rte_security_session_conf *conf,
- void *sess)
+dpaa_sec_ipsec_aead_init(struct rte_crypto_aead_xform *aead_xform,
+ struct rte_security_ipsec_xform *ipsec_xform,
+ dpaa_sec_session *session)
{
- struct dpaa_sec_dev_private *internals = dev->data->dev_private;
- struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
- struct rte_crypto_auth_xform *auth_xform = NULL;
- struct rte_crypto_cipher_xform *cipher_xform = NULL;
- dpaa_sec_session *session = (dpaa_sec_session *)sess;
- uint32_t i;
-
PMD_INIT_FUNC_TRACE();
- memset(session, 0, sizeof(dpaa_sec_session));
- if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
- cipher_xform = &conf->crypto_xform->cipher;
- if (conf->crypto_xform->next)
- auth_xform = &conf->crypto_xform->next->auth;
- } else {
- auth_xform = &conf->crypto_xform->auth;
- if (conf->crypto_xform->next)
- cipher_xform = &conf->crypto_xform->next->cipher;
+ session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
+ RTE_CACHE_LINE_SIZE);
+ if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
+ DPAA_SEC_ERR("No Memory for aead key");
+ return -1;
}
- session->proto_alg = conf->protocol;
- session->ctxt = DPAA_SEC_IPSEC;
+ memcpy(session->aead_key.data, aead_xform->key.data,
+ aead_xform->key.length);
- if (cipher_xform && cipher_xform->algo != RTE_CRYPTO_CIPHER_NULL) {
+ session->digest_length = aead_xform->digest_length;
+ session->aead_key.length = aead_xform->key.length;
+
+ switch (aead_xform->algo) {
+ case RTE_CRYPTO_AEAD_AES_GCM:
+ switch (session->digest_length) {
+ case 8:
+ session->aead_key.alg = OP_PCL_IPSEC_AES_GCM8;
+ break;
+ case 12:
+ session->aead_key.alg = OP_PCL_IPSEC_AES_GCM12;
+ break;
+ case 16:
+ session->aead_key.alg = OP_PCL_IPSEC_AES_GCM16;
+ break;
+ default:
+ DPAA_SEC_ERR("Crypto: Undefined GCM digest %d",
+ session->digest_length);
+ return -1;
+ }
+ if (session->dir == DIR_ENC) {
+ memcpy(session->encap_pdb.gcm.salt,
+ (uint8_t *)&(ipsec_xform->salt), 4);
+ } else {
+ memcpy(session->decap_pdb.gcm.salt,
+ (uint8_t *)&(ipsec_xform->salt), 4);
+ }
+ session->aead_key.algmode = OP_ALG_AAI_GCM;
+ session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM;
+ break;
+ default:
+ DPAA_SEC_ERR("Crypto: Undefined AEAD specified %u",
+ aead_xform->algo);
+ return -1;
+ }
+ return 0;
+}
+
+static int
+dpaa_sec_ipsec_proto_init(struct rte_crypto_cipher_xform *cipher_xform,
+ struct rte_crypto_auth_xform *auth_xform,
+ struct rte_security_ipsec_xform *ipsec_xform,
+ dpaa_sec_session *session)
+{
+ if (cipher_xform) {
session->cipher_key.data = rte_zmalloc(NULL,
cipher_xform->key.length,
RTE_CACHE_LINE_SIZE);
DPAA_SEC_ERR("No Memory for cipher key");
return -ENOMEM;
}
+
+ session->cipher_key.length = cipher_xform->key.length;
memcpy(session->cipher_key.data, cipher_xform->key.data,
cipher_xform->key.length);
- session->cipher_key.length = cipher_xform->key.length;
-
- switch (cipher_xform->algo) {
- case RTE_CRYPTO_CIPHER_NULL:
- session->cipher_key.alg = OP_PCL_IPSEC_NULL;
- break;
- case RTE_CRYPTO_CIPHER_AES_CBC:
- session->cipher_key.alg = OP_PCL_IPSEC_AES_CBC;
- session->cipher_key.algmode = OP_ALG_AAI_CBC;
- break;
- case RTE_CRYPTO_CIPHER_3DES_CBC:
- session->cipher_key.alg = OP_PCL_IPSEC_3DES;
- session->cipher_key.algmode = OP_ALG_AAI_CBC;
- break;
- case RTE_CRYPTO_CIPHER_AES_CTR:
- session->cipher_key.alg = OP_PCL_IPSEC_AES_CTR;
- session->cipher_key.algmode = OP_ALG_AAI_CTR;
- break;
- default:
- DPAA_SEC_ERR("Crypto: Unsupported Cipher alg %u",
- cipher_xform->algo);
- goto out;
- }
session->cipher_alg = cipher_xform->algo;
} else {
session->cipher_key.data = NULL;
session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
}
- if (auth_xform && auth_xform->algo != RTE_CRYPTO_AUTH_NULL) {
+ if (auth_xform) {
session->auth_key.data = rte_zmalloc(NULL,
auth_xform->key.length,
RTE_CACHE_LINE_SIZE);
if (session->auth_key.data == NULL &&
auth_xform->key.length > 0) {
DPAA_SEC_ERR("No Memory for auth key");
- rte_free(session->cipher_key.data);
return -ENOMEM;
}
+ session->auth_key.length = auth_xform->key.length;
memcpy(session->auth_key.data, auth_xform->key.data,
auth_xform->key.length);
- session->auth_key.length = auth_xform->key.length;
-
- switch (auth_xform->algo) {
- case RTE_CRYPTO_AUTH_NULL:
- session->auth_key.alg = OP_PCL_IPSEC_HMAC_NULL;
- session->digest_length = 0;
- break;
- case RTE_CRYPTO_AUTH_MD5_HMAC:
- session->auth_key.alg = OP_PCL_IPSEC_HMAC_MD5_96;
- session->auth_key.algmode = OP_ALG_AAI_HMAC;
- break;
- case RTE_CRYPTO_AUTH_SHA1_HMAC:
- session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA1_96;
- session->auth_key.algmode = OP_ALG_AAI_HMAC;
- break;
- case RTE_CRYPTO_AUTH_SHA224_HMAC:
- session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA1_160;
- session->auth_key.algmode = OP_ALG_AAI_HMAC;
- break;
- case RTE_CRYPTO_AUTH_SHA256_HMAC:
- session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_256_128;
- session->auth_key.algmode = OP_ALG_AAI_HMAC;
- break;
- case RTE_CRYPTO_AUTH_SHA384_HMAC:
- session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_384_192;
- session->auth_key.algmode = OP_ALG_AAI_HMAC;
- break;
- case RTE_CRYPTO_AUTH_SHA512_HMAC:
- session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_512_256;
- session->auth_key.algmode = OP_ALG_AAI_HMAC;
- break;
- default:
- DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
- auth_xform->algo);
- goto out;
- }
session->auth_alg = auth_xform->algo;
+ session->digest_length = auth_xform->digest_length;
} else {
session->auth_key.data = NULL;
session->auth_key.length = 0;
session->auth_alg = RTE_CRYPTO_AUTH_NULL;
}
+ switch (session->auth_alg) {
+ case RTE_CRYPTO_AUTH_SHA1_HMAC:
+ session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA1_96;
+ session->auth_key.algmode = OP_ALG_AAI_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_MD5_HMAC:
+ session->auth_key.alg = OP_PCL_IPSEC_HMAC_MD5_96;
+ session->auth_key.algmode = OP_ALG_AAI_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_SHA256_HMAC:
+ session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_256_128;
+ session->auth_key.algmode = OP_ALG_AAI_HMAC;
+ if (session->digest_length != 16)
+ DPAA_SEC_WARN(
+ "+++Using sha256-hmac truncated len is non-standard,"
+ "it will not work with lookaside proto");
+ break;
+ case RTE_CRYPTO_AUTH_SHA384_HMAC:
+ session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_384_192;
+ session->auth_key.algmode = OP_ALG_AAI_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_SHA512_HMAC:
+ session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_512_256;
+ session->auth_key.algmode = OP_ALG_AAI_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_AES_CMAC:
+ session->auth_key.alg = OP_PCL_IPSEC_AES_CMAC_96;
+ break;
+ case RTE_CRYPTO_AUTH_NULL:
+ session->auth_key.alg = OP_PCL_IPSEC_HMAC_NULL;
+ break;
+ case RTE_CRYPTO_AUTH_SHA224_HMAC:
+ case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
+ case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
+ case RTE_CRYPTO_AUTH_SHA1:
+ case RTE_CRYPTO_AUTH_SHA256:
+ case RTE_CRYPTO_AUTH_SHA512:
+ case RTE_CRYPTO_AUTH_SHA224:
+ case RTE_CRYPTO_AUTH_SHA384:
+ case RTE_CRYPTO_AUTH_MD5:
+ case RTE_CRYPTO_AUTH_AES_GMAC:
+ case RTE_CRYPTO_AUTH_KASUMI_F9:
+ case RTE_CRYPTO_AUTH_AES_CBC_MAC:
+ case RTE_CRYPTO_AUTH_ZUC_EIA3:
+ DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
+ session->auth_alg);
+ return -1;
+ default:
+ DPAA_SEC_ERR("Crypto: Undefined Auth specified %u",
+ session->auth_alg);
+ return -1;
+ }
+
+ switch (session->cipher_alg) {
+ case RTE_CRYPTO_CIPHER_AES_CBC:
+ session->cipher_key.alg = OP_PCL_IPSEC_AES_CBC;
+ session->cipher_key.algmode = OP_ALG_AAI_CBC;
+ break;
+ case RTE_CRYPTO_CIPHER_3DES_CBC:
+ session->cipher_key.alg = OP_PCL_IPSEC_3DES;
+ session->cipher_key.algmode = OP_ALG_AAI_CBC;
+ break;
+ case RTE_CRYPTO_CIPHER_AES_CTR:
+ session->cipher_key.alg = OP_PCL_IPSEC_AES_CTR;
+ session->cipher_key.algmode = OP_ALG_AAI_CTR;
+ if (session->dir == DIR_ENC) {
+ session->encap_pdb.ctr.ctr_initial = 0x00000001;
+ session->encap_pdb.ctr.ctr_nonce = ipsec_xform->salt;
+ } else {
+ session->decap_pdb.ctr.ctr_initial = 0x00000001;
+ session->decap_pdb.ctr.ctr_nonce = ipsec_xform->salt;
+ }
+ break;
+ case RTE_CRYPTO_CIPHER_NULL:
+ session->cipher_key.alg = OP_PCL_IPSEC_NULL;
+ break;
+ case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
+ case RTE_CRYPTO_CIPHER_ZUC_EEA3:
+ case RTE_CRYPTO_CIPHER_3DES_ECB:
+ case RTE_CRYPTO_CIPHER_AES_ECB:
+ case RTE_CRYPTO_CIPHER_KASUMI_F8:
+ DPAA_SEC_ERR("Crypto: Unsupported Cipher alg %u",
+ session->cipher_alg);
+ return -1;
+ default:
+ DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
+ session->cipher_alg);
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+dpaa_sec_set_ipsec_session(struct rte_cryptodev *dev,
+ struct rte_security_session_conf *conf,
+ void *sess)
+{
+ struct dpaa_sec_dev_private *internals = dev->data->dev_private;
+ struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
+ struct rte_crypto_auth_xform *auth_xform = NULL;
+ struct rte_crypto_cipher_xform *cipher_xform = NULL;
+ struct rte_crypto_aead_xform *aead_xform = NULL;
+ dpaa_sec_session *session = (dpaa_sec_session *)sess;
+ uint32_t i;
+ int ret;
+
+ PMD_INIT_FUNC_TRACE();
+
+ memset(session, 0, sizeof(dpaa_sec_session));
+ session->proto_alg = conf->protocol;
+ session->ctxt = DPAA_SEC_IPSEC;
+
+ if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS)
+ session->dir = DIR_ENC;
+ else
+ session->dir = DIR_DEC;
+
+ if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+ cipher_xform = &conf->crypto_xform->cipher;
+ if (conf->crypto_xform->next)
+ auth_xform = &conf->crypto_xform->next->auth;
+ ret = dpaa_sec_ipsec_proto_init(cipher_xform, auth_xform,
+ ipsec_xform, session);
+ } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+ auth_xform = &conf->crypto_xform->auth;
+ if (conf->crypto_xform->next)
+ cipher_xform = &conf->crypto_xform->next->cipher;
+ ret = dpaa_sec_ipsec_proto_init(cipher_xform, auth_xform,
+ ipsec_xform, session);
+ } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+ aead_xform = &conf->crypto_xform->aead;
+ ret = dpaa_sec_ipsec_aead_init(aead_xform,
+ ipsec_xform, session);
+ } else {
+ DPAA_SEC_ERR("XFORM not specified");
+ ret = -EINVAL;
+ goto out;
+ }
+ if (ret) {
+ DPAA_SEC_ERR("Failed to process xform");
+ goto out;
+ }
+
if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
if (ipsec_xform->tunnel.type ==
RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
- memset(&session->encap_pdb, 0,
- sizeof(struct ipsec_encap_pdb) +
- sizeof(session->ip4_hdr));
session->ip4_hdr.ip_v = IPVERSION;
session->ip4_hdr.ip_hl = 5;
session->ip4_hdr.ip_len = rte_cpu_to_be_16(
session->encap_pdb.ip_hdr_len = sizeof(struct ip);
} else if (ipsec_xform->tunnel.type ==
RTE_SECURITY_IPSEC_TUNNEL_IPV6) {
- memset(&session->encap_pdb, 0,
- sizeof(struct ipsec_encap_pdb) +
- sizeof(session->ip6_hdr));
session->ip6_hdr.vtc_flow = rte_cpu_to_be_32(
DPAA_IPv6_DEFAULT_VTC_FLOW |
((ipsec_xform->tunnel.ipv6.dscp <<
if (ipsec_xform->options.esn)
session->encap_pdb.options |= PDBOPTS_ESP_ESN;
session->encap_pdb.spi = ipsec_xform->spi;
- session->dir = DIR_ENC;
+
} else if (ipsec_xform->direction ==
RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
- memset(&session->decap_pdb, 0, sizeof(struct ipsec_decap_pdb));
if (ipsec_xform->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4)
session->decap_pdb.options = sizeof(struct ip) << 16;
else
sizeof(struct rte_ipv6_hdr) << 16;
if (ipsec_xform->options.esn)
session->decap_pdb.options |= PDBOPTS_ESP_ESN;
- session->dir = DIR_DEC;
+ if (ipsec_xform->replay_win_sz) {
+ uint32_t win_sz;
+ win_sz = rte_align32pow2(ipsec_xform->replay_win_sz);
+
+ switch (win_sz) {
+ case 1:
+ case 2:
+ case 4:
+ case 8:
+ case 16:
+ case 32:
+ session->decap_pdb.options |= PDBOPTS_ESP_ARS32;
+ break;
+ case 64:
+ session->decap_pdb.options |= PDBOPTS_ESP_ARS64;
+ break;
+ default:
+ session->decap_pdb.options |=
+ PDBOPTS_ESP_ARS128;
+ }
+ }
} else
goto out;
rte_spinlock_lock(&internals->lock);
}
return 0;
}
-
+#endif
static int
dpaa_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
struct rte_cryptodev_config *config __rte_unused)
* sg[0] is for output
* sg[1] for input
*/
- job = dpaa_mem_ptov(qm_fd_addr_get64(fd));
+ job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));
ctx = container_of(job, struct dpaa_sec_op_ctx, job);
ctx->fd_status = fd->status;
* sg[0] is for output
* sg[1] for input
*/
- job = dpaa_mem_ptov(qm_fd_addr_get64(fd));
+ job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));
ctx = container_of(job, struct dpaa_sec_op_ctx, job);
ctx->fd_status = fd->status;
.sym_session_clear = dpaa_sec_sym_session_clear
};
+#ifdef RTE_LIBRTE_SECURITY
static const struct rte_security_capability *
dpaa_sec_capabilities_get(void *device __rte_unused)
{
.set_pkt_metadata = NULL,
.capabilities_get = dpaa_sec_capabilities_get
};
-
+#endif
static int
dpaa_sec_uninit(struct rte_cryptodev *dev)
{
dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
{
struct dpaa_sec_dev_private *internals;
+#ifdef RTE_LIBRTE_SECURITY
struct rte_security_ctx *security_instance;
+#endif
struct dpaa_sec_qp *qp;
uint32_t i, flags;
int ret;
DPAA_SEC_WARN("Device already init by primary process");
return 0;
}
-
+#ifdef RTE_LIBRTE_SECURITY
/* Initialize security_ctx only for primary process*/
security_instance = rte_malloc("rte_security_instances_ops",
sizeof(struct rte_security_ctx), 0);
security_instance->ops = &dpaa_sec_security_ops;
security_instance->sess_cnt = 0;
cryptodev->security_ctx = security_instance;
-
+#endif
rte_spinlock_init(&internals->lock);
for (i = 0; i < internals->max_nb_queue_pairs; i++) {
/* init qman fq for queue pair */
flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_DYNAMIC_FQID |
QMAN_FQ_FLAG_TO_DCPORTAL;
- for (i = 0; i < MAX_DPAA_CORES * internals->max_nb_sessions; i++) {
+ for (i = 0; i < RTE_DPAA_MAX_RX_QUEUE; i++) {
/* create rx qman fq for sessions*/
ret = qman_create_fq(0, flags, &internals->inq[i]);
if (unlikely(ret != 0)) {
}
}
+ if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
+ retval = rte_dpaa_portal_init((void *)1);
+ if (retval) {
+ DPAA_SEC_ERR("Unable to initialize portal");
+ return retval;
+ }
+ }
+
/* Invoke PMD device initialization function */
retval = dpaa_sec_dev_init(cryptodev);
if (retval == 0)