/* SPDX-License-Identifier: BSD-3-Clause
*
* Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
- * Copyright 2016-2018 NXP
+ * Copyright 2016-2020 NXP
*
*/
#define SEC_FLC_DHR_OUTBOUND -114
#define SEC_FLC_DHR_INBOUND 0
-enum rta_sec_era rta_sec_era = RTA_SEC_ERA_8;
-
static uint8_t cryptodev_driver_id;
-int dpaa2_logtype_sec;
-
#ifdef RTE_LIBRTE_SECURITY
static inline int
build_proto_compound_sg_fd(dpaa2_sec_session *sess,
RTE_CACHE_LINE_SIZE);
if (unlikely(!fle)) {
DPAA2_SEC_DP_ERR("Proto:SG: Memory alloc failed for SGE");
- return -1;
+ return -ENOMEM;
}
memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs));
DPAA2_SET_FLE_ADDR(fle, (size_t)op);
* mbuf priv after sym_op.
*/
if (sess->ctxt_type == DPAA2_SEC_PDCP && sess->pdcp.hfn_ovd) {
- uint32_t hfn_ovd = *((uint8_t *)op + sess->pdcp.hfn_ovd_offset);
+ uint32_t hfn_ovd = *(uint32_t *)((uint8_t *)op +
+ sess->pdcp.hfn_ovd_offset);
/* enable HFN override */
DPAA2_SET_FLE_INTERNAL_JD(ip_fle, hfn_ovd);
DPAA2_SET_FLE_INTERNAL_JD(op_fle, hfn_ovd);
retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
if (retval) {
DPAA2_SEC_DP_ERR("Memory alloc failed");
- return -1;
+ return -ENOMEM;
}
memset(fle, 0, FLE_POOL_BUF_SIZE);
DPAA2_SET_FLE_ADDR(fle, (size_t)op);
* mbuf priv after sym_op.
*/
if (sess->ctxt_type == DPAA2_SEC_PDCP && sess->pdcp.hfn_ovd) {
- uint32_t hfn_ovd = *((uint8_t *)op + sess->pdcp.hfn_ovd_offset);
+ uint32_t hfn_ovd = *(uint32_t *)((uint8_t *)op +
+ sess->pdcp.hfn_ovd_offset);
/* enable HFN override */
DPAA2_SET_FLE_INTERNAL_JD(ip_fle, hfn_ovd);
DPAA2_SET_FLE_INTERNAL_JD(op_fle, hfn_ovd);
RTE_CACHE_LINE_SIZE);
if (unlikely(!fle)) {
DPAA2_SEC_ERR("GCM SG: Memory alloc failed for SGE");
- return -1;
+ return -ENOMEM;
}
memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs));
DPAA2_SET_FLE_ADDR(fle, (size_t)op);
retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
if (retval) {
DPAA2_SEC_ERR("GCM: Memory alloc failed for SGE");
- return -1;
+ return -ENOMEM;
}
memset(fle, 0, FLE_POOL_BUF_SIZE);
DPAA2_SET_FLE_ADDR(fle, (size_t)op);
RTE_CACHE_LINE_SIZE);
if (unlikely(!fle)) {
DPAA2_SEC_ERR("AUTHENC SG: Memory alloc failed for SGE");
- return -1;
+ return -ENOMEM;
}
memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs));
DPAA2_SET_FLE_ADDR(fle, (size_t)op);
retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
if (retval) {
DPAA2_SEC_ERR("Memory alloc failed for SGE");
- return -1;
+ return -ENOMEM;
}
memset(fle, 0, FLE_POOL_BUF_SIZE);
DPAA2_SET_FLE_ADDR(fle, (size_t)op);
sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
if ((data_len & 7) || (data_offset & 7)) {
DPAA2_SEC_ERR("AUTH: len/offset must be full bytes");
- return -1;
+ return -ENOTSUP;
}
data_len = data_len >> 3;
RTE_CACHE_LINE_SIZE);
if (unlikely(!fle)) {
DPAA2_SEC_ERR("AUTH SG: Memory alloc failed for SGE");
- return -1;
+ return -ENOMEM;
}
memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs));
/* first FLE entry used to store mbuf and session ctxt */
sess->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
if ((data_len & 7) || (data_offset & 7)) {
DPAA2_SEC_ERR("AUTH: len/offset must be full bytes");
- return -1;
+ return -ENOTSUP;
}
data_len = data_len >> 3;
retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
if (retval) {
DPAA2_SEC_ERR("AUTH Memory alloc failed for SGE");
- return -1;
+ return -ENOMEM;
}
memset(fle, 0, FLE_POOL_BUF_SIZE);
/* TODO we are using the first FLE entry to store Mbuf.
sess->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
if ((data_len & 7) || (data_offset & 7)) {
DPAA2_SEC_ERR("CIPHER: len/offset must be full bytes");
- return -1;
+ return -ENOTSUP;
}
data_len = data_len >> 3;
RTE_CACHE_LINE_SIZE);
if (!fle) {
DPAA2_SEC_ERR("CIPHER SG: Memory alloc failed for SGE");
- return -1;
+ return -ENOMEM;
}
memset(fle, 0, FLE_SG_MEM_SIZE(mbuf->nb_segs + sym_op->m_src->nb_segs));
/* first FLE entry used to store mbuf and session ctxt */
sess->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
if ((data_len & 7) || (data_offset & 7)) {
DPAA2_SEC_ERR("CIPHER: len/offset must be full bytes");
- return -1;
+ return -ENOTSUP;
}
data_len = data_len >> 3;
retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
if (retval) {
DPAA2_SEC_ERR("CIPHER: Memory alloc failed for SGE");
- return -1;
+ return -ENOMEM;
}
memset(fle, 0, FLE_POOL_BUF_SIZE);
/* TODO we are using the first FLE entry to store Mbuf.
op->sym->sec_session);
#endif
else
- return -1;
+ return -ENOTSUP;
if (!sess)
- return -1;
+ return -EINVAL;
/* If any of the buffers is segmented */
if (!rte_pktmbuf_is_contiguous(op->sym->m_src) ||
case DPAA2_SEC_HASH_CIPHER:
default:
DPAA2_SEC_ERR("error: Unsupported session");
+ ret = -ENOTSUP;
}
}
return ret;
uint32_t loop;
int32_t ret;
struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
- uint32_t frames_to_send;
+ uint32_t frames_to_send, retry_count;
struct qbman_eq_desc eqdesc;
struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
struct qbman_swp *swp;
if (!DPAA2_PER_LCORE_DPIO) {
ret = dpaa2_affine_qbman_swp();
if (ret) {
- DPAA2_SEC_ERR("Failure in affining portal");
+ DPAA2_SEC_ERR(
+ "Failed to allocate IO portal, tid: %d\n",
+ rte_gettid());
return 0;
}
}
}
ops++;
}
+
loop = 0;
+ retry_count = 0;
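+ /* Retry the enqueue while the portal is busy; after DPAA2_MAX_TX_RETRY_COUNT
+ * consecutive failures, account for what was actually sent and skip the rest.
+ */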
while (loop < frames_to_send) {
- loop += qbman_swp_enqueue_multiple(swp, &eqdesc,
- &fd_arr[loop],
- &flags[loop],
- frames_to_send - loop);
+ ret = qbman_swp_enqueue_multiple(swp, &eqdesc,
+ &fd_arr[loop],
+ &flags[loop],
+ frames_to_send - loop);
+ if (unlikely(ret < 0)) {
+ retry_count++;
+ if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) {
+ num_tx += loop;
+ nb_ops -= loop;
+ goto skip_tx;
+ }
+ } else {
+ loop += ret;
+ retry_count = 0;
+ }
}
- num_tx += frames_to_send;
- nb_ops -= frames_to_send;
+ num_tx += loop;
+ nb_ops -= loop;
}
skip_tx:
dpaa2_qp->tx_vq.tx_pkts += num_tx;
#ifdef RTE_LIBRTE_SECURITY
if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
- dpaa2_sec_session *sess = (dpaa2_sec_session *)
- get_sec_session_private_data(op->sym->sec_session);
- if (sess->ctxt_type == DPAA2_SEC_IPSEC ||
- sess->ctxt_type == DPAA2_SEC_PDCP) {
- uint16_t len = DPAA2_GET_FD_LEN(fd);
- dst->pkt_len = len;
- while (dst->next != NULL) {
- len -= dst->data_len;
- dst = dst->next;
- }
- dst->data_len = len;
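+ /* For security (protocol offload) sessions the FD carries the processed
+ * length; propagate it to pkt_len and the last segment's data_len.
+ */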
+ uint16_t len = DPAA2_GET_FD_LEN(fd);
+ dst->pkt_len = len;
+ while (dst->next != NULL) {
+ len -= dst->data_len;
+ dst = dst->next;
}
+ dst->data_len = len;
}
#endif
DPAA2_SEC_DP_DEBUG("mbuf %p BMAN buf addr %p,"
if (!DPAA2_PER_LCORE_DPIO) {
ret = dpaa2_affine_qbman_swp();
if (ret) {
- DPAA2_SEC_ERR("Failure in affining portal");
+ DPAA2_SEC_ERR(
+ "Failed to allocate IO portal, tid: %d\n",
+ rte_gettid());
return 0;
}
}
RTE_CACHE_LINE_SIZE);
if (!qp) {
DPAA2_SEC_ERR("malloc failed for rx/tx queues");
- return -1;
+ return -ENOMEM;
}
qp->rx_vq.crypto_data = dev->data;
RTE_CACHE_LINE_SIZE);
if (!qp->rx_vq.q_storage) {
DPAA2_SEC_ERR("malloc failed for q_storage");
- return -1;
+ return -ENOMEM;
}
memset(qp->rx_vq.q_storage, 0, sizeof(struct queue_storage_info_t));
if (dpaa2_alloc_dq_storage(qp->rx_vq.q_storage)) {
DPAA2_SEC_ERR("Unable to allocate dequeue storage");
- return -1;
+ return -ENOMEM;
}
dev->data->queue_pairs[qp_id] = qp;
return retcode;
}
-/** Return the number of allocated queue pairs */
-static uint32_t
-dpaa2_sec_queue_pair_count(struct rte_cryptodev *dev)
-{
- PMD_INIT_FUNC_TRACE();
-
- return dev->data->nb_queue_pairs;
-}
-
/** Returns the size of the dpaa2_sec session structure */
static unsigned int
dpaa2_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
struct alginfo cipherdata;
- int bufsize;
+ int bufsize, ret = 0;
struct ctxt_priv *priv;
struct sec_flow_context *flc;
RTE_CACHE_LINE_SIZE);
if (priv == NULL) {
DPAA2_SEC_ERR("No Memory for priv CTXT");
- return -1;
+ return -ENOMEM;
}
priv->fle_pool = dev_priv->fle_pool;
flc = &priv->flc_desc[0].flc;
+ session->ctxt_type = DPAA2_SEC_CIPHER;
session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
RTE_CACHE_LINE_SIZE);
if (session->cipher_key.data == NULL) {
DPAA2_SEC_ERR("No Memory for cipher key");
rte_free(priv);
- return -1;
+ return -ENOMEM;
}
session->cipher_key.length = xform->cipher.key.length;
cipherdata.algmode = OP_ALG_AAI_CBC;
session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
- SHR_NEVER, &cipherdata, NULL,
+ SHR_NEVER, &cipherdata,
session->iv.length,
session->dir);
break;
cipherdata.algmode = OP_ALG_AAI_CBC;
session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
- SHR_NEVER, &cipherdata, NULL,
+ SHR_NEVER, &cipherdata,
session->iv.length,
session->dir);
break;
cipherdata.algmode = OP_ALG_AAI_CTR;
session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
- SHR_NEVER, &cipherdata, NULL,
+ SHR_NEVER, &cipherdata,
session->iv.length,
session->dir);
break;
cipherdata.algmode = OP_ALG_AAI_CTR;
session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CTR;
bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
- SHR_NEVER, &cipherdata, NULL,
+ SHR_NEVER, &cipherdata,
session->iv.length,
session->dir);
break;
case RTE_CRYPTO_CIPHER_NULL:
DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
xform->cipher.algo);
+ ret = -ENOTSUP;
goto error_out;
default:
DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
xform->cipher.algo);
+ ret = -ENOTSUP;
goto error_out;
}
if (bufsize < 0) {
DPAA2_SEC_ERR("Crypto: Descriptor build failed");
+ ret = -EINVAL;
goto error_out;
}
for (i = 0; i < bufsize; i++)
DPAA2_SEC_DEBUG("DESC[%d]:0x%x", i, priv->flc_desc[0].desc[i]);
#endif
- return 0;
+ return ret;
error_out:
rte_free(session->cipher_key.data);
rte_free(priv);
- return -1;
+ return ret;
}
static int
{
struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
struct alginfo authdata;
- int bufsize;
+ int bufsize, ret = 0;
struct ctxt_priv *priv;
struct sec_flow_context *flc;
RTE_CACHE_LINE_SIZE);
if (priv == NULL) {
DPAA2_SEC_ERR("No Memory for priv CTXT");
- return -1;
+ return -ENOMEM;
}
priv->fle_pool = dev_priv->fle_pool;
flc = &priv->flc_desc[DESC_INITFINAL].flc;
+ session->ctxt_type = DPAA2_SEC_AUTH;
session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
RTE_CACHE_LINE_SIZE);
if (session->auth_key.data == NULL) {
DPAA2_SEC_ERR("Unable to allocate memory for auth key");
rte_free(priv);
- return -1;
+ return -ENOMEM;
}
session->auth_key.length = xform->auth.key.length;
case RTE_CRYPTO_AUTH_AES_CBC_MAC:
DPAA2_SEC_ERR("Crypto: Unsupported auth alg %un",
xform->auth.algo);
+ ret = -ENOTSUP;
goto error_out;
default:
DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
xform->auth.algo);
+ ret = -ENOTSUP;
goto error_out;
}
if (bufsize < 0) {
DPAA2_SEC_ERR("Crypto: Invalid buffer length");
+ ret = -EINVAL;
goto error_out;
}
i, priv->flc_desc[DESC_INITFINAL].desc[i]);
#endif
- return 0;
+ return ret;
error_out:
rte_free(session->auth_key.data);
rte_free(priv);
- return -1;
+ return ret;
}
static int
struct ctxt_priv *priv;
struct sec_flow_context *flc;
struct rte_crypto_aead_xform *aead_xform = &xform->aead;
- int err;
+ int err, ret = 0;
PMD_INIT_FUNC_TRACE();
RTE_CACHE_LINE_SIZE);
if (priv == NULL) {
DPAA2_SEC_ERR("No Memory for priv CTXT");
- return -1;
+ return -ENOMEM;
}
priv->fle_pool = dev_priv->fle_pool;
if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
DPAA2_SEC_ERR("No Memory for aead key");
rte_free(priv);
- return -1;
+ return -ENOMEM;
}
memcpy(session->aead_key.data, aead_xform->key.data,
aead_xform->key.length);
case RTE_CRYPTO_AEAD_AES_CCM:
DPAA2_SEC_ERR("Crypto: Unsupported AEAD alg %u",
aead_xform->algo);
+ ret = -ENOTSUP;
goto error_out;
default:
DPAA2_SEC_ERR("Crypto: Undefined AEAD specified %u",
aead_xform->algo);
+ ret = -ENOTSUP;
goto error_out;
}
session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
priv->flc_desc[0].desc[0] = aeaddata.keylen;
err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
- MIN_JOB_DESC_SIZE,
+ DESC_JOB_IO_LEN,
(unsigned int *)priv->flc_desc[0].desc,
&priv->flc_desc[0].desc[1], 1);
if (err < 0) {
DPAA2_SEC_ERR("Crypto: Incorrect key lengths");
+ ret = -EINVAL;
goto error_out;
}
if (priv->flc_desc[0].desc[1] & 1) {
session->digest_length);
if (bufsize < 0) {
DPAA2_SEC_ERR("Crypto: Invalid buffer length");
+ ret = -EINVAL;
goto error_out;
}
DPAA2_SEC_DEBUG("DESC[%d]:0x%x\n",
i, priv->flc_desc[0].desc[i]);
#endif
- return 0;
+ return ret;
error_out:
rte_free(session->aead_key.data);
rte_free(priv);
- return -1;
+ return ret;
}
struct sec_flow_context *flc;
struct rte_crypto_cipher_xform *cipher_xform;
struct rte_crypto_auth_xform *auth_xform;
- int err;
+ int err, ret = 0;
PMD_INIT_FUNC_TRACE();
RTE_CACHE_LINE_SIZE);
if (priv == NULL) {
DPAA2_SEC_ERR("No Memory for priv CTXT");
- return -1;
+ return -ENOMEM;
}
priv->fle_pool = dev_priv->fle_pool;
if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) {
DPAA2_SEC_ERR("No Memory for cipher key");
rte_free(priv);
- return -1;
+ return -ENOMEM;
}
session->cipher_key.length = cipher_xform->key.length;
session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length,
DPAA2_SEC_ERR("No Memory for auth key");
rte_free(session->cipher_key.data);
rte_free(priv);
- return -1;
+ return -ENOMEM;
}
session->auth_key.length = auth_xform->key.length;
memcpy(session->cipher_key.data, cipher_xform->key.data,
case RTE_CRYPTO_AUTH_ZUC_EIA3:
DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
auth_xform->algo);
+ ret = -ENOTSUP;
goto error_out;
default:
DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
auth_xform->algo);
+ ret = -ENOTSUP;
goto error_out;
}
cipherdata.key = (size_t)session->cipher_key.data;
case RTE_CRYPTO_CIPHER_KASUMI_F8:
DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
cipher_xform->algo);
+ ret = -ENOTSUP;
goto error_out;
default:
DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
cipher_xform->algo);
+ ret = -ENOTSUP;
goto error_out;
}
session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
priv->flc_desc[0].desc[0] = cipherdata.keylen;
priv->flc_desc[0].desc[1] = authdata.keylen;
err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
- MIN_JOB_DESC_SIZE,
+ DESC_JOB_IO_LEN,
(unsigned int *)priv->flc_desc[0].desc,
&priv->flc_desc[0].desc[2], 2);
if (err < 0) {
DPAA2_SEC_ERR("Crypto: Incorrect key lengths");
+ ret = -EINVAL;
goto error_out;
}
if (priv->flc_desc[0].desc[2] & 1) {
session->dir);
if (bufsize < 0) {
DPAA2_SEC_ERR("Crypto: Invalid buffer length");
+ ret = -EINVAL;
goto error_out;
}
} else {
DPAA2_SEC_ERR("Hash before cipher not supported");
+ ret = -ENOTSUP;
goto error_out;
}
i, priv->flc_desc[0].desc[i]);
#endif
- return 0;
+ return ret;
error_out:
rte_free(session->cipher_key.data);
rte_free(session->auth_key.data);
rte_free(priv);
- return -1;
+ return ret;
}
static int
if (unlikely(sess == NULL)) {
DPAA2_SEC_ERR("Invalid session struct");
- return -1;
+ return -EINVAL;
}
memset(session, 0, sizeof(dpaa2_sec_session));
/* Cipher Only */
if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
- session->ctxt_type = DPAA2_SEC_CIPHER;
ret = dpaa2_sec_cipher_init(dev, xform, session);
/* Authentication Only */
} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
xform->next == NULL) {
- session->ctxt_type = DPAA2_SEC_AUTH;
ret = dpaa2_sec_auth_init(dev, xform, session);
/* Cipher then Authenticate */
} else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
session->ext_params.aead_ctxt.auth_cipher_text = true;
- ret = dpaa2_sec_aead_chain_init(dev, xform, session);
-
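+ /* A NULL cipher or NULL auth in the chain degenerates to a
+ * single-algorithm session.
+ */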
+ if (xform->cipher.algo == RTE_CRYPTO_CIPHER_NULL)
+ ret = dpaa2_sec_auth_init(dev, xform, session);
+ else if (xform->next->auth.algo == RTE_CRYPTO_AUTH_NULL)
+ ret = dpaa2_sec_cipher_init(dev, xform, session);
+ else
+ ret = dpaa2_sec_aead_chain_init(dev, xform, session);
/* Authenticate then Cipher */
} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
session->ext_params.aead_ctxt.auth_cipher_text = false;
- ret = dpaa2_sec_aead_chain_init(dev, xform, session);
-
+ if (xform->auth.algo == RTE_CRYPTO_AUTH_NULL)
+ ret = dpaa2_sec_cipher_init(dev, xform, session);
+ else if (xform->next->cipher.algo == RTE_CRYPTO_CIPHER_NULL)
+ ret = dpaa2_sec_auth_init(dev, xform, session);
+ else
+ ret = dpaa2_sec_aead_chain_init(dev, xform, session);
/* AEAD operation for AES-GCM kind of Algorithms */
} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
xform->next == NULL) {
RTE_CACHE_LINE_SIZE);
if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
DPAA2_SEC_ERR("No Memory for aead key");
- return -1;
+ return -ENOMEM;
}
memcpy(session->aead_key.data, aead_xform->key.data,
aead_xform->key.length);
switch (aead_xform->algo) {
case RTE_CRYPTO_AEAD_AES_GCM:
- aeaddata->algtype = OP_ALG_ALGSEL_AES;
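+ /* For lookaside IPsec the protocol algorithm selector also encodes
+ * the ICV (digest) length.
+ */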
+ switch (session->digest_length) {
+ case 8:
+ aeaddata->algtype = OP_PCL_IPSEC_AES_GCM8;
+ break;
+ case 12:
+ aeaddata->algtype = OP_PCL_IPSEC_AES_GCM12;
+ break;
+ case 16:
+ aeaddata->algtype = OP_PCL_IPSEC_AES_GCM16;
+ break;
+ default:
+ DPAA2_SEC_ERR("Crypto: Undefined GCM digest %d",
+ session->digest_length);
+ return -EINVAL;
+ }
aeaddata->algmode = OP_ALG_AAI_GCM;
session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM;
break;
case RTE_CRYPTO_AEAD_AES_CCM:
- aeaddata->algtype = OP_ALG_ALGSEL_AES;
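+ /* As for GCM, pick the CCM selector matching the ICV length. */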
+ switch (session->digest_length) {
+ case 8:
+ aeaddata->algtype = OP_PCL_IPSEC_AES_CCM8;
+ break;
+ case 12:
+ aeaddata->algtype = OP_PCL_IPSEC_AES_CCM12;
+ break;
+ case 16:
+ aeaddata->algtype = OP_PCL_IPSEC_AES_CCM16;
+ break;
+ default:
+ DPAA2_SEC_ERR("Crypto: Undefined CCM digest %d",
+ session->digest_length);
+ return -EINVAL;
+ }
aeaddata->algmode = OP_ALG_AAI_CCM;
session->aead_alg = RTE_CRYPTO_AEAD_AES_CCM;
break;
default:
DPAA2_SEC_ERR("Crypto: Undefined AEAD specified %u",
aead_xform->algo);
- return -1;
+ return -ENOTSUP;
}
session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
DIR_ENC : DIR_DEC;
memcpy(session->auth_key.data, auth_xform->key.data,
auth_xform->key.length);
session->auth_alg = auth_xform->algo;
+ session->digest_length = auth_xform->digest_length;
} else {
session->auth_key.data = NULL;
session->auth_key.length = 0;
case RTE_CRYPTO_AUTH_SHA256_HMAC:
authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_256_128;
authdata->algmode = OP_ALG_AAI_HMAC;
+ if (session->digest_length != 16)
+ DPAA2_SEC_WARN(
+ "+++Using sha256-hmac truncated len is non-standard,"
+ "it will not work with lookaside proto");
break;
case RTE_CRYPTO_AUTH_SHA384_HMAC:
authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_384_192;
case RTE_CRYPTO_AUTH_ZUC_EIA3:
DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
session->auth_alg);
- return -1;
+ return -ENOTSUP;
default:
DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
session->auth_alg);
- return -1;
+ return -ENOTSUP;
}
cipherdata->key = (size_t)session->cipher_key.data;
cipherdata->keylen = session->cipher_key.length;
case RTE_CRYPTO_CIPHER_KASUMI_F8:
DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
session->cipher_alg);
- return -1;
+ return -ENOTSUP;
default:
DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
session->cipher_alg);
- return -1;
+ return -ENOTSUP;
}
return 0;
struct rte_crypto_aead_xform *aead_xform = NULL;
dpaa2_sec_session *session = (dpaa2_sec_session *)sess;
struct ctxt_priv *priv;
- struct ipsec_encap_pdb encap_pdb;
- struct ipsec_decap_pdb decap_pdb;
struct alginfo authdata, cipherdata;
int bufsize;
struct sec_flow_context *flc;
aead_xform = &conf->crypto_xform->aead;
ret = dpaa2_sec_ipsec_aead_init(aead_xform,
session, &cipherdata);
+ authdata.keylen = 0;
+ authdata.algtype = 0;
} else {
DPAA2_SEC_ERR("XFORM not specified");
ret = -EINVAL;
uint8_t *hdr = NULL;
struct ip ip4_hdr;
struct rte_ipv6_hdr ip6_hdr;
+ struct ipsec_encap_pdb encap_pdb;
flc->dhr = SEC_FLC_DHR_OUTBOUND;
/* For Sec Proto only one descriptor is required. */
memset(&encap_pdb, 0, sizeof(struct ipsec_encap_pdb));
+
+ /* copy algo specific data to PDB */
+ switch (cipherdata.algtype) {
+ case OP_PCL_IPSEC_AES_CTR:
+ encap_pdb.ctr.ctr_initial = 0x00000001;
+ encap_pdb.ctr.ctr_nonce = ipsec_xform->salt;
+ break;
+ case OP_PCL_IPSEC_AES_GCM8:
+ case OP_PCL_IPSEC_AES_GCM12:
+ case OP_PCL_IPSEC_AES_GCM16:
+ memcpy(encap_pdb.gcm.salt,
+ (uint8_t *)&(ipsec_xform->salt), 4);
+ break;
+ }
+
encap_pdb.options = (IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
PDBOPTS_ESP_OIHI_PDB_INL |
PDBOPTS_ESP_IVSRC |
hdr, &cipherdata, &authdata);
} else if (ipsec_xform->direction ==
RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
+ struct ipsec_decap_pdb decap_pdb;
+
flc->dhr = SEC_FLC_DHR_INBOUND;
memset(&decap_pdb, 0, sizeof(struct ipsec_decap_pdb));
+ /* copy algo specific data to PDB */
+ switch (cipherdata.algtype) {
+ case OP_PCL_IPSEC_AES_CTR:
+ decap_pdb.ctr.ctr_initial = 0x00000001;
+ decap_pdb.ctr.ctr_nonce = ipsec_xform->salt;
+ break;
+ case OP_PCL_IPSEC_AES_GCM8:
+ case OP_PCL_IPSEC_AES_GCM12:
+ case OP_PCL_IPSEC_AES_GCM16:
+ memcpy(decap_pdb.gcm.salt,
+ (uint8_t *)&(ipsec_xform->salt), 4);
+ break;
+ }
+
decap_pdb.options = (ipsec_xform->tunnel.type ==
RTE_SECURITY_IPSEC_TUNNEL_IPV4) ?
sizeof(struct ip) << 16 :
sizeof(struct rte_ipv6_hdr) << 16;
if (ipsec_xform->options.esn)
decap_pdb.options |= PDBOPTS_ESP_ESN;
+
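+ /* Round the requested anti-replay window up to a power of two and
+ * map it to the nearest supported SEC window size (32, 64 or 128).
+ */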
+ if (ipsec_xform->replay_win_sz) {
+ uint32_t win_sz;
+ win_sz = rte_align32pow2(ipsec_xform->replay_win_sz);
+
+ switch (win_sz) {
+ case 1:
+ case 2:
+ case 4:
+ case 8:
+ case 16:
+ case 32:
+ decap_pdb.options |= PDBOPTS_ESP_ARS32;
+ break;
+ case 64:
+ decap_pdb.options |= PDBOPTS_ESP_ARS64;
+ break;
+ default:
+ decap_pdb.options |= PDBOPTS_ESP_ARS128;
+ }
+ }
session->dir = DIR_DEC;
bufsize = cnstr_shdsc_ipsec_new_decap(priv->flc_desc[0].desc,
1, 0, SHR_SERIAL,
goto out;
}
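+ /* When rta_inline_pdcp_query() asks for a referenced key, pass the
+ * cipher key by IOVA pointer instead of inlining it in the descriptor.
+ */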
+ if (rta_inline_pdcp_query(authdata.algtype,
+ cipherdata.algtype,
+ session->pdcp.sn_size,
+ session->pdcp.hfn_ovd)) {
+ cipherdata.key = DPAA2_VADDR_TO_IOVA(cipherdata.key);
+ cipherdata.key_type = RTA_DATA_PTR;
+ }
+
if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
if (session->dir == DIR_ENC)
bufsize = cnstr_shdsc_pdcp_c_plane_encap(
rte_free(session->auth_key.data);
rte_free(session->cipher_key.data);
rte_free(priv);
- return -1;
+ return -EINVAL;
}
static int
}
}
-static void __attribute__((hot))
+static void __rte_hot
dpaa2_sec_process_parallel_event(struct qbman_swp *swp,
const struct qbman_fd *fd,
const struct qbman_result *dq,
qbman_swp_dqrr_consume(swp, dq);
}
static void
-dpaa2_sec_process_atomic_event(struct qbman_swp *swp __attribute__((unused)),
+dpaa2_sec_process_atomic_event(struct qbman_swp *swp __rte_unused,
const struct qbman_fd *fd,
const struct qbman_result *dq,
struct dpaa2_queue *rxq,
.stats_reset = dpaa2_sec_stats_reset,
.queue_pair_setup = dpaa2_sec_queue_pair_setup,
.queue_pair_release = dpaa2_sec_queue_pair_release,
- .queue_pair_count = dpaa2_sec_queue_pair_count,
.sym_session_get_size = dpaa2_sec_sym_session_get_size,
.sym_session_configure = dpaa2_sec_sym_session_configure,
.sym_session_clear = dpaa2_sec_sym_session_clear,
PMD_INIT_FUNC_TRACE();
dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device);
- if (dpaa2_dev == NULL) {
- DPAA2_SEC_ERR("DPAA2 SEC device not found");
- return -1;
- }
hw_id = dpaa2_dev->object_id;
cryptodev->driver_id = cryptodev_driver_id;
if (!dpseci) {
DPAA2_SEC_ERR(
"Error in allocating the memory for dpsec object");
- return -1;
+ return -ENOMEM;
}
- dpseci->regs = rte_mcp_ptr_list[0];
+ dpseci->regs = dpaa2_get_mcp_ptr(MC_PORTAL_INDEX);
retcode = dpseci_open(dpseci, CMD_PRI_LOW, hw_id, &token);
if (retcode != 0) {
if (dpaa2_svr_family == SVR_LX2160A)
rta_set_sec_era(RTA_SEC_ERA_10);
+ else
+ rta_set_sec_era(RTA_SEC_ERA_8);
DPAA2_SEC_INFO("2-SEC ERA is %d", rta_get_sec_era());
RTE_PMD_REGISTER_DPAA2(CRYPTODEV_NAME_DPAA2_SEC_PMD, rte_dpaa2_sec_driver);
RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa2_sec_crypto_drv,
rte_dpaa2_sec_driver.driver, cryptodev_driver_id);
-
-RTE_INIT(dpaa2_sec_init_log)
-{
- /* Bus level logs */
- dpaa2_logtype_sec = rte_log_register("pmd.crypto.dpaa2");
- if (dpaa2_logtype_sec >= 0)
- rte_log_set_level(dpaa2_logtype_sec, RTE_LOG_NOTICE);
-}
+RTE_LOG_REGISTER(dpaa2_logtype_sec, pmd.crypto.dpaa2, NOTICE);