#include "dpaa2_sec_priv.h"
#include "dpaa2_sec_logs.h"
+/* Required types */
+typedef uint64_t dma_addr_t;
+
/* RTA header files */
#include <hw/desc/ipsec.h>
#include <hw/desc/algo.h>
#define FLE_POOL_NUM_BUFS 32000
#define FLE_POOL_BUF_SIZE 256
#define FLE_POOL_CACHE_SIZE 512
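+/* Size of the per-operation buffer that holds the SG frame list entries */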
+#define FLE_SG_MEM_SIZE 2048
#define SEC_FLC_DHR_OUTBOUND -114
#define SEC_FLC_DHR_INBOUND 0
static uint8_t cryptodev_driver_id;
+int dpaa2_logtype_sec;
+
static inline int
build_proto_fd(dpaa2_sec_session *sess,
struct rte_crypto_op *op,
DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
DPAA2_SET_FD_OFFSET(fd, sym_op->m_src->data_off);
DPAA2_SET_FD_LEN(fd, sym_op->m_src->pkt_len);
- DPAA2_SET_FD_FLC(fd, ((uint64_t)flc));
+ DPAA2_SET_FD_FLC(fd, (ptrdiff_t)flc);
/* save physical address of mbuf */
op->sym->aead.digest.phys_addr = mbuf->buf_iova;
- mbuf->buf_iova = (uint64_t)op;
+ mbuf->buf_iova = (size_t)op;
+
+ return 0;
+}
+
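+/*
+ * Build a compound frame descriptor for an AEAD (AES-GCM) operation on a
+ * multi-segment mbuf, using scatter-gather frame list entries.
+ */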
+static inline int
+build_authenc_gcm_sg_fd(dpaa2_sec_session *sess,
+ struct rte_crypto_op *op,
+ struct qbman_fd *fd, __rte_unused uint16_t bpid)
+{
+ struct rte_crypto_sym_op *sym_op = op->sym;
+ struct ctxt_priv *priv = sess->ctxt;
+ struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
+ struct sec_flow_context *flc;
+ uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
+ int icv_len = sess->digest_length;
+ uint8_t *old_icv;
+ struct rte_mbuf *mbuf;
+ uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
+ sess->iv.offset);
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (sym_op->m_dst)
+ mbuf = sym_op->m_dst;
+ else
+ mbuf = sym_op->m_src;
+
+ /* first FLE entry used to store mbuf and session ctxt */
+ fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE,
+ RTE_CACHE_LINE_SIZE);
+ if (unlikely(!fle)) {
+ DPAA2_SEC_ERR("GCM SG: Memory alloc failed for SGE");
+ return -1;
+ }
+ memset(fle, 0, FLE_SG_MEM_SIZE);
+ DPAA2_SET_FLE_ADDR(fle, (size_t)op);
+ DPAA2_FLE_SAVE_CTXT(fle, (size_t)priv);
+
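+ /* FLE layout: fle[0] keeps the op and ctxt pointers for use at dequeue,
+  * fle[1] is the output FLE, fle[2] the input FLE, SG entries follow.
+  */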
+ op_fle = fle + 1;
+ ip_fle = fle + 2;
+ sge = fle + 3;
+
+ /* Save the shared descriptor */
+ flc = &priv->flc_desc[0].flc;
+
+ /* Configure FD as a FRAME LIST */
+ DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
+ DPAA2_SET_FD_COMPOUND_FMT(fd);
+ DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
+
+ DPAA2_SEC_DP_DEBUG("GCM SG: auth_off: 0x%x/length %d, digest-len=%d\n"
+ "iv-len=%d data_off: 0x%x\n",
+ sym_op->aead.data.offset,
+ sym_op->aead.data.length,
+ sess->digest_length,
+ sess->iv.length,
+ sym_op->m_src->data_off);
+
+ /* Configure Output FLE with Scatter/Gather Entry */
+ DPAA2_SET_FLE_SG_EXT(op_fle);
+ DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
+
+ if (auth_only_len)
+ DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);
+
+ op_fle->length = (sess->dir == DIR_ENC) ?
+ (sym_op->aead.data.length + icv_len + auth_only_len) :
+ sym_op->aead.data.length + auth_only_len;
+
+ /* Configure Output SGE for Encap/Decap */
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
+ DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off + sym_op->aead.data.offset -
+ auth_only_len);
+ sge->length = mbuf->data_len - sym_op->aead.data.offset + auth_only_len;
+
+ mbuf = mbuf->next;
+ /* o/p segs */
+ while (mbuf) {
+ sge++;
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
+ DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
+ sge->length = mbuf->data_len;
+ mbuf = mbuf->next;
+ }
+ sge->length -= icv_len;
+
+ if (sess->dir == DIR_ENC) {
+ sge++;
+ DPAA2_SET_FLE_ADDR(sge,
+ DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data));
+ sge->length = icv_len;
+ }
+ DPAA2_SET_FLE_FIN(sge);
+
+ sge++;
+ mbuf = sym_op->m_src;
+
+ /* Configure Input FLE with Scatter/Gather Entry */
+ DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
+ DPAA2_SET_FLE_SG_EXT(ip_fle);
+ DPAA2_SET_FLE_FIN(ip_fle);
+ ip_fle->length = (sess->dir == DIR_ENC) ?
+ (sym_op->aead.data.length + sess->iv.length + auth_only_len) :
+ (sym_op->aead.data.length + sess->iv.length + auth_only_len +
+ icv_len);
+
+ /* Configure Input SGE for Encap/Decap */
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr));
+ sge->length = sess->iv.length;
+
+ sge++;
+ if (auth_only_len) {
+ DPAA2_SET_FLE_ADDR(sge,
+ DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data));
+ sge->length = auth_only_len;
+ sge++;
+ }
+
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
+ DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
+ mbuf->data_off);
+ sge->length = mbuf->data_len - sym_op->aead.data.offset;
+
+ mbuf = mbuf->next;
+ /* i/p segs */
+ while (mbuf) {
+ sge++;
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
+ DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
+ sge->length = mbuf->data_len;
+ mbuf = mbuf->next;
+ }
+
+ if (sess->dir == DIR_DEC) {
+ sge++;
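+ /* Copy the received ICV just past the SG table and point the last
+  * input SGE at it.
+  */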
+ old_icv = (uint8_t *)(sge + 1);
+ memcpy(old_icv, sym_op->aead.digest.data, icv_len);
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
+ sge->length = icv_len;
+ }
+
+ DPAA2_SET_FLE_FIN(sge);
+ if (auth_only_len) {
+ DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
+ DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
+ }
+ DPAA2_SET_FD_LEN(fd, ip_fle->length);
return 0;
}
*/
retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
if (retval) {
- RTE_LOG(ERR, PMD, "Memory alloc failed for SGE\n");
+ DPAA2_SEC_ERR("GCM: Memory alloc failed for SGE");
return -1;
}
memset(fle, 0, FLE_POOL_BUF_SIZE);
- DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
- DPAA2_FLE_SAVE_CTXT(fle, priv);
+ DPAA2_SET_FLE_ADDR(fle, (size_t)op);
+ DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
fle = fle + 1;
sge = fle + 2;
if (likely(bpid < MAX_BPID)) {
DPAA2_SET_FD_COMPOUND_FMT(fd);
DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
- PMD_TX_LOG(DEBUG, "auth_off: 0x%x/length %d, digest-len=%d\n"
+ DPAA2_SEC_DP_DEBUG("GCM: auth_off: 0x%x/length %d, digest-len=%d\n"
"iv-len=%d data_off: 0x%x\n",
sym_op->aead.data.offset,
sym_op->aead.data.length,
- sym_op->aead.digest.length,
+ sess->digest_length,
sess->iv.length,
sym_op->m_src->data_off);
return 0;
}
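+/*
+ * Build a compound frame descriptor for a chained cipher+auth operation on a
+ * multi-segment mbuf, using scatter-gather frame list entries.
+ */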
+static inline int
+build_authenc_sg_fd(dpaa2_sec_session *sess,
+ struct rte_crypto_op *op,
+ struct qbman_fd *fd, __rte_unused uint16_t bpid)
+{
+ struct rte_crypto_sym_op *sym_op = op->sym;
+ struct ctxt_priv *priv = sess->ctxt;
+ struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
+ struct sec_flow_context *flc;
+ uint32_t auth_only_len = sym_op->auth.data.length -
+ sym_op->cipher.data.length;
+ int icv_len = sess->digest_length;
+ uint8_t *old_icv;
+ struct rte_mbuf *mbuf;
+ uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
+ sess->iv.offset);
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (sym_op->m_dst)
+ mbuf = sym_op->m_dst;
+ else
+ mbuf = sym_op->m_src;
+
+ /* first FLE entry used to store mbuf and session ctxt */
+ fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE,
+ RTE_CACHE_LINE_SIZE);
+ if (unlikely(!fle)) {
+ DPAA2_SEC_ERR("AUTHENC SG: Memory alloc failed for SGE");
+ return -1;
+ }
+ memset(fle, 0, FLE_SG_MEM_SIZE);
+ DPAA2_SET_FLE_ADDR(fle, (size_t)op);
+ DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
+
+ op_fle = fle + 1;
+ ip_fle = fle + 2;
+ sge = fle + 3;
+
+ /* Save the shared descriptor */
+ flc = &priv->flc_desc[0].flc;
+
+ /* Configure FD as a FRAME LIST */
+ DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
+ DPAA2_SET_FD_COMPOUND_FMT(fd);
+ DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
+
+ DPAA2_SEC_DP_DEBUG(
+ "AUTHENC SG: auth_off: 0x%x/length %d, digest-len=%d\n"
+ "cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
+ sym_op->auth.data.offset,
+ sym_op->auth.data.length,
+ sess->digest_length,
+ sym_op->cipher.data.offset,
+ sym_op->cipher.data.length,
+ sess->iv.length,
+ sym_op->m_src->data_off);
+
+ /* Configure Output FLE with Scatter/Gather Entry */
+ DPAA2_SET_FLE_SG_EXT(op_fle);
+ DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
+
+ if (auth_only_len)
+ DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);
+
+ op_fle->length = (sess->dir == DIR_ENC) ?
+ (sym_op->cipher.data.length + icv_len) :
+ sym_op->cipher.data.length;
+
+ /* Configure Output SGE for Encap/Decap */
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
+ DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off + sym_op->auth.data.offset);
+ sge->length = mbuf->data_len - sym_op->auth.data.offset;
+
+ mbuf = mbuf->next;
+ /* o/p segs */
+ while (mbuf) {
+ sge++;
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
+ DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
+ sge->length = mbuf->data_len;
+ mbuf = mbuf->next;
+ }
+ sge->length -= icv_len;
+
+ if (sess->dir == DIR_ENC) {
+ sge++;
+ DPAA2_SET_FLE_ADDR(sge,
+ DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
+ sge->length = icv_len;
+ }
+ DPAA2_SET_FLE_FIN(sge);
+
+ sge++;
+ mbuf = sym_op->m_src;
+
+ /* Configure Input FLE with Scatter/Gather Entry */
+ DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
+ DPAA2_SET_FLE_SG_EXT(ip_fle);
+ DPAA2_SET_FLE_FIN(ip_fle);
+ ip_fle->length = (sess->dir == DIR_ENC) ?
+ (sym_op->auth.data.length + sess->iv.length) :
+ (sym_op->auth.data.length + sess->iv.length +
+ icv_len);
+
+ /* Configure Input SGE for Encap/Decap */
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
+ sge->length = sess->iv.length;
+
+ sge++;
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
+ DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
+ mbuf->data_off);
+ sge->length = mbuf->data_len - sym_op->auth.data.offset;
+
+ mbuf = mbuf->next;
+ /* i/p segs */
+ while (mbuf) {
+ sge++;
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
+ DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
+ sge->length = mbuf->data_len;
+ mbuf = mbuf->next;
+ }
+ sge->length -= icv_len;
+
+ if (sess->dir == DIR_DEC) {
+ sge++;
+ old_icv = (uint8_t *)(sge + 1);
+ memcpy(old_icv, sym_op->auth.digest.data,
+ icv_len);
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
+ sge->length = icv_len;
+ }
+
+ DPAA2_SET_FLE_FIN(sge);
+ if (auth_only_len) {
+ DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
+ DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
+ }
+ DPAA2_SET_FD_LEN(fd, ip_fle->length);
+
+ return 0;
+}
+
static inline int
build_authenc_fd(dpaa2_sec_session *sess,
struct rte_crypto_op *op,
*/
retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
if (retval) {
- RTE_LOG(ERR, PMD, "Memory alloc failed for SGE\n");
+ DPAA2_SEC_ERR("Memory alloc failed for SGE");
return -1;
}
memset(fle, 0, FLE_POOL_BUF_SIZE);
- DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
- DPAA2_FLE_SAVE_CTXT(fle, priv);
+ DPAA2_SET_FLE_ADDR(fle, (size_t)op);
+ DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
fle = fle + 1;
sge = fle + 2;
if (likely(bpid < MAX_BPID)) {
DPAA2_SET_FD_COMPOUND_FMT(fd);
DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
- PMD_TX_LOG(DEBUG, "auth_off: 0x%x/length %d, digest-len=%d\n"
- "cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
- sym_op->auth.data.offset,
- sym_op->auth.data.length,
- sess->digest_length,
- sym_op->cipher.data.offset,
- sym_op->cipher.data.length,
- sess->iv.length,
- sym_op->m_src->data_off);
+ DPAA2_SEC_DP_DEBUG(
+ "AUTHENC: auth_off: 0x%x/length %d, digest-len=%d\n"
+ "cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
+ sym_op->auth.data.offset,
+ sym_op->auth.data.length,
+ sess->digest_length,
+ sym_op->cipher.data.offset,
+ sym_op->cipher.data.length,
+ sess->iv.length,
+ sym_op->m_src->data_off);
/* Configure Output FLE with Scatter/Gather Entry */
DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
return 0;
}
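+/*
+ * Build a compound frame descriptor for an auth-only operation on a
+ * multi-segment mbuf.
+ */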
+static inline int build_auth_sg_fd(
+ dpaa2_sec_session *sess,
+ struct rte_crypto_op *op,
+ struct qbman_fd *fd,
+ __rte_unused uint16_t bpid)
+{
+ struct rte_crypto_sym_op *sym_op = op->sym;
+ struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
+ struct sec_flow_context *flc;
+ struct ctxt_priv *priv = sess->ctxt;
+ uint8_t *old_digest;
+ struct rte_mbuf *mbuf;
+
+ PMD_INIT_FUNC_TRACE();
+
+ mbuf = sym_op->m_src;
+ fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE,
+ RTE_CACHE_LINE_SIZE);
+ if (unlikely(!fle)) {
+ DPAA2_SEC_ERR("AUTH SG: Memory alloc failed for SGE");
+ return -1;
+ }
+ memset(fle, 0, FLE_SG_MEM_SIZE);
+ /* first FLE entry used to store mbuf and session ctxt */
+ DPAA2_SET_FLE_ADDR(fle, (size_t)op);
+ DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
+ op_fle = fle + 1;
+ ip_fle = fle + 2;
+ sge = fle + 3;
+
+ flc = &priv->flc_desc[DESC_INITFINAL].flc;
+ /* sg FD */
+ DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
+ DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
+ DPAA2_SET_FD_COMPOUND_FMT(fd);
+
+ /* o/p fle */
+ DPAA2_SET_FLE_ADDR(op_fle,
+ DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
+ op_fle->length = sess->digest_length;
+
+ /* i/p fle */
+ DPAA2_SET_FLE_SG_EXT(ip_fle);
+ DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
+ /* i/p 1st seg */
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
+ DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset + mbuf->data_off);
+ sge->length = mbuf->data_len - sym_op->auth.data.offset;
+
+ /* i/p segs */
+ mbuf = mbuf->next;
+ while (mbuf) {
+ sge++;
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
+ DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
+ sge->length = mbuf->data_len;
+ mbuf = mbuf->next;
+ }
+ if (sess->dir == DIR_ENC) {
+ /* Digest calculation case */
+ sge->length -= sess->digest_length;
+ ip_fle->length = sym_op->auth.data.length;
+ } else {
+ /* Digest verification case */
+ sge++;
+ old_digest = (uint8_t *)(sge + 1);
+ rte_memcpy(old_digest, sym_op->auth.digest.data,
+ sess->digest_length);
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
+ sge->length = sess->digest_length;
+ ip_fle->length = sym_op->auth.data.length +
+ sess->digest_length;
+ }
+ DPAA2_SET_FLE_FIN(sge);
+ DPAA2_SET_FLE_FIN(ip_fle);
+ DPAA2_SET_FD_LEN(fd, ip_fle->length);
+
+ return 0;
+}
+
static inline int
build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
struct qbman_fd *fd, uint16_t bpid)
retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
if (retval) {
- RTE_LOG(ERR, PMD, "Memory alloc failed for SGE\n");
+ DPAA2_SEC_ERR("AUTH: Memory alloc failed for SGE");
return -1;
}
memset(fle, 0, FLE_POOL_BUF_SIZE);
* to get the MBUF Addr from the previous FLE.
* We can have a better approach to use the inline Mbuf
*/
- DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
- DPAA2_FLE_SAVE_CTXT(fle, priv);
+ DPAA2_SET_FLE_ADDR(fle, (size_t)op);
+ DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
fle = fle + 1;
if (likely(bpid < MAX_BPID)) {
return 0;
}
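+/*
+ * Build a compound frame descriptor for a cipher-only operation on a
+ * multi-segment mbuf.
+ */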
+static int
+build_cipher_sg_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
+ struct qbman_fd *fd, __rte_unused uint16_t bpid)
+{
+ struct rte_crypto_sym_op *sym_op = op->sym;
+ struct qbman_fle *ip_fle, *op_fle, *sge, *fle;
+ struct sec_flow_context *flc;
+ struct ctxt_priv *priv = sess->ctxt;
+ struct rte_mbuf *mbuf;
+ uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
+ sess->iv.offset);
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (sym_op->m_dst)
+ mbuf = sym_op->m_dst;
+ else
+ mbuf = sym_op->m_src;
+
+ fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE,
+ RTE_CACHE_LINE_SIZE);
+ if (!fle) {
+ DPAA2_SEC_ERR("CIPHER SG: Memory alloc failed for SGE");
+ return -1;
+ }
+ memset(fle, 0, FLE_SG_MEM_SIZE);
+ /* first FLE entry used to store mbuf and session ctxt */
+ DPAA2_SET_FLE_ADDR(fle, (size_t)op);
+ DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
+
+ op_fle = fle + 1;
+ ip_fle = fle + 2;
+ sge = fle + 3;
+
+ flc = &priv->flc_desc[0].flc;
+
+ DPAA2_SEC_DP_DEBUG(
+ "CIPHER SG: cipher_off: 0x%x/length %d, ivlen=%d"
+ " data_off: 0x%x\n",
+ sym_op->cipher.data.offset,
+ sym_op->cipher.data.length,
+ sess->iv.length,
+ sym_op->m_src->data_off);
+
+ /* o/p fle */
+ DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
+ op_fle->length = sym_op->cipher.data.length;
+ DPAA2_SET_FLE_SG_EXT(op_fle);
+
+ /* o/p 1st seg */
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
+ DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset + mbuf->data_off);
+ sge->length = mbuf->data_len - sym_op->cipher.data.offset;
+
+ mbuf = mbuf->next;
+ /* o/p segs */
+ while (mbuf) {
+ sge++;
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
+ DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
+ sge->length = mbuf->data_len;
+ mbuf = mbuf->next;
+ }
+ DPAA2_SET_FLE_FIN(sge);
+
+ DPAA2_SEC_DP_DEBUG(
+ "CIPHER SG: 1 - flc = %p, fle = %p FLEaddr = %x-%x, len %d\n",
+ flc, fle, fle->addr_hi, fle->addr_lo,
+ fle->length);
+
+ /* i/p fle */
+ mbuf = sym_op->m_src;
+ sge++;
+ DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
+ ip_fle->length = sess->iv.length + sym_op->cipher.data.length;
+ DPAA2_SET_FLE_SG_EXT(ip_fle);
+
+ /* i/p IV */
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
+ DPAA2_SET_FLE_OFFSET(sge, 0);
+ sge->length = sess->iv.length;
+
+ sge++;
+
+ /* i/p 1st seg */
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
+ DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset +
+ mbuf->data_off);
+ sge->length = mbuf->data_len - sym_op->cipher.data.offset;
+
+ mbuf = mbuf->next;
+ /* i/p segs */
+ while (mbuf) {
+ sge++;
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
+ DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
+ sge->length = mbuf->data_len;
+ mbuf = mbuf->next;
+ }
+ DPAA2_SET_FLE_FIN(sge);
+ DPAA2_SET_FLE_FIN(ip_fle);
+
+ /* sg fd */
+ DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
+ DPAA2_SET_FD_LEN(fd, ip_fle->length);
+ DPAA2_SET_FD_COMPOUND_FMT(fd);
+ DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
+
+ DPAA2_SEC_DP_DEBUG(
+ "CIPHER SG: fdaddr =%" PRIx64 " bpid =%d meta =%d"
+ " off =%d, len =%d\n",
+ DPAA2_GET_FD_ADDR(fd),
+ DPAA2_GET_FD_BPID(fd),
+ rte_dpaa2_bpid_info[bpid].meta_data_size,
+ DPAA2_GET_FD_OFFSET(fd),
+ DPAA2_GET_FD_LEN(fd));
+ return 0;
+}
+
static int
build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
struct qbman_fd *fd, uint16_t bpid)
retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
if (retval) {
- RTE_LOG(ERR, PMD, "Memory alloc failed for SGE\n");
+ DPAA2_SEC_ERR("CIPHER: Memory alloc failed for SGE");
return -1;
}
memset(fle, 0, FLE_POOL_BUF_SIZE);
* to get the MBUF Addr from the previous FLE.
* We can have a better approach to use the inline Mbuf
*/
- DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
- DPAA2_FLE_SAVE_CTXT(fle, priv);
+ DPAA2_SET_FLE_ADDR(fle, (size_t)op);
+ DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
fle = fle + 1;
sge = fle + 2;
DPAA2_SET_FD_COMPOUND_FMT(fd);
DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
- PMD_TX_LOG(DEBUG, "cipher_off: 0x%x/length %d,ivlen=%d data_off: 0x%x",
- sym_op->cipher.data.offset,
- sym_op->cipher.data.length,
- sess->iv.length,
- sym_op->m_src->data_off);
+ DPAA2_SEC_DP_DEBUG(
+ "CIPHER: cipher_off: 0x%x/length %d, ivlen=%d,"
+ " data_off: 0x%x\n",
+ sym_op->cipher.data.offset,
+ sym_op->cipher.data.length,
+ sess->iv.length,
+ sym_op->m_src->data_off);
DPAA2_SET_FLE_ADDR(fle, DPAA2_MBUF_VADDR_TO_IOVA(dst));
DPAA2_SET_FLE_OFFSET(fle, sym_op->cipher.data.offset +
fle->length = sym_op->cipher.data.length + sess->iv.length;
- PMD_TX_LOG(DEBUG, "1 - flc = %p, fle = %p FLEaddr = %x-%x, length %d",
- flc, fle, fle->addr_hi, fle->addr_lo, fle->length);
+ DPAA2_SEC_DP_DEBUG(
+ "CIPHER: 1 - flc = %p, fle = %p FLEaddr = %x-%x, length %d\n",
+ flc, fle, fle->addr_hi, fle->addr_lo,
+ fle->length);
fle++;
DPAA2_SET_FLE_FIN(sge);
DPAA2_SET_FLE_FIN(fle);
- PMD_TX_LOG(DEBUG, "fdaddr =%p bpid =%d meta =%d off =%d, len =%d",
- (void *)DPAA2_GET_FD_ADDR(fd),
- DPAA2_GET_FD_BPID(fd),
- rte_dpaa2_bpid_info[bpid].meta_data_size,
- DPAA2_GET_FD_OFFSET(fd),
- DPAA2_GET_FD_LEN(fd));
+ DPAA2_SEC_DP_DEBUG(
+ "CIPHER: fdaddr =%" PRIx64 " bpid =%d meta =%d"
+ " off =%d, len =%d\n",
+ DPAA2_GET_FD_ADDR(fd),
+ DPAA2_GET_FD_BPID(fd),
+ rte_dpaa2_bpid_info[bpid].meta_data_size,
+ DPAA2_GET_FD_OFFSET(fd),
+ DPAA2_GET_FD_LEN(fd));
return 0;
}
dpaa2_sec_session *sess;
PMD_INIT_FUNC_TRACE();
- /*
- * Segmented buffer is not supported.
- */
- if (!rte_pktmbuf_is_contiguous(op->sym->m_src)) {
- op->status = RTE_CRYPTO_OP_STATUS_ERROR;
- return -ENOTSUP;
- }
if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
- sess = (dpaa2_sec_session *)get_session_private_data(
+ sess = (dpaa2_sec_session *)get_sym_session_private_data(
op->sym->session, cryptodev_driver_id);
else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
sess = (dpaa2_sec_session *)get_sec_session_private_data(
else
return -1;
- switch (sess->ctxt_type) {
- case DPAA2_SEC_CIPHER:
- ret = build_cipher_fd(sess, op, fd, bpid);
- break;
- case DPAA2_SEC_AUTH:
- ret = build_auth_fd(sess, op, fd, bpid);
- break;
- case DPAA2_SEC_AEAD:
- ret = build_authenc_gcm_fd(sess, op, fd, bpid);
- break;
- case DPAA2_SEC_CIPHER_HASH:
- ret = build_authenc_fd(sess, op, fd, bpid);
- break;
- case DPAA2_SEC_IPSEC:
- ret = build_proto_fd(sess, op, fd, bpid);
- break;
- case DPAA2_SEC_HASH_CIPHER:
- default:
- RTE_LOG(ERR, PMD, "error: Unsupported session\n");
+ /* Multi-segment buffer: use the scatter-gather FD builders */
+ if (unlikely(!rte_pktmbuf_is_contiguous(op->sym->m_src))) {
+ switch (sess->ctxt_type) {
+ case DPAA2_SEC_CIPHER:
+ ret = build_cipher_sg_fd(sess, op, fd, bpid);
+ break;
+ case DPAA2_SEC_AUTH:
+ ret = build_auth_sg_fd(sess, op, fd, bpid);
+ break;
+ case DPAA2_SEC_AEAD:
+ ret = build_authenc_gcm_sg_fd(sess, op, fd, bpid);
+ break;
+ case DPAA2_SEC_CIPHER_HASH:
+ ret = build_authenc_sg_fd(sess, op, fd, bpid);
+ break;
+ case DPAA2_SEC_HASH_CIPHER:
+ default:
+ DPAA2_SEC_ERR("error: Unsupported session");
+ }
+ } else {
+ switch (sess->ctxt_type) {
+ case DPAA2_SEC_CIPHER:
+ ret = build_cipher_fd(sess, op, fd, bpid);
+ break;
+ case DPAA2_SEC_AUTH:
+ ret = build_auth_fd(sess, op, fd, bpid);
+ break;
+ case DPAA2_SEC_AEAD:
+ ret = build_authenc_gcm_fd(sess, op, fd, bpid);
+ break;
+ case DPAA2_SEC_CIPHER_HASH:
+ ret = build_authenc_fd(sess, op, fd, bpid);
+ break;
+ case DPAA2_SEC_IPSEC:
+ ret = build_proto_fd(sess, op, fd, bpid);
+ break;
+ case DPAA2_SEC_HASH_CIPHER:
+ default:
+ DPAA2_SEC_ERR("error: Unsupported session");
+ }
}
return ret;
}
return 0;
if (ops[0]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
- RTE_LOG(ERR, PMD, "sessionless crypto op not supported\n");
+ DPAA2_SEC_ERR("sessionless crypto op not supported");
return 0;
}
/*Prepare enqueue descriptor*/
qbman_eq_desc_set_response(&eqdesc, 0, 0);
qbman_eq_desc_set_fq(&eqdesc, dpaa2_qp->tx_vq.fqid);
- if (!DPAA2_PER_LCORE_SEC_DPIO) {
- ret = dpaa2_affine_qbman_swp_sec();
+ if (!DPAA2_PER_LCORE_DPIO) {
+ ret = dpaa2_affine_qbman_swp();
if (ret) {
- RTE_LOG(ERR, PMD, "Failure in affining portal\n");
+ DPAA2_SEC_ERR("Failure in affining portal");
return 0;
}
}
- swp = DPAA2_PER_LCORE_SEC_PORTAL;
+ swp = DPAA2_PER_LCORE_PORTAL;
while (nb_ops) {
frames_to_send = (nb_ops >> 3) ? MAX_TX_RING_SLOTS : nb_ops;
bpid = mempool_to_bpid(mb_pool);
ret = build_sec_fd(*ops, &fd_arr[loop], bpid);
if (ret) {
- PMD_DRV_LOG(ERR, "error: Improper packet"
- " contents for crypto operation\n");
+ DPAA2_SEC_ERR("error: Improper packet contents"
+ " for crypto operation");
goto skip_tx;
}
ops++;
DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)),
rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
- op = (struct rte_crypto_op *)mbuf->buf_iova;
+ op = (struct rte_crypto_op *)(size_t)mbuf->buf_iova;
mbuf->buf_iova = op->sym->aead.digest.phys_addr;
op->sym->aead.digest.phys_addr = 0L;
fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
- PMD_RX_LOG(DEBUG, "FLE addr = %x - %x, offset = %x",
- fle->addr_hi, fle->addr_lo, fle->fin_bpid_offset);
+ DPAA2_SEC_DP_DEBUG("FLE addr = %x - %x, offset = %x\n",
+ fle->addr_hi, fle->addr_lo, fle->fin_bpid_offset);
/* we are using the first FLE entry to store Mbuf.
* Currently we donot know which FLE has the mbuf stored.
if (unlikely(DPAA2_GET_FD_IVP(fd))) {
/* TODO complete it. */
- RTE_LOG(ERR, PMD, "error: Non inline buffer - WHAT to DO?\n");
+ DPAA2_SEC_ERR("error: non inline buffer");
return NULL;
}
- op = (struct rte_crypto_op *)DPAA2_IOVA_TO_VADDR(
- DPAA2_GET_FLE_ADDR((fle - 1)));
+ op = (struct rte_crypto_op *)DPAA2_GET_FLE_ADDR((fle - 1));
/* Prefeth op */
src = op->sym->m_src;
} else
dst = src;
- PMD_RX_LOG(DEBUG, "mbuf %p BMAN buf addr %p",
- (void *)dst, dst->buf_addr);
-
- PMD_RX_LOG(DEBUG, "fdaddr =%p bpid =%d meta =%d off =%d, len =%d",
- (void *)DPAA2_GET_FD_ADDR(fd),
- DPAA2_GET_FD_BPID(fd),
- rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
- DPAA2_GET_FD_OFFSET(fd),
- DPAA2_GET_FD_LEN(fd));
+ DPAA2_SEC_DP_DEBUG("mbuf %p BMAN buf addr %p,"
+ " fdaddr =%" PRIx64 " bpid =%d meta =%d off =%d, len =%d\n",
+ (void *)dst,
+ dst->buf_addr,
+ DPAA2_GET_FD_ADDR(fd),
+ DPAA2_GET_FD_BPID(fd),
+ rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
+ DPAA2_GET_FD_OFFSET(fd),
+ DPAA2_GET_FD_LEN(fd));
/* free the fle memory */
- priv = (struct ctxt_priv *)DPAA2_GET_FLE_CTXT(fle - 1);
- rte_mempool_put(priv->fle_pool, (void *)(fle - 1));
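+ /* SG FDs get their FLE memory from rte_malloc(); contiguous FDs get it
+  * from the FLE mempool, so free accordingly.
+  */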
+ if (likely(rte_pktmbuf_is_contiguous(src))) {
+ priv = (struct ctxt_priv *)(size_t)DPAA2_GET_FLE_CTXT(fle - 1);
+ rte_mempool_put(priv->fle_pool, (void *)(fle-1));
+ } else
+ rte_free((void *)(fle-1));
return op;
}
const struct qbman_fd *fd;
struct qbman_pull_desc pulldesc;
- if (!DPAA2_PER_LCORE_SEC_DPIO) {
- ret = dpaa2_affine_qbman_swp_sec();
+ if (!DPAA2_PER_LCORE_DPIO) {
+ ret = dpaa2_affine_qbman_swp();
if (ret) {
- RTE_LOG(ERR, PMD, "Failure in affining portal\n");
+ DPAA2_SEC_ERR("Failure in affining portal");
return 0;
}
}
- swp = DPAA2_PER_LCORE_SEC_PORTAL;
+ swp = DPAA2_PER_LCORE_PORTAL;
dq_storage = dpaa2_qp->rx_vq.q_storage->dq_storage[0];
qbman_pull_desc_clear(&pulldesc);
/*Issue a volatile dequeue command. */
while (1) {
if (qbman_swp_pull(swp, &pulldesc)) {
- RTE_LOG(WARNING, PMD,
- "SEC VDQ command is not issued : QBMAN busy\n");
+ DPAA2_SEC_WARN(
+ "SEC VDQ command is not issued: QBMAN busy");
/* Portal was busy, try again */
continue;
}
status = (uint8_t)qbman_result_DQ_flags(dq_storage);
if (unlikely(
(status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) {
- PMD_RX_LOG(DEBUG, "No frame is delivered");
+ DPAA2_SEC_DP_DEBUG("No frame is delivered\n");
continue;
}
}
if (unlikely(fd->simple.frc)) {
/* TODO Parse SEC errors */
- RTE_LOG(ERR, PMD, "SEC returned Error - %x\n",
- fd->simple.frc);
+ DPAA2_SEC_ERR("SEC returned Error - %x",
+ fd->simple.frc);
ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_ERROR;
} else {
ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
dpaa2_qp->rx_vq.rx_pkts += num_rx;
- PMD_RX_LOG(DEBUG, "SEC Received %d Packets", num_rx);
+ DPAA2_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);
/*Return the total number of packets received to DPAA2 app*/
return num_rx;
}
/* If qp is already in use free ring memory and qp metadata. */
if (dev->data->queue_pairs[qp_id] != NULL) {
- PMD_DRV_LOG(INFO, "QP already setup");
+ DPAA2_SEC_INFO("QP already setup");
return 0;
}
- PMD_DRV_LOG(DEBUG, "dev =%p, queue =%d, conf =%p",
+ DPAA2_SEC_DEBUG("dev =%p, queue =%d, conf =%p",
dev, qp_id, qp_conf);
memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));
qp = rte_malloc(NULL, sizeof(struct dpaa2_sec_qp),
RTE_CACHE_LINE_SIZE);
if (!qp) {
- RTE_LOG(ERR, PMD, "malloc failed for rx/tx queues\n");
+ DPAA2_SEC_ERR("malloc failed for rx/tx queues");
return -1;
}
sizeof(struct queue_storage_info_t),
RTE_CACHE_LINE_SIZE);
if (!qp->rx_vq.q_storage) {
- RTE_LOG(ERR, PMD, "malloc failed for q_storage\n");
+ DPAA2_SEC_ERR("malloc failed for q_storage");
return -1;
}
memset(qp->rx_vq.q_storage, 0, sizeof(struct queue_storage_info_t));
if (dpaa2_alloc_dq_storage(qp->rx_vq.q_storage)) {
- RTE_LOG(ERR, PMD, "dpaa2_alloc_dq_storage failed\n");
+ DPAA2_SEC_ERR("Unable to allocate dequeue storage");
return -1;
}
dev->data->queue_pairs[qp_id] = qp;
cfg.options = cfg.options | DPSECI_QUEUE_OPT_USER_CTX;
- cfg.user_ctx = (uint64_t)(&qp->rx_vq);
+ cfg.user_ctx = (size_t)(&qp->rx_vq);
retcode = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
qp_id, &cfg);
return retcode;
}
-/** Start queue pair */
-static int
-dpaa2_sec_queue_pair_start(__rte_unused struct rte_cryptodev *dev,
- __rte_unused uint16_t queue_pair_id)
-{
- PMD_INIT_FUNC_TRACE();
-
- return 0;
-}
-
-/** Stop queue pair */
-static int
-dpaa2_sec_queue_pair_stop(__rte_unused struct rte_cryptodev *dev,
- __rte_unused uint16_t queue_pair_id)
-{
- PMD_INIT_FUNC_TRACE();
-
- return 0;
-}
-
/** Return the number of allocated queue pairs */
static uint32_t
dpaa2_sec_queue_pair_count(struct rte_cryptodev *dev)
/** Returns the size of the aesni gcm session structure */
static unsigned int
-dpaa2_sec_session_get_size(struct rte_cryptodev *dev __rte_unused)
+dpaa2_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
PMD_INIT_FUNC_TRACE();
sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
RTE_CACHE_LINE_SIZE);
if (priv == NULL) {
- RTE_LOG(ERR, PMD, "No Memory for priv CTXT\n");
+ DPAA2_SEC_ERR("No Memory for priv CTXT");
return -1;
}
session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
RTE_CACHE_LINE_SIZE);
if (session->cipher_key.data == NULL) {
- RTE_LOG(ERR, PMD, "No Memory for cipher key\n");
+ DPAA2_SEC_ERR("No Memory for cipher key");
rte_free(priv);
return -1;
}
memcpy(session->cipher_key.data, xform->cipher.key.data,
xform->cipher.key.length);
- cipherdata.key = (uint64_t)session->cipher_key.data;
+ cipherdata.key = (size_t)session->cipher_key.data;
cipherdata.keylen = session->cipher_key.length;
cipherdata.key_enc_flags = 0;
cipherdata.key_type = RTA_DATA_IMM;
case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
case RTE_CRYPTO_CIPHER_ZUC_EEA3:
case RTE_CRYPTO_CIPHER_NULL:
- RTE_LOG(ERR, PMD, "Crypto: Unsupported Cipher alg %u\n",
+ DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
xform->cipher.algo);
goto error_out;
default:
- RTE_LOG(ERR, PMD, "Crypto: Undefined Cipher specified %u\n",
+ DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
xform->cipher.algo);
goto error_out;
}
&cipherdata, NULL, session->iv.length,
session->dir);
if (bufsize < 0) {
- RTE_LOG(ERR, PMD, "Crypto: Descriptor build failed\n");
+ DPAA2_SEC_ERR("Crypto: Descriptor build failed");
goto error_out;
}
flc->dhr = 0;
flc->word1_sdl = (uint8_t)bufsize;
flc->word2_rflc_31_0 = lower_32_bits(
- (uint64_t)&(((struct dpaa2_sec_qp *)
+ (size_t)&(((struct dpaa2_sec_qp *)
dev->data->queue_pairs[0])->rx_vq));
flc->word3_rflc_63_32 = upper_32_bits(
- (uint64_t)&(((struct dpaa2_sec_qp *)
+ (size_t)&(((struct dpaa2_sec_qp *)
dev->data->queue_pairs[0])->rx_vq));
session->ctxt = priv;
for (i = 0; i < bufsize; i++)
- PMD_DRV_LOG(DEBUG, "DESC[%d]:0x%x\n",
- i, priv->flc_desc[0].desc[i]);
+ DPAA2_SEC_DEBUG("DESC[%d]:0x%x", i, priv->flc_desc[0].desc[i]);
return 0;
{
struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
struct alginfo authdata;
- unsigned int bufsize, i;
+ int bufsize, i;
struct ctxt_priv *priv;
struct sec_flow_context *flc;
sizeof(struct sec_flc_desc),
RTE_CACHE_LINE_SIZE);
if (priv == NULL) {
- RTE_LOG(ERR, PMD, "No Memory for priv CTXT\n");
+ DPAA2_SEC_ERR("No Memory for priv CTXT");
return -1;
}
session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
RTE_CACHE_LINE_SIZE);
if (session->auth_key.data == NULL) {
- RTE_LOG(ERR, PMD, "No Memory for auth key\n");
+ DPAA2_SEC_ERR("Unable to allocate memory for auth key");
rte_free(priv);
return -1;
}
memcpy(session->auth_key.data, xform->auth.key.data,
xform->auth.key.length);
- authdata.key = (uint64_t)session->auth_key.data;
+ authdata.key = (size_t)session->auth_key.data;
authdata.keylen = session->auth_key.length;
authdata.key_enc_flags = 0;
authdata.key_type = RTA_DATA_IMM;
case RTE_CRYPTO_AUTH_AES_CMAC:
case RTE_CRYPTO_AUTH_AES_CBC_MAC:
case RTE_CRYPTO_AUTH_ZUC_EIA3:
- RTE_LOG(ERR, PMD, "Crypto: Unsupported auth alg %u\n",
- xform->auth.algo);
+ DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
+ xform->auth.algo);
goto error_out;
default:
- RTE_LOG(ERR, PMD, "Crypto: Undefined Auth specified %u\n",
- xform->auth.algo);
+ DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
+ xform->auth.algo);
goto error_out;
}
session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
1, 0, &authdata, !session->dir,
session->digest_length);
+ if (bufsize < 0) {
+ DPAA2_SEC_ERR("Crypto: Invalid buffer length");
+ goto error_out;
+ }
flc->word1_sdl = (uint8_t)bufsize;
flc->word2_rflc_31_0 = lower_32_bits(
- (uint64_t)&(((struct dpaa2_sec_qp *)
+ (size_t)&(((struct dpaa2_sec_qp *)
dev->data->queue_pairs[0])->rx_vq));
flc->word3_rflc_63_32 = upper_32_bits(
- (uint64_t)&(((struct dpaa2_sec_qp *)
+ (size_t)&(((struct dpaa2_sec_qp *)
dev->data->queue_pairs[0])->rx_vq));
session->ctxt = priv;
for (i = 0; i < bufsize; i++)
- PMD_DRV_LOG(DEBUG, "DESC[%d]:0x%x\n",
- i, priv->flc_desc[DESC_INITFINAL].desc[i]);
+ DPAA2_SEC_DEBUG("DESC[%d]:0x%x",
+ i, priv->flc_desc[DESC_INITFINAL].desc[i]);
return 0;
struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt;
struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
struct alginfo aeaddata;
- unsigned int bufsize, i;
+ int bufsize, i;
struct ctxt_priv *priv;
struct sec_flow_context *flc;
struct rte_crypto_aead_xform *aead_xform = &xform->aead;
sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
RTE_CACHE_LINE_SIZE);
if (priv == NULL) {
- RTE_LOG(ERR, PMD, "No Memory for priv CTXT\n");
+ DPAA2_SEC_ERR("No Memory for priv CTXT");
return -1;
}
session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
RTE_CACHE_LINE_SIZE);
if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
- RTE_LOG(ERR, PMD, "No Memory for aead key\n");
+ DPAA2_SEC_ERR("No Memory for aead key");
rte_free(priv);
return -1;
}
session->aead_key.length = aead_xform->key.length;
ctxt->auth_only_len = aead_xform->aad_length;
- aeaddata.key = (uint64_t)session->aead_key.data;
+ aeaddata.key = (size_t)session->aead_key.data;
aeaddata.keylen = session->aead_key.length;
aeaddata.key_enc_flags = 0;
aeaddata.key_type = RTA_DATA_IMM;
case RTE_CRYPTO_AEAD_AES_GCM:
aeaddata.algtype = OP_ALG_ALGSEL_AES;
aeaddata.algmode = OP_ALG_AAI_GCM;
- session->cipher_alg = RTE_CRYPTO_AEAD_AES_GCM;
+ session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM;
break;
case RTE_CRYPTO_AEAD_AES_CCM:
- RTE_LOG(ERR, PMD, "Crypto: Unsupported AEAD alg %u\n",
- aead_xform->algo);
+ DPAA2_SEC_ERR("Crypto: Unsupported AEAD alg %u",
+ aead_xform->algo);
goto error_out;
default:
- RTE_LOG(ERR, PMD, "Crypto: Undefined AEAD specified %u\n",
- aead_xform->algo);
+ DPAA2_SEC_ERR("Crypto: Undefined AEAD specified %u",
+ aead_xform->algo);
goto error_out;
}
session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
&priv->flc_desc[0].desc[1], 1);
if (err < 0) {
- PMD_DRV_LOG(ERR, "Crypto: Incorrect key lengths\n");
+ DPAA2_SEC_ERR("Crypto: Incorrect key lengths");
goto error_out;
}
if (priv->flc_desc[0].desc[1] & 1) {
priv->flc_desc[0].desc, 1, 0,
&aeaddata, session->iv.length,
session->digest_length);
+ if (bufsize < 0) {
+ DPAA2_SEC_ERR("Crypto: Invalid buffer length");
+ goto error_out;
+ }
+
flc->word1_sdl = (uint8_t)bufsize;
flc->word2_rflc_31_0 = lower_32_bits(
- (uint64_t)&(((struct dpaa2_sec_qp *)
+ (size_t)&(((struct dpaa2_sec_qp *)
dev->data->queue_pairs[0])->rx_vq));
flc->word3_rflc_63_32 = upper_32_bits(
- (uint64_t)&(((struct dpaa2_sec_qp *)
+ (size_t)&(((struct dpaa2_sec_qp *)
dev->data->queue_pairs[0])->rx_vq));
session->ctxt = priv;
for (i = 0; i < bufsize; i++)
- PMD_DRV_LOG(DEBUG, "DESC[%d]:0x%x\n",
+ DPAA2_SEC_DEBUG("DESC[%d]:0x%x",
i, priv->flc_desc[0].desc[i]);
return 0;
struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt;
struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
struct alginfo authdata, cipherdata;
- unsigned int bufsize, i;
+ int bufsize, i;
struct ctxt_priv *priv;
struct sec_flow_context *flc;
struct rte_crypto_cipher_xform *cipher_xform;
sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
RTE_CACHE_LINE_SIZE);
if (priv == NULL) {
- RTE_LOG(ERR, PMD, "No Memory for priv CTXT\n");
+ DPAA2_SEC_ERR("No Memory for priv CTXT");
return -1;
}
session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length,
RTE_CACHE_LINE_SIZE);
if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) {
- RTE_LOG(ERR, PMD, "No Memory for cipher key\n");
+ DPAA2_SEC_ERR("No Memory for cipher key");
rte_free(priv);
return -1;
}
session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length,
RTE_CACHE_LINE_SIZE);
if (session->auth_key.data == NULL && auth_xform->key.length > 0) {
- RTE_LOG(ERR, PMD, "No Memory for auth key\n");
+ DPAA2_SEC_ERR("No Memory for auth key");
rte_free(session->cipher_key.data);
rte_free(priv);
return -1;
memcpy(session->auth_key.data, auth_xform->key.data,
auth_xform->key.length);
- authdata.key = (uint64_t)session->auth_key.data;
+ authdata.key = (size_t)session->auth_key.data;
authdata.keylen = session->auth_key.length;
authdata.key_enc_flags = 0;
authdata.key_type = RTA_DATA_IMM;
case RTE_CRYPTO_AUTH_AES_CMAC:
case RTE_CRYPTO_AUTH_AES_CBC_MAC:
case RTE_CRYPTO_AUTH_ZUC_EIA3:
- RTE_LOG(ERR, PMD, "Crypto: Unsupported auth alg %u\n",
- auth_xform->algo);
+ DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
+ auth_xform->algo);
goto error_out;
default:
- RTE_LOG(ERR, PMD, "Crypto: Undefined Auth specified %u\n",
- auth_xform->algo);
+ DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
+ auth_xform->algo);
goto error_out;
}
- cipherdata.key = (uint64_t)session->cipher_key.data;
+ cipherdata.key = (size_t)session->cipher_key.data;
cipherdata.keylen = session->cipher_key.length;
cipherdata.key_enc_flags = 0;
cipherdata.key_type = RTA_DATA_IMM;
case RTE_CRYPTO_CIPHER_3DES_ECB:
case RTE_CRYPTO_CIPHER_AES_ECB:
case RTE_CRYPTO_CIPHER_KASUMI_F8:
- RTE_LOG(ERR, PMD, "Crypto: Unsupported Cipher alg %u\n",
- cipher_xform->algo);
+ DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
+ cipher_xform->algo);
goto error_out;
default:
- RTE_LOG(ERR, PMD, "Crypto: Undefined Cipher specified %u\n",
- cipher_xform->algo);
+ DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
+ cipher_xform->algo);
goto error_out;
}
session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
&priv->flc_desc[0].desc[2], 2);
if (err < 0) {
- PMD_DRV_LOG(ERR, "Crypto: Incorrect key lengths\n");
+ DPAA2_SEC_ERR("Crypto: Incorrect key lengths");
goto error_out;
}
if (priv->flc_desc[0].desc[2] & 1) {
ctxt->auth_only_len,
session->digest_length,
session->dir);
+ if (bufsize < 0) {
+ DPAA2_SEC_ERR("Crypto: Invalid buffer length");
+ goto error_out;
+ }
} else {
- RTE_LOG(ERR, PMD, "Hash before cipher not supported\n");
+ DPAA2_SEC_ERR("Hash before cipher not supported");
goto error_out;
}
flc->word1_sdl = (uint8_t)bufsize;
flc->word2_rflc_31_0 = lower_32_bits(
- (uint64_t)&(((struct dpaa2_sec_qp *)
+ (size_t)&(((struct dpaa2_sec_qp *)
dev->data->queue_pairs[0])->rx_vq));
flc->word3_rflc_63_32 = upper_32_bits(
- (uint64_t)&(((struct dpaa2_sec_qp *)
+ (size_t)&(((struct dpaa2_sec_qp *)
dev->data->queue_pairs[0])->rx_vq));
session->ctxt = priv;
for (i = 0; i < bufsize; i++)
- PMD_DRV_LOG(DEBUG, "DESC[%d]:0x%x\n",
+ DPAA2_SEC_DEBUG("DESC[%d]:0x%x",
i, priv->flc_desc[0].desc[i]);
return 0;
PMD_INIT_FUNC_TRACE();
if (unlikely(sess == NULL)) {
- RTE_LOG(ERR, PMD, "invalid session struct\n");
+ DPAA2_SEC_ERR("Invalid session struct");
return -1;
}
dpaa2_sec_aead_init(dev, xform, session);
} else {
- RTE_LOG(ERR, PMD, "Invalid crypto type\n");
+ DPAA2_SEC_ERR("Invalid crypto type");
return -EINVAL;
}
struct ipsec_encap_pdb encap_pdb;
struct ipsec_decap_pdb decap_pdb;
struct alginfo authdata, cipherdata;
- unsigned int bufsize;
+ int bufsize;
struct sec_flow_context *flc;
PMD_INIT_FUNC_TRACE();
RTE_CACHE_LINE_SIZE);
if (priv == NULL) {
- RTE_LOG(ERR, PMD, "\nNo memory for priv CTXT");
+ DPAA2_SEC_ERR("No memory for priv CTXT");
return -ENOMEM;
}
RTE_CACHE_LINE_SIZE);
if (session->cipher_key.data == NULL &&
cipher_xform->key.length > 0) {
- RTE_LOG(ERR, PMD, "No Memory for cipher key\n");
+ DPAA2_SEC_ERR("No Memory for cipher key");
rte_free(priv);
return -ENOMEM;
}
RTE_CACHE_LINE_SIZE);
if (session->auth_key.data == NULL &&
auth_xform->key.length > 0) {
- RTE_LOG(ERR, PMD, "No Memory for auth key\n");
+ DPAA2_SEC_ERR("No Memory for auth key");
rte_free(session->cipher_key.data);
rte_free(priv);
return -ENOMEM;
memcpy(session->auth_key.data, auth_xform->key.data,
auth_xform->key.length);
- authdata.key = (uint64_t)session->auth_key.data;
+ authdata.key = (size_t)session->auth_key.data;
authdata.keylen = session->auth_key.length;
authdata.key_enc_flags = 0;
authdata.key_type = RTA_DATA_IMM;
case RTE_CRYPTO_AUTH_KASUMI_F9:
case RTE_CRYPTO_AUTH_AES_CBC_MAC:
case RTE_CRYPTO_AUTH_ZUC_EIA3:
- RTE_LOG(ERR, PMD, "Crypto: Unsupported auth alg %u\n",
- auth_xform->algo);
+ DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
+ auth_xform->algo);
goto out;
default:
- RTE_LOG(ERR, PMD, "Crypto: Undefined Auth specified %u\n",
- auth_xform->algo);
+ DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
+ auth_xform->algo);
goto out;
}
- cipherdata.key = (uint64_t)session->cipher_key.data;
+ cipherdata.key = (size_t)session->cipher_key.data;
cipherdata.keylen = session->cipher_key.length;
cipherdata.key_enc_flags = 0;
cipherdata.key_type = RTA_DATA_IMM;
case RTE_CRYPTO_CIPHER_3DES_ECB:
case RTE_CRYPTO_CIPHER_AES_ECB:
case RTE_CRYPTO_CIPHER_KASUMI_F8:
- RTE_LOG(ERR, PMD, "Crypto: Unsupported Cipher alg %u\n",
- cipher_xform->algo);
+ DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
+ cipher_xform->algo);
goto out;
default:
- RTE_LOG(ERR, PMD, "Crypto: Undefined Cipher specified %u\n",
- cipher_xform->algo);
+ DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
+ cipher_xform->algo);
goto out;
}
1, 0, &decap_pdb, &cipherdata, &authdata);
} else
goto out;
+
+ if (bufsize < 0) {
+ DPAA2_SEC_ERR("Crypto: Invalid buffer length");
+ goto out;
+ }
+
flc->word1_sdl = (uint8_t)bufsize;
/* Enable the stashing control bit */
DPAA2_SET_FLC_RSC(flc);
flc->word2_rflc_31_0 = lower_32_bits(
- (uint64_t)&(((struct dpaa2_sec_qp *)
+ (size_t)&(((struct dpaa2_sec_qp *)
dev->data->queue_pairs[0])->rx_vq) | 0x14);
flc->word3_rflc_63_32 = upper_32_bits(
- (uint64_t)&(((struct dpaa2_sec_qp *)
+ (size_t)&(((struct dpaa2_sec_qp *)
dev->data->queue_pairs[0])->rx_vq));
/* Set EWS bit i.e. enable write-safe */
int ret;
if (rte_mempool_get(mempool, &sess_private_data)) {
- CDEV_LOG_ERR(
- "Couldn't get object from session mempool");
+ DPAA2_SEC_ERR("Couldn't get object from session mempool");
return -ENOMEM;
}
return -EINVAL;
}
if (ret != 0) {
- PMD_DRV_LOG(ERR,
- "DPAA2 PMD: failed to configure session parameters");
-
+ DPAA2_SEC_ERR("Failed to configure session parameters");
/* Return session to mempool */
rte_mempool_put(mempool, sess_private_data);
return ret;
}
static int
-dpaa2_sec_session_configure(struct rte_cryptodev *dev,
+dpaa2_sec_sym_session_configure(struct rte_cryptodev *dev,
struct rte_crypto_sym_xform *xform,
struct rte_cryptodev_sym_session *sess,
struct rte_mempool *mempool)
int ret;
if (rte_mempool_get(mempool, &sess_private_data)) {
- CDEV_LOG_ERR(
- "Couldn't get object from session mempool");
+ DPAA2_SEC_ERR("Couldn't get object from session mempool");
return -ENOMEM;
}
ret = dpaa2_sec_set_session_parameters(dev, xform, sess_private_data);
if (ret != 0) {
- PMD_DRV_LOG(ERR, "DPAA2 PMD: failed to configure "
- "session parameters");
-
+ DPAA2_SEC_ERR("Failed to configure session parameters");
/* Return session to mempool */
rte_mempool_put(mempool, sess_private_data);
return ret;
}
- set_session_private_data(sess, dev->driver_id,
+ set_sym_session_private_data(sess, dev->driver_id,
sess_private_data);
return 0;
/** Clear the memory of session so it doesn't leave key material behind */
static void
-dpaa2_sec_session_clear(struct rte_cryptodev *dev,
+dpaa2_sec_sym_session_clear(struct rte_cryptodev *dev,
struct rte_cryptodev_sym_session *sess)
{
PMD_INIT_FUNC_TRACE();
uint8_t index = dev->driver_id;
- void *sess_priv = get_session_private_data(sess, index);
+ void *sess_priv = get_sym_session_private_data(sess, index);
dpaa2_sec_session *s = (dpaa2_sec_session *)sess_priv;
if (sess_priv) {
rte_free(s->auth_key.data);
memset(sess, 0, sizeof(dpaa2_sec_session));
struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
- set_session_private_data(sess, index, NULL);
+ set_sym_session_private_data(sess, index, NULL);
rte_mempool_put(sess_mp, sess_priv);
}
}
ret = dpseci_enable(dpseci, CMD_PRI_LOW, priv->token);
if (ret) {
- PMD_INIT_LOG(ERR, "DPSECI with HW_ID = %d ENABLE FAILED\n",
- priv->hw_id);
+ DPAA2_SEC_ERR("DPSECI with HW_ID = %d ENABLE FAILED",
+ priv->hw_id);
goto get_attr_failure;
}
ret = dpseci_get_attributes(dpseci, CMD_PRI_LOW, priv->token, &attr);
if (ret) {
- PMD_INIT_LOG(ERR,
- "DPSEC ATTRIBUTE READ FAILED, disabling DPSEC\n");
+ DPAA2_SEC_ERR("DPSEC ATTRIBUTE READ FAILED, disabling DPSEC");
goto get_attr_failure;
}
for (i = 0; i < attr.num_rx_queues && qp[i]; i++) {
dpseci_get_rx_queue(dpseci, CMD_PRI_LOW, priv->token, i,
&rx_attr);
dpaa2_q->fqid = rx_attr.fqid;
- PMD_INIT_LOG(DEBUG, "rx_fqid: %d", dpaa2_q->fqid);
+ DPAA2_SEC_DEBUG("rx_fqid: %d", dpaa2_q->fqid);
}
for (i = 0; i < attr.num_tx_queues && qp[i]; i++) {
dpaa2_q = &qp[i]->tx_vq;
dpseci_get_tx_queue(dpseci, CMD_PRI_LOW, priv->token, i,
&tx_attr);
dpaa2_q->fqid = tx_attr.fqid;
- PMD_INIT_LOG(DEBUG, "tx_fqid: %d", dpaa2_q->fqid);
+ DPAA2_SEC_DEBUG("tx_fqid: %d", dpaa2_q->fqid);
}
return 0;
ret = dpseci_disable(dpseci, CMD_PRI_LOW, priv->token);
if (ret) {
- PMD_INIT_LOG(ERR, "Failure in disabling dpseci %d device",
+ DPAA2_SEC_ERR("Failure in disabling dpseci %d device",
priv->hw_id);
return;
}
ret = dpseci_reset(dpseci, CMD_PRI_LOW, priv->token);
if (ret < 0) {
- PMD_INIT_LOG(ERR, "SEC Device cannot be reset:Error = %0x\n",
- ret);
+ DPAA2_SEC_ERR("SEC Device cannot be reset:Error = %0x", ret);
return;
}
}
/*Close the device at underlying layer*/
ret = dpseci_close(dpseci, CMD_PRI_LOW, priv->token);
if (ret) {
- PMD_INIT_LOG(ERR, "Failure closing dpseci device with"
- " error code %d\n", ret);
+ DPAA2_SEC_ERR("Failure closing dpseci device: err(%d)", ret);
return -1;
}
info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
info->feature_flags = dev->feature_flags;
info->capabilities = dpaa2_sec_capabilities;
- info->sym.max_nb_sessions = internals->max_nb_sessions;
+ /* No limit on the number of sessions */
+ info->sym.max_nb_sessions = 0;
info->driver_id = cryptodev_driver_id;
}
}
PMD_INIT_FUNC_TRACE();
if (stats == NULL) {
- PMD_DRV_LOG(ERR, "invalid stats ptr NULL");
+ DPAA2_SEC_ERR("Invalid stats ptr NULL");
return;
}
for (i = 0; i < dev->data->nb_queue_pairs; i++) {
if (qp[i] == NULL) {
- PMD_DRV_LOG(DEBUG, "Uninitialised queue pair");
+ DPAA2_SEC_DEBUG("Uninitialised queue pair");
continue;
}
ret = dpseci_get_sec_counters(dpseci, CMD_PRI_LOW, priv->token,
&counters);
if (ret) {
- PMD_DRV_LOG(ERR, "dpseci_get_sec_counters failed\n");
+ DPAA2_SEC_ERR("SEC counters failed");
} else {
- PMD_DRV_LOG(INFO, "dpseci hw stats:"
- "\n\tNumber of Requests Dequeued = %lu"
- "\n\tNumber of Outbound Encrypt Requests = %lu"
- "\n\tNumber of Inbound Decrypt Requests = %lu"
- "\n\tNumber of Outbound Bytes Encrypted = %lu"
- "\n\tNumber of Outbound Bytes Protected = %lu"
- "\n\tNumber of Inbound Bytes Decrypted = %lu"
- "\n\tNumber of Inbound Bytes Validated = %lu",
+ DPAA2_SEC_INFO("dpseci hardware stats:"
+ "\n\tNum of Requests Dequeued = %" PRIu64
+ "\n\tNum of Outbound Encrypt Requests = %" PRIu64
+ "\n\tNum of Inbound Decrypt Requests = %" PRIu64
+ "\n\tNum of Outbound Bytes Encrypted = %" PRIu64
+ "\n\tNum of Outbound Bytes Protected = %" PRIu64
+ "\n\tNum of Inbound Bytes Decrypted = %" PRIu64
+ "\n\tNum of Inbound Bytes Validated = %" PRIu64,
counters.dequeued_requests,
counters.ob_enc_requests,
counters.ib_dec_requests,
for (i = 0; i < dev->data->nb_queue_pairs; i++) {
if (qp[i] == NULL) {
- PMD_DRV_LOG(DEBUG, "Uninitialised queue pair");
+ DPAA2_SEC_DEBUG("Uninitialised queue pair");
continue;
}
qp[i]->tx_vq.rx_pkts = 0;
.stats_reset = dpaa2_sec_stats_reset,
.queue_pair_setup = dpaa2_sec_queue_pair_setup,
.queue_pair_release = dpaa2_sec_queue_pair_release,
- .queue_pair_start = dpaa2_sec_queue_pair_start,
- .queue_pair_stop = dpaa2_sec_queue_pair_stop,
.queue_pair_count = dpaa2_sec_queue_pair_count,
- .session_get_size = dpaa2_sec_session_get_size,
- .session_configure = dpaa2_sec_session_configure,
- .session_clear = dpaa2_sec_session_clear,
+ .sym_session_get_size = dpaa2_sec_sym_session_get_size,
+ .sym_session_configure = dpaa2_sec_sym_session_configure,
+ .sym_session_clear = dpaa2_sec_sym_session_clear,
};
static const struct rte_security_capability *
rte_mempool_free(internals->fle_pool);
- PMD_INIT_LOG(INFO, "Closing DPAA2_SEC device %s on numa socket %u\n",
- dev->data->name, rte_socket_id());
+ DPAA2_SEC_INFO("Closing DPAA2_SEC device %s on numa socket %u",
+ dev->data->name, rte_socket_id());
return 0;
}
PMD_INIT_FUNC_TRACE();
dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device);
if (dpaa2_dev == NULL) {
- PMD_INIT_LOG(ERR, "dpaa2_device not found\n");
+ DPAA2_SEC_ERR("DPAA2 SEC device not found");
return -1;
}
hw_id = dpaa2_dev->object_id;
cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
RTE_CRYPTODEV_FF_HW_ACCELERATED |
RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
- RTE_CRYPTODEV_FF_SECURITY;
+ RTE_CRYPTODEV_FF_SECURITY |
+ RTE_CRYPTODEV_FF_IN_PLACE_SGL |
+ RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
+ RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
+ RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
+ RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
internals = cryptodev->data->dev_private;
- internals->max_nb_sessions = RTE_DPAA2_SEC_PMD_MAX_NB_SESSIONS;
/*
* For secondary processes, we don't initialise any further as primary
* RX function
*/
if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
- PMD_INIT_LOG(DEBUG, "Device already init by primary process");
+ DPAA2_SEC_DEBUG("Device already init by primary process");
return 0;
}
dpseci = (struct fsl_mc_io *)rte_calloc(NULL, 1,
sizeof(struct fsl_mc_io), 0);
if (!dpseci) {
- PMD_INIT_LOG(ERR,
- "Error in allocating the memory for dpsec object");
+ DPAA2_SEC_ERR(
+ "Error in allocating the memory for dpsec object");
return -1;
}
dpseci->regs = rte_mcp_ptr_list[0];
retcode = dpseci_open(dpseci, CMD_PRI_LOW, hw_id, &token);
if (retcode != 0) {
- PMD_INIT_LOG(ERR, "Cannot open the dpsec device: Error = %x",
- retcode);
+ DPAA2_SEC_ERR("Cannot open the dpsec device: Error = %x",
+ retcode);
goto init_error;
}
retcode = dpseci_get_attributes(dpseci, CMD_PRI_LOW, token, &attr);
if (retcode != 0) {
- PMD_INIT_LOG(ERR,
+ DPAA2_SEC_ERR(
"Cannot get dpsec device attributed: Error = %x",
retcode);
goto init_error;
NULL, NULL, NULL, NULL,
SOCKET_ID_ANY, 0);
if (!internals->fle_pool) {
- RTE_LOG(ERR, PMD, "%s create failed\n", str);
+ DPAA2_SEC_ERR("Mempool (%s) creation failed", str);
goto init_error;
}
- PMD_INIT_LOG(DEBUG, "driver %s: created\n", cryptodev->data->name);
+ DPAA2_SEC_INFO("driver %s: created", cryptodev->data->name);
return 0;
init_error:
- PMD_INIT_LOG(ERR, "driver %s: create failed\n", cryptodev->data->name);
+ DPAA2_SEC_ERR("driver %s: create failed", cryptodev->data->name);
/* dpaa2_sec_uninit(crypto_dev_name); */
return -EFAULT;
}
static int
-cryptodev_dpaa2_sec_probe(struct rte_dpaa2_driver *dpaa2_drv,
+cryptodev_dpaa2_sec_probe(struct rte_dpaa2_driver *dpaa2_drv __rte_unused,
struct rte_dpaa2_device *dpaa2_dev)
{
struct rte_cryptodev *cryptodev;
if (cryptodev->data->dev_private == NULL)
rte_panic("Cannot allocate memzone for private "
- "device data");
+ "device data");
}
dpaa2_dev->cryptodev = cryptodev;
cryptodev->device = &dpaa2_dev->device;
- cryptodev->device->driver = &dpaa2_drv->driver;
/* init user callbacks */
TAILQ_INIT(&(cryptodev->link_intr_cbs));
static struct cryptodev_driver dpaa2_sec_crypto_drv;
RTE_PMD_REGISTER_DPAA2(CRYPTODEV_NAME_DPAA2_SEC_PMD, rte_dpaa2_sec_driver);
-RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa2_sec_crypto_drv, rte_dpaa2_sec_driver,
- cryptodev_driver_id);
+RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa2_sec_crypto_drv,
+ rte_dpaa2_sec_driver.driver, cryptodev_driver_id);
+
+RTE_INIT(dpaa2_sec_init_log)
+{
+ /* Register the log type for the DPAA2 SEC PMD */
+ dpaa2_logtype_sec = rte_log_register("pmd.crypto.dpaa2");
+ if (dpaa2_logtype_sec >= 0)
+ rte_log_set_level(dpaa2_logtype_sec, RTE_LOG_NOTICE);
+}