#define NPS_PKT_IN_INSTR_SIZE 64
#define IV_FROM_DPTR 1
#define FLEXI_CRYPTO_ENCRYPT_HMAC 0x33
+#define FLEXI_CRYPTO_MAX_AAD_LEN 512
#define AES_KEYSIZE_128 16
#define AES_KEYSIZE_192 24
#define AES_KEYSIZE_256 32
}
}
break;
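+ /* A single AEAD transform maps to the combined cipher + auth chain. */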
+ case RTE_CRYPTO_SYM_XFORM_AEAD:
+ res = NITROX_CHAIN_COMBINED;
+ break;
default:
break;
}
}
static bool
-auth_key_digest_is_valid(struct rte_crypto_auth_xform *xform,
- struct flexi_crypto_context *fctx)
+auth_key_is_valid(const uint8_t *data, uint16_t length,
+ struct flexi_crypto_context *fctx)
{
- if (unlikely(!xform->key.data && xform->key.length)) {
+ if (unlikely(!data && length)) {
NITROX_LOG(ERR, "Invalid auth key\n");
return false;
}
- if (unlikely(xform->key.length > sizeof(fctx->auth.opad))) {
+ if (unlikely(length > sizeof(fctx->auth.opad))) {
NITROX_LOG(ERR, "Invalid auth key length %d\n",
- xform->key.length);
+ length);
return false;
}
if (unlikely(type == AUTH_INVALID))
return -ENOTSUP;
- if (unlikely(!auth_key_digest_is_valid(xform, fctx)))
+ if (unlikely(!auth_key_is_valid(xform->key.data, xform->key.length,
+ fctx)))
return -EINVAL;
- ctx->auth_op = xform->op;
- ctx->auth_algo = xform->algo;
ctx->digest_length = xform->digest_length;
fctx->flags = rte_be_to_cpu_64(fctx->flags);
return 0;
}
+static int
+configure_aead_ctx(struct rte_crypto_aead_xform *xform,
+ struct nitrox_crypto_ctx *ctx)
+{
+ int aes_keylen;
+ struct flexi_crypto_context *fctx = &ctx->fctx;
+
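+ /* Only AES-GCM with AAD up to FLEXI_CRYPTO_MAX_AAD_LEN bytes is supported. */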
+ if (unlikely(xform->aad_length > FLEXI_CRYPTO_MAX_AAD_LEN)) {
+ NITROX_LOG(ERR, "AAD length %d not supported\n",
+ xform->aad_length);
+ return -ENOTSUP;
+ }
+
+ if (unlikely(xform->algo != RTE_CRYPTO_AEAD_AES_GCM))
+ return -ENOTSUP;
+
+ aes_keylen = flexi_aes_keylen(xform->key.length, true);
+ if (unlikely(aes_keylen < 0))
+ return -EINVAL;
+
+ if (unlikely(!auth_key_is_valid(xform->key.data, xform->key.length,
+ fctx)))
+ return -EINVAL;
+
+ if (unlikely(xform->iv.length > MAX_IV_LEN))
+ return -EINVAL;
+
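+ /* Switch the flags word to CPU order, program the GCM parameters, then convert back to big endian. */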
+ fctx->flags = rte_be_to_cpu_64(fctx->flags);
+ fctx->w0.cipher_type = CIPHER_AES_GCM;
+ fctx->w0.aes_keylen = aes_keylen;
+ fctx->w0.iv_source = IV_FROM_DPTR;
+ fctx->w0.hash_type = AUTH_NULL;
+ fctx->w0.auth_input_type = 1;
+ fctx->w0.mac_len = xform->digest_length;
+ fctx->flags = rte_cpu_to_be_64(fctx->flags);
+ memset(fctx->crypto.key, 0, sizeof(fctx->crypto.key));
+ memcpy(fctx->crypto.key, xform->key.data, xform->key.length);
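+ /* For GCM the cipher key also drives authentication (GHASH), so copy it into the auth area as well. */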
+ memset(&fctx->auth, 0, sizeof(fctx->auth));
+ memcpy(fctx->auth.opad, xform->key.data, xform->key.length);
+
+ ctx->opcode = FLEXI_CRYPTO_ENCRYPT_HMAC;
+ ctx->req_op = (xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
+ NITROX_OP_ENCRYPT : NITROX_OP_DECRYPT;
+ ctx->iv.offset = xform->iv.offset;
+ ctx->iv.length = xform->iv.length;
+ ctx->digest_length = xform->digest_length;
+ ctx->aad_length = xform->aad_length;
+ return 0;
+}
+
static int
nitrox_sym_dev_sess_configure(struct rte_cryptodev *cdev,
struct rte_crypto_sym_xform *xform,
struct nitrox_crypto_ctx *ctx;
struct rte_crypto_cipher_xform *cipher_xform = NULL;
struct rte_crypto_auth_xform *auth_xform = NULL;
+ struct rte_crypto_aead_xform *aead_xform = NULL;
+ int ret = -EINVAL;
if (rte_mempool_get(mempool, &mp_obj)) {
NITROX_LOG(ERR, "Couldn't allocate context\n");
auth_xform = &xform->auth;
cipher_xform = &xform->next->cipher;
break;
+ case NITROX_CHAIN_COMBINED:
+ aead_xform = &xform->aead;
+ break;
default:
NITROX_LOG(ERR, "Crypto chain not supported\n");
+ ret = -ENOTSUP;
goto err;
}
goto err;
}
+ if (aead_xform) {
+ ret = configure_aead_ctx(aead_xform, ctx);
+ if (unlikely(ret)) {
+ NITROX_LOG(ERR, "Failed to configure aead ctx\n");
+ goto err;
+ }
+ }
+
ctx->iova = rte_mempool_virt2iova(ctx);
set_sym_session_private_data(sess, cdev->driver_id, ctx);
return 0;
err:
rte_mempool_put(mempool, mp_obj);
- return -EINVAL;
+ return ret;
}
static void
}
static void
-softreq_copy_iv(struct nitrox_softreq *sr)
+softreq_copy_iv(struct nitrox_softreq *sr, uint8_t salt_size)
{
- sr->iv.virt = rte_crypto_op_ctod_offset(sr->op, uint8_t *,
- sr->ctx->iv.offset);
- sr->iv.iova = rte_crypto_op_ctophys_offset(sr->op, sr->ctx->iv.offset);
- sr->iv.len = sr->ctx->iv.length;
+ uint16_t offset = sr->ctx->iv.offset + salt_size;
+
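+ /* Skip any salt bytes at the start of the IV; these are programmed into the crypto context instead. */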
+ sr->iv.virt = rte_crypto_op_ctod_offset(sr->op, uint8_t *, offset);
+ sr->iv.iova = rte_crypto_op_ctophys_offset(sr->op, offset);
+ sr->iv.len = sr->ctx->iv.length - salt_size;
}
static int
struct rte_mbuf *mdst = op->sym->m_dst ? op->sym->m_dst :
op->sym->m_src;
- if (sr->ctx->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY &&
+ if (sr->ctx->req_op == NITROX_OP_DECRYPT &&
unlikely(!op->sym->auth.digest.data))
return -EINVAL;
if (unlikely(auth_only_len < 0))
return -EINVAL;
+ if (unlikely(
+ op->sym->cipher.data.offset + op->sym->cipher.data.length !=
+ op->sym->auth.data.offset + op->sym->auth.data.length)) {
+ NITROX_LOG(ERR, "Auth only data after cipher data not supported\n");
+ return -ENOTSUP;
+ }
+
err = create_sglist_from_mbuf(sgtbl, mbuf, op->sym->auth.data.offset,
auth_only_len);
if (unlikely(err))
return 0;
}
+static int
+create_combined_sglist(struct nitrox_softreq *sr, struct nitrox_sgtable *sgtbl,
+ struct rte_mbuf *mbuf)
+{
+ struct rte_crypto_op *op = sr->op;
+
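+ /* Input gather list for combined mode: IV, then AAD, then the AEAD data from the mbuf. */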
+ fill_sglist(sgtbl, sr->iv.len, sr->iv.iova, sr->iv.virt);
+ fill_sglist(sgtbl, sr->ctx->aad_length, op->sym->aead.aad.phys_addr,
+ op->sym->aead.aad.data);
+ return create_sglist_from_mbuf(sgtbl, mbuf, op->sym->cipher.data.offset,
+ op->sym->cipher.data.length);
+}
+
+static int
+create_aead_sglist(struct nitrox_softreq *sr, struct nitrox_sgtable *sgtbl,
+ struct rte_mbuf *mbuf)
+{
+ int err;
+
+ switch (sr->ctx->nitrox_chain) {
+ case NITROX_CHAIN_CIPHER_AUTH:
+ case NITROX_CHAIN_AUTH_CIPHER:
+ err = create_cipher_auth_sglist(sr, sgtbl, mbuf);
+ break;
+ case NITROX_CHAIN_COMBINED:
+ err = create_combined_sglist(sr, sgtbl, mbuf);
+ break;
+ default:
+ err = -EINVAL;
+ break;
+ }
+
+ return err;
+}
+
static void
create_sgcomp(struct nitrox_sgtable *sgtbl)
{
}
static int
-create_cipher_auth_inbuf(struct nitrox_softreq *sr,
- struct nitrox_sglist *digest)
+create_aead_inbuf(struct nitrox_softreq *sr, struct nitrox_sglist *digest)
{
int err;
struct nitrox_crypto_ctx *ctx = sr->ctx;
- err = create_cipher_auth_sglist(sr, &sr->in, sr->op->sym->m_src);
+ err = create_aead_sglist(sr, &sr->in, sr->op->sym->m_src);
if (unlikely(err))
return err;
- if (ctx->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY)
+ if (ctx->req_op == NITROX_OP_DECRYPT)
fill_sglist(&sr->in, digest->len, digest->iova, digest->virt);
create_sgcomp(&sr->in);
}
static int
-create_cipher_auth_oop_outbuf(struct nitrox_softreq *sr,
- struct nitrox_sglist *digest)
+create_aead_oop_outbuf(struct nitrox_softreq *sr, struct nitrox_sglist *digest)
{
int err;
struct nitrox_crypto_ctx *ctx = sr->ctx;
- err = create_cipher_auth_sglist(sr, &sr->out, sr->op->sym->m_dst);
+ err = create_aead_sglist(sr, &sr->out, sr->op->sym->m_dst);
if (unlikely(err))
return err;
- if (ctx->auth_op == RTE_CRYPTO_AUTH_OP_GENERATE)
+ if (ctx->req_op == NITROX_OP_ENCRYPT)
fill_sglist(&sr->out, digest->len, digest->iova, digest->virt);
return 0;
}
static void
-create_cipher_auth_inplace_outbuf(struct nitrox_softreq *sr,
- struct nitrox_sglist *digest)
+create_aead_inplace_outbuf(struct nitrox_softreq *sr,
+ struct nitrox_sglist *digest)
{
int i, cnt;
struct nitrox_crypto_ctx *ctx = sr->ctx;
}
sr->out.map_bufs_cnt = cnt;
- if (ctx->auth_op == RTE_CRYPTO_AUTH_OP_GENERATE) {
+ if (ctx->req_op == NITROX_OP_ENCRYPT) {
fill_sglist(&sr->out, digest->len, digest->iova,
digest->virt);
- } else if (ctx->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
+ } else if (ctx->req_op == NITROX_OP_DECRYPT) {
sr->out.map_bufs_cnt--;
}
}
static int
-create_cipher_auth_outbuf(struct nitrox_softreq *sr,
- struct nitrox_sglist *digest)
+create_aead_outbuf(struct nitrox_softreq *sr, struct nitrox_sglist *digest)
{
struct rte_crypto_op *op = sr->op;
int cnt = 0;
if (op->sym->m_dst) {
int err;
- err = create_cipher_auth_oop_outbuf(sr, digest);
+ err = create_aead_oop_outbuf(sr, digest);
if (unlikely(err))
return err;
} else {
- create_cipher_auth_inplace_outbuf(sr, digest);
+ create_aead_inplace_outbuf(sr, digest);
}
cnt = sr->out.map_bufs_cnt;
int err;
struct nitrox_sglist digest;
- softreq_copy_iv(sr);
+ softreq_copy_iv(sr, 0);
err = extract_cipher_auth_digest(sr, &digest);
if (unlikely(err))
return err;
- err = create_cipher_auth_inbuf(sr, &digest);
+ err = create_aead_inbuf(sr, &digest);
if (unlikely(err))
return err;
- err = create_cipher_auth_outbuf(sr, &digest);
+ err = create_aead_outbuf(sr, &digest);
if (unlikely(err))
return err;
return 0;
}
+static int
+softreq_copy_salt(struct nitrox_softreq *sr)
+{
+ struct nitrox_crypto_ctx *ctx = sr->ctx;
+ uint8_t *addr;
+
+ if (unlikely(ctx->iv.length < AES_GCM_SALT_SIZE)) {
+ NITROX_LOG(ERR, "Invalid IV length %d\n", ctx->iv.length);
+ return -EINVAL;
+ }
+
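+ /* Re-program the salt into the context only if it changed since the last operation. */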
+ addr = rte_crypto_op_ctod_offset(sr->op, uint8_t *, ctx->iv.offset);
+ if (!memcmp(ctx->salt, addr, AES_GCM_SALT_SIZE))
+ return 0;
+
+ memcpy(ctx->salt, addr, AES_GCM_SALT_SIZE);
+ memcpy(ctx->fctx.crypto.iv, addr, AES_GCM_SALT_SIZE);
+ return 0;
+}
+
+static int
+extract_combined_digest(struct nitrox_softreq *sr, struct nitrox_sglist *digest)
+{
+ struct rte_crypto_op *op = sr->op;
+ struct rte_mbuf *mdst = op->sym->m_dst ? op->sym->m_dst :
+ op->sym->m_src;
+
+ digest->len = sr->ctx->digest_length;
+ if (op->sym->aead.digest.data) {
+ digest->iova = op->sym->aead.digest.phys_addr;
+ digest->virt = op->sym->aead.digest.data;
+
+ return 0;
+ }
+
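+ /* No digest pointer supplied; the digest follows the AEAD data in the mbuf. */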
+ if (unlikely(rte_pktmbuf_data_len(mdst) < op->sym->aead.data.offset +
+ op->sym->aead.data.length + digest->len))
+ return -EINVAL;
+
+ digest->iova = rte_pktmbuf_iova_offset(mdst,
+ op->sym->aead.data.offset +
+ op->sym->aead.data.length);
+ digest->virt = rte_pktmbuf_mtod_offset(mdst, uint8_t *,
+ op->sym->aead.data.offset +
+ op->sym->aead.data.length);
+
+ return 0;
+}
+
+static int
+process_combined_data(struct nitrox_softreq *sr)
+{
+ int err;
+ struct nitrox_sglist digest;
+ struct rte_crypto_op *op = sr->op;
+
+ err = softreq_copy_salt(sr);
+ if (unlikely(err))
+ return err;
+
+ softreq_copy_iv(sr, AES_GCM_SALT_SIZE);
+ err = extract_combined_digest(sr, &digest);
+ if (unlikely(err))
+ return err;
+
+ err = create_aead_inbuf(sr, &digest);
+ if (unlikely(err))
+ return err;
+
+ err = create_aead_outbuf(sr, &digest);
+ if (unlikely(err))
+ return err;
+
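+ /* Build the request header with the cipher length, IV length and total auth length (data plus AAD). */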
+ create_aead_gph(op->sym->aead.data.length, sr->iv.len,
+ op->sym->aead.data.length + sr->ctx->aad_length,
+ &sr->gph);
+
+ return 0;
+}
+
static int
process_softreq(struct nitrox_softreq *sr)
{
case NITROX_CHAIN_AUTH_CIPHER:
err = process_cipher_auth_data(sr);
break;
+ case NITROX_CHAIN_COMBINED:
+ err = process_combined_data(sr);
+ break;
default:
err = -EINVAL;
break;
struct nitrox_crypto_ctx *ctx,
struct nitrox_softreq *sr)
{
+ int err;
+
softreq_init(sr, sr->iova);
sr->ctx = ctx;
sr->op = op;
- process_softreq(sr);
+ err = process_softreq(sr);
+ if (unlikely(err))
+ return err;
+
create_se_instr(sr, qno);
sr->timeout = rte_get_timer_cycles() + CMD_TIMEOUT * rte_get_timer_hz();
return 0;
cc = *(volatile uint64_t *)(&sr->resp.completion);
orh = *(volatile uint64_t *)(&sr->resp.orh);
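+ /* The low byte of the ORH carries the error code; report it even for completed requests. */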
if (cc != PENDING_SIG)
- err = 0;
+ err = orh & 0xff;
else if ((orh != PENDING_SIG) && (orh & 0xff))
err = orh & 0xff;
else if (rte_get_timer_cycles() >= sr->timeout)