0x5E, 0x26, 0x3C, 0x4D, 0x78, 0x9A, 0x47, 0xAC
};
-static __rte_always_inline int
-cpt_is_algo_supported(struct rte_crypto_sym_xform *xform)
-{
- /*
- * Microcode only supports the following combination.
- * Encryption followed by authentication
- * Authentication followed by decryption
- */
- if (xform->next) {
- if ((xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) &&
- (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) &&
- (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)) {
- /* Unsupported as of now by microcode */
- CPT_LOG_DP_ERR("Unsupported combination");
- return -1;
- }
- if ((xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) &&
- (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) &&
- (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT)) {
- /* For GMAC auth there is no cipher operation */
- if (xform->aead.algo != RTE_CRYPTO_AEAD_AES_GCM ||
- xform->next->auth.algo !=
- RTE_CRYPTO_AUTH_AES_GMAC) {
- /* Unsupported as of now by microcode */
- CPT_LOG_DP_ERR("Unsupported combination");
- return -1;
- }
- }
- }
- return 0;
-}
-
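+/*
+ * Derive the SNOW 3G microcode key: the 16B cipher key is repacked into
+ * the four 32-bit words of keyx[], in the order the ZUC/SNOW3G engine
+ * expects (layout assumed from how ci_key is filled by the callers).
+ */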
static __rte_always_inline void
-gen_key_snow3g(uint8_t *ck, uint32_t *keyx)
+gen_key_snow3g(const uint8_t *ck, uint32_t *keyx)
{
int i, base;
}
}
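+/*
+ * The microcode is assumed to emit full-length digests only, so the
+ * requested digest_length must equal the algorithm's native size;
+ * truncated MACs are rejected.
+ */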
+static __rte_always_inline int
+cpt_mac_len_verify(struct rte_crypto_auth_xform *auth)
+{
+ uint16_t mac_len = auth->digest_length;
+ int ret;
+
+ switch (auth->algo) {
+ case RTE_CRYPTO_AUTH_MD5:
+ case RTE_CRYPTO_AUTH_MD5_HMAC:
+ ret = (mac_len == 16) ? 0 : -1;
+ break;
+ case RTE_CRYPTO_AUTH_SHA1:
+ case RTE_CRYPTO_AUTH_SHA1_HMAC:
+ ret = (mac_len == 20) ? 0 : -1;
+ break;
+ case RTE_CRYPTO_AUTH_SHA224:
+ case RTE_CRYPTO_AUTH_SHA224_HMAC:
+ ret = (mac_len == 28) ? 0 : -1;
+ break;
+ case RTE_CRYPTO_AUTH_SHA256:
+ case RTE_CRYPTO_AUTH_SHA256_HMAC:
+ ret = (mac_len == 32) ? 0 : -1;
+ break;
+ case RTE_CRYPTO_AUTH_SHA384:
+ case RTE_CRYPTO_AUTH_SHA384_HMAC:
+ ret = (mac_len == 48) ? 0 : -1;
+ break;
+ case RTE_CRYPTO_AUTH_SHA512:
+ case RTE_CRYPTO_AUTH_SHA512_HMAC:
+ ret = (mac_len == 64) ? 0 : -1;
+ break;
+ case RTE_CRYPTO_AUTH_NULL:
+ ret = 0;
+ break;
+ default:
+ ret = -1;
+ }
+
+ return ret;
+}
+
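+/*
+ * Refresh only the 4B salt portion of the context IV; the per-op IV
+ * supplies the remaining nonce bytes (assumed from the 4B copy below).
+ */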
static __rte_always_inline void
-cpt_fc_salt_update(void *ctx,
+cpt_fc_salt_update(struct cpt_ctx *cpt_ctx,
uint8_t *salt)
{
- struct cpt_ctx *cpt_ctx = ctx;
- memcpy(&cpt_ctx->fctx.enc.encr_iv, salt, 4);
+ mc_fc_context_t *fctx = &cpt_ctx->mc_ctx.fctx;
+ memcpy(fctx->enc.encr_iv, salt, 4);
}
static __rte_always_inline int
cpt_fc_ciph_validate_key_aes(uint16_t key_len)
{
switch (key_len) {
- case CPT_BYTE_16:
- case CPT_BYTE_24:
- case CPT_BYTE_32:
+ case 16:
+ case 24:
+ case 32:
return 0;
default:
return -1;
}
static __rte_always_inline int
-cpt_fc_ciph_validate_key(cipher_type_t type, struct cpt_ctx *cpt_ctx,
- uint16_t key_len)
+cpt_fc_ciph_set_type(cipher_type_t type, struct cpt_ctx *ctx, uint16_t key_len)
{
int fc_type = 0;
switch (type) {
return -1;
fc_type = FC_GEN;
break;
+ case CHACHA20:
+ fc_type = FC_GEN;
+ break;
case AES_XTS:
key_len = key_len / 2;
- if (unlikely(key_len == CPT_BYTE_24)) {
+ if (unlikely(key_len == 24)) {
CPT_LOG_DP_ERR("Invalid AES key len for XTS");
return -1;
}
if (unlikely(key_len != 16))
return -1;
/* No support for AEAD yet */
- if (unlikely(cpt_ctx->hash_type))
+ if (unlikely(ctx->hash_type))
return -1;
fc_type = ZUC_SNOW3G;
break;
if (unlikely(key_len != 16))
return -1;
/* No support for AEAD yet */
- if (unlikely(cpt_ctx->hash_type))
+ if (unlikely(ctx->hash_type))
return -1;
fc_type = KASUMI;
break;
default:
return -1;
}
- return fc_type;
+
+ ctx->fc_type = fc_type;
+ return 0;
}
static __rte_always_inline void
cpt_fc_ciph_set_key_passthrough(struct cpt_ctx *cpt_ctx, mc_fc_context_t *fctx)
{
cpt_ctx->enc_cipher = 0;
- CPT_P_ENC_CTRL(fctx).enc_cipher = 0;
+ fctx->enc.enc_cipher = 0;
}
static __rte_always_inline void
{
mc_aes_type_t aes_key_type = 0;
switch (key_len) {
- case CPT_BYTE_16:
+ case 16:
aes_key_type = AES_128_BIT;
break;
- case CPT_BYTE_24:
+ case 24:
aes_key_type = AES_192_BIT;
break;
- case CPT_BYTE_32:
+ case 32:
aes_key_type = AES_256_BIT;
break;
default:
CPT_LOG_DP_ERR("Invalid AES key len");
return;
}
- CPT_P_ENC_CTRL(fctx).aes_key = aes_key_type;
+ fctx->enc.aes_key = aes_key_type;
}
static __rte_always_inline void
-cpt_fc_ciph_set_key_snow3g_uea2(struct cpt_ctx *cpt_ctx, uint8_t *key,
+cpt_fc_ciph_set_key_snow3g_uea2(struct cpt_ctx *cpt_ctx, const uint8_t *key,
uint16_t key_len)
{
+ mc_zuc_snow3g_ctx_t *zs_ctx = &cpt_ctx->mc_ctx.zs_ctx;
uint32_t keyx[4];
+
cpt_ctx->snow3g = 1;
gen_key_snow3g(key, keyx);
- memcpy(cpt_ctx->zs_ctx.ci_key, keyx, key_len);
- cpt_ctx->fc_type = ZUC_SNOW3G;
+ memcpy(zs_ctx->ci_key, keyx, key_len);
cpt_ctx->zsk_flags = 0;
}
static __rte_always_inline void
-cpt_fc_ciph_set_key_zuc_eea3(struct cpt_ctx *cpt_ctx, uint8_t *key,
+cpt_fc_ciph_set_key_zuc_eea3(struct cpt_ctx *cpt_ctx, const uint8_t *key,
uint16_t key_len)
{
+ mc_zuc_snow3g_ctx_t *zs_ctx = &cpt_ctx->mc_ctx.zs_ctx;
+
cpt_ctx->snow3g = 0;
- memcpy(cpt_ctx->zs_ctx.ci_key, key, key_len);
- memcpy(cpt_ctx->zs_ctx.zuc_const, zuc_d, 32);
- cpt_ctx->fc_type = ZUC_SNOW3G;
+ memcpy(zs_ctx->ci_key, key, key_len);
+ memcpy(zs_ctx->zuc_const, zuc_d, 32);
cpt_ctx->zsk_flags = 0;
}
static __rte_always_inline void
-cpt_fc_ciph_set_key_kasumi_f8_ecb(struct cpt_ctx *cpt_ctx, uint8_t *key,
+cpt_fc_ciph_set_key_kasumi_f8_ecb(struct cpt_ctx *cpt_ctx, const uint8_t *key,
uint16_t key_len)
{
+ mc_kasumi_ctx_t *k_ctx = &cpt_ctx->mc_ctx.k_ctx;
+
cpt_ctx->k_ecb = 1;
- memcpy(cpt_ctx->k_ctx.ci_key, key, key_len);
+ memcpy(k_ctx->ci_key, key, key_len);
cpt_ctx->zsk_flags = 0;
- cpt_ctx->fc_type = KASUMI;
}
static __rte_always_inline void
-cpt_fc_ciph_set_key_kasumi_f8_cbc(struct cpt_ctx *cpt_ctx, uint8_t *key,
+cpt_fc_ciph_set_key_kasumi_f8_cbc(struct cpt_ctx *cpt_ctx, const uint8_t *key,
uint16_t key_len)
{
- memcpy(cpt_ctx->k_ctx.ci_key, key, key_len);
+ mc_kasumi_ctx_t *k_ctx = &cpt_ctx->mc_ctx.k_ctx;
+
+ memcpy(k_ctx->ci_key, key, key_len);
cpt_ctx->zsk_flags = 0;
- cpt_ctx->fc_type = KASUMI;
}
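+/*
+ * Top-level cipher key setup: resolve and store fc_type, program the
+ * algorithm-specific key material, and for FC_GEN ciphers also record
+ * the cipher type and key in fctx.
+ */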
static __rte_always_inline int
-cpt_fc_ciph_set_key(void *ctx, cipher_type_t type, uint8_t *key,
- uint16_t key_len, uint8_t *salt)
+cpt_fc_ciph_set_key(struct cpt_ctx *cpt_ctx, cipher_type_t type,
+ const uint8_t *key, uint16_t key_len, uint8_t *salt)
{
- struct cpt_ctx *cpt_ctx = ctx;
- mc_fc_context_t *fctx = &cpt_ctx->fctx;
- uint64_t *ctrl_flags = NULL;
- int fc_type;
-
- /* Validate key before proceeding */
- fc_type = cpt_fc_ciph_validate_key(type, cpt_ctx, key_len);
- if (unlikely(fc_type == -1))
+ mc_fc_context_t *fctx = &cpt_ctx->mc_ctx.fctx;
+ int ret;
+
+ ret = cpt_fc_ciph_set_type(type, cpt_ctx, key_len);
+ if (unlikely(ret))
return -1;
- if (fc_type == FC_GEN) {
- cpt_ctx->fc_type = FC_GEN;
- ctrl_flags = (uint64_t *)&(fctx->enc.enc_ctrl.flags);
- *ctrl_flags = rte_be_to_cpu_64(*ctrl_flags);
+ if (cpt_ctx->fc_type == FC_GEN) {
/*
* We need to always say IV is from DPTR as user can
	 * sometimes override IV per operation.
*/
- CPT_P_ENC_CTRL(fctx).iv_source = CPT_FROM_DPTR;
+ fctx->enc.iv_source = CPT_FROM_DPTR;
+
+ if (cpt_ctx->auth_key_len > 64)
+ return -1;
}
switch (type) {
case PASSTHROUGH:
cpt_fc_ciph_set_key_passthrough(cpt_ctx, fctx);
- goto fc_success;
+ goto success;
case DES3_CBC:
/* CPT performs DES using 3DES with the 8B DES-key
* replicated 2 more times to match the 24B 3DES-key.
break;
case DES3_ECB:
/* For DES3_ECB IV need to be from CTX. */
- CPT_P_ENC_CTRL(fctx).iv_source = CPT_FROM_CTX;
+ fctx->enc.iv_source = CPT_FROM_CTX;
break;
case AES_CBC:
case AES_ECB:
case AES_CFB:
case AES_CTR:
+ case CHACHA20:
cpt_fc_ciph_set_key_set_aes_key_type(fctx, key_len);
break;
case AES_GCM:
* and nothing else
*/
if (!key)
- goto fc_success;
+ goto success;
}
cpt_fc_ciph_set_key_set_aes_key_type(fctx, key_len);
break;
cpt_fc_ciph_set_key_kasumi_f8_cbc(cpt_ctx, key, key_len);
goto success;
default:
- break;
+ return -1;
}
/* Only for FC_GEN case */
/* For GMAC auth, cipher must be NULL */
if (cpt_ctx->hash_type != GMAC_TYPE)
- CPT_P_ENC_CTRL(fctx).enc_cipher = type;
+ fctx->enc.enc_cipher = type;
memcpy(fctx->enc.encr_key, key, key_len);
-fc_success:
- *ctrl_flags = rte_cpu_to_be_64(*ctrl_flags);
-
success:
cpt_ctx->enc_cipher = type;
{
int32_t j;
uint32_t extra_len = extra_buf ? extra_buf->size : 0;
- uint32_t size = *psize - extra_len;
+ uint32_t size = *psize;
buf_ptr_t *bufs;
bufs = from->bufs;
uint32_t e_len;
sg_comp_t *to = &list[i >> 2];
- if (!bufs[j].size)
- continue;
-
if (unlikely(from_offset)) {
if (from_offset >= bufs[j].size) {
from_offset -= bufs[j].size;
to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);
}
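+		/*
+		 * Clamp the extra buffer here (and the trailing data below)
+		 * to the bytes still requested, so the SG list never
+		 * describes more than *psize bytes in total.
+		 */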
+ extra_len = RTE_MIN(extra_len, size);
/* Insert extra data ptr */
if (extra_len) {
i++;
to = &list[i >> 2];
to->u.s.len[i % 4] =
- rte_cpu_to_be_16(extra_buf->size);
+ rte_cpu_to_be_16(extra_len);
to->ptr[i % 4] =
rte_cpu_to_be_64(extra_buf->dma_addr);
-
- /* size already decremented by extra len */
+ size -= extra_len;
}
+ next_len = RTE_MIN(next_len, size);
/* insert the rest of the data */
if (next_len) {
i++;
return (uint32_t)i;
}
-static __rte_always_inline int
+static __rte_always_inline void
cpt_digest_gen_prep(uint32_t flags,
uint64_t d_lens,
digest_params_t *params,
{
struct cpt_request_info *req;
uint32_t size, i;
- int32_t m_size;
uint16_t data_len, mac_len, key_len;
auth_type_t hash_type;
buf_ptr_t *meta_p;
uint32_t g_size_bytes, s_size_bytes;
uint64_t dptr_dma, rptr_dma;
vq_cmd_word0_t vq_cmd_w0;
- vq_cmd_word3_t vq_cmd_w3;
void *c_vaddr, *m_vaddr;
uint64_t c_dma, m_dma;
- opcode_info_t opcode;
-
- if (!params || !params->ctx_buf.vaddr)
- return ERR_BAD_INPUT_ARG;
ctx = params->ctx_buf.vaddr;
meta_p = ¶ms->meta_buf;
- if (!meta_p->vaddr || !meta_p->dma_addr)
- return ERR_BAD_INPUT_ARG;
-
- if (meta_p->size < sizeof(struct cpt_request_info))
- return ERR_BAD_INPUT_ARG;
-
m_vaddr = meta_p->vaddr;
m_dma = meta_p->dma_addr;
- m_size = meta_p->size;
/*
* Save initial space that followed app data for completion code &
m_vaddr = (uint8_t *)m_vaddr + size;
m_dma += size;
- m_size -= size;
req = m_vaddr;
size = sizeof(struct cpt_request_info);
m_vaddr = (uint8_t *)m_vaddr + size;
m_dma += size;
- m_size -= size;
hash_type = ctx->hash_type;
mac_len = ctx->mac_len;
data_len = AUTH_DLEN(d_lens);
/*GP op header */
- vq_cmd_w0.u64 = 0;
- vq_cmd_w0.s.param2 = rte_cpu_to_be_16(((uint16_t)hash_type << 8));
+ vq_cmd_w0.s.opcode.minor = 0;
+ vq_cmd_w0.s.param2 = ((uint16_t)hash_type << 8);
if (ctx->hmac) {
- opcode.s.major = CPT_MAJOR_OP_HMAC | CPT_DMA_MODE;
- vq_cmd_w0.s.param1 = rte_cpu_to_be_16(key_len);
- vq_cmd_w0.s.dlen =
- rte_cpu_to_be_16((data_len + ROUNDUP8(key_len)));
+ vq_cmd_w0.s.opcode.major = CPT_MAJOR_OP_HMAC | CPT_DMA_MODE;
+ vq_cmd_w0.s.param1 = key_len;
+ vq_cmd_w0.s.dlen = data_len + RTE_ALIGN_CEIL(key_len, 8);
} else {
- opcode.s.major = CPT_MAJOR_OP_HASH | CPT_DMA_MODE;
+ vq_cmd_w0.s.opcode.major = CPT_MAJOR_OP_HASH | CPT_DMA_MODE;
vq_cmd_w0.s.param1 = 0;
- vq_cmd_w0.s.dlen = rte_cpu_to_be_16(data_len);
+ vq_cmd_w0.s.dlen = data_len;
}
- opcode.s.minor = 0;
-
	/* Only the null-cipher, null-auth case enters this branch */
if (unlikely(!hash_type && !ctx->enc_cipher)) {
- opcode.s.major = CPT_MAJOR_OP_MISC;
+ vq_cmd_w0.s.opcode.major = CPT_MAJOR_OP_MISC;
/* Minor op is passthrough */
- opcode.s.minor = 0x03;
+ vq_cmd_w0.s.opcode.minor = 0x03;
/* Send out completion code only */
vq_cmd_w0.s.param2 = 0x1;
}
- vq_cmd_w0.s.opcode = rte_cpu_to_be_16(opcode.flags);
-
/* DPTR has SG list */
in_buffer = m_vaddr;
dptr_dma = m_dma;
uint64_t k_dma = params->ctx_buf.dma_addr +
offsetof(struct cpt_ctx, auth_key);
/* Key */
- i = fill_sg_comp(gather_comp, i, k_dma, ROUNDUP8(key_len));
+ i = fill_sg_comp(gather_comp, i, k_dma,
+ RTE_ALIGN_CEIL(key_len, 8));
}
/* input data */
if (size) {
i = fill_sg_comp_from_iov(gather_comp, i, params->src_iov,
0, &size, NULL, 0);
- if (size) {
+ if (unlikely(size)) {
			CPT_LOG_DP_DEBUG("Insufficient src IOV size, short"
" by %dB", size);
- return ERR_BAD_INPUT_ARG;
+ return;
}
} else {
/*
scatter_comp = (sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);
if (flags & VALID_MAC_BUF) {
- if (params->mac_buf.size < mac_len)
- return ERR_BAD_INPUT_ARG;
+ if (unlikely(params->mac_buf.size < mac_len)) {
+ CPT_LOG_DP_ERR("Insufficient MAC size");
+ return;
+ }
size = mac_len;
i = fill_sg_comp_from_buf_min(scatter_comp, i,
i = fill_sg_comp_from_iov(scatter_comp, i,
params->src_iov, data_len,
&size, NULL, 0);
- if (size) {
- CPT_LOG_DP_DEBUG("Insufficient dst IOV size, short by"
- " %dB", size);
- return ERR_BAD_INPUT_ARG;
+ if (unlikely(size)) {
+ CPT_LOG_DP_ERR("Insufficient dst IOV size, short by"
+ " %dB", size);
+ return;
}
}
size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
	/* This is DPTR len in case of SG mode */
- vq_cmd_w0.s.dlen = rte_cpu_to_be_16(size);
+ vq_cmd_w0.s.dlen = size;
m_vaddr = (uint8_t *)m_vaddr + size;
m_dma += size;
- m_size -= size;
/* cpt alternate completion address saved earlier */
req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
req->ist.ei1 = dptr_dma;
req->ist.ei2 = rptr_dma;
- /* First 16-bit swap then 64-bit swap */
- /* TODO: HACK: Reverse the vq_cmd and cpt_req bit field definitions
- * to eliminate all the swapping
- */
- vq_cmd_w0.u64 = rte_cpu_to_be_64(vq_cmd_w0.u64);
-
- /* vq command w3 */
- vq_cmd_w3.u64 = 0;
/* 16 byte aligned cpt res address */
req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
/* Fill microcode part of instruction */
req->ist.ei0 = vq_cmd_w0.u64;
- req->ist.ei3 = vq_cmd_w3.u64;
req->op = op;
*prep_req = req;
- return 0;
+ return;
}
-static __rte_always_inline int
+static __rte_always_inline void
cpt_enc_hmac_prep(uint32_t flags,
uint64_t d_offs,
uint64_t d_lens,
uint32_t encr_data_len, auth_data_len, aad_len = 0;
uint32_t passthrough_len = 0;
void *m_vaddr, *offset_vaddr;
- uint64_t m_dma, offset_dma, ctx_dma;
+ uint64_t m_dma, offset_dma;
vq_cmd_word0_t vq_cmd_w0;
- vq_cmd_word3_t vq_cmd_w3;
void *c_vaddr;
uint64_t c_dma;
- int32_t m_size;
- opcode_info_t opcode;
meta_p = &fc_params->meta_buf;
m_vaddr = meta_p->vaddr;
m_dma = meta_p->dma_addr;
- m_size = meta_p->size;
encr_offset = ENCR_OFFSET(d_offs);
auth_offset = AUTH_OFFSET(d_offs);
m_vaddr = (uint8_t *)m_vaddr + size;
m_dma += size;
- m_size -= size;
/* start cpt request info struct at 8 byte boundary */
size = (uint8_t *)RTE_PTR_ALIGN(m_vaddr, 8) -
size += sizeof(struct cpt_request_info);
m_vaddr = (uint8_t *)m_vaddr + size;
m_dma += size;
- m_size -= size;
-
- if (hash_type == GMAC_TYPE)
- encr_data_len = 0;
if (unlikely(!(flags & VALID_IV_BUF))) {
iv_len = 0;
}
/* Encryption */
- opcode.s.major = CPT_MAJOR_OP_FC;
- opcode.s.minor = 0;
+ vq_cmd_w0.s.opcode.major = CPT_MAJOR_OP_FC;
+ vq_cmd_w0.s.opcode.minor = 0;
+
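+	/*
+	 * GMAC is authentication-only: clear the cipher offset and length
+	 * so the FC microcode performs no encryption pass.
+	 */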
+ if (hash_type == GMAC_TYPE) {
+ encr_offset = 0;
+ encr_data_len = 0;
+ }
auth_dlen = auth_offset + auth_data_len;
enc_dlen = encr_data_len + encr_offset;
if (unlikely(encr_data_len & 0xf)) {
if ((cipher_type == DES3_CBC) || (cipher_type == DES3_ECB))
- enc_dlen = ROUNDUP8(encr_data_len) + encr_offset;
+ enc_dlen = RTE_ALIGN_CEIL(encr_data_len, 8) +
+ encr_offset;
else if (likely((cipher_type == AES_CBC) ||
(cipher_type == AES_ECB)))
- enc_dlen = ROUNDUP16(encr_data_len) + encr_offset;
- }
-
- if (unlikely(hash_type == GMAC_TYPE)) {
- encr_offset = auth_dlen;
- enc_dlen = 0;
+ enc_dlen = RTE_ALIGN_CEIL(encr_data_len, 8) +
+ encr_offset;
}
if (unlikely(auth_dlen > enc_dlen)) {
}
/* GP op header */
- vq_cmd_w0.u64 = 0;
- vq_cmd_w0.s.param1 = rte_cpu_to_be_16(encr_data_len);
- vq_cmd_w0.s.param2 = rte_cpu_to_be_16(auth_data_len);
+ vq_cmd_w0.s.param1 = encr_data_len;
+ vq_cmd_w0.s.param2 = auth_data_len;
/*
* In 83XX since we have a limitation of
* IV & Offset control word not part of instruction
req->alternate_caddr = (uint64_t *)((uint8_t *)dm_vaddr
+ outputlen - iv_len);
- vq_cmd_w0.s.dlen = rte_cpu_to_be_16(inputlen + OFF_CTRL_LEN);
-
- vq_cmd_w0.s.opcode = rte_cpu_to_be_16(opcode.flags);
+ vq_cmd_w0.s.dlen = inputlen + OFF_CTRL_LEN;
if (likely(iv_len)) {
uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr
m_vaddr = (uint8_t *)m_vaddr + size;
m_dma += size;
- m_size -= size;
-
- opcode.s.major |= CPT_DMA_MODE;
- vq_cmd_w0.s.opcode = rte_cpu_to_be_16(opcode.flags);
+ vq_cmd_w0.s.opcode.major |= CPT_DMA_MODE;
if (likely(iv_len)) {
uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr
if (unlikely(size)) {
CPT_LOG_DP_ERR("Insufficient buffer space,"
" size %d needed", size);
- return ERR_BAD_INPUT_ARG;
+ return;
}
}
((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
aad_buf,
aad_offset);
}
- if (size)
- return ERR_BAD_INPUT_ARG;
+ if (unlikely(size)) {
+ CPT_LOG_DP_ERR("Insufficient buffer"
+ " space, size %d needed",
+ size);
+ return;
+ }
}
/* mac_data */
if (mac_len) {
CPT_LOG_DP_ERR("Insufficient buffer"
" space, size %d needed",
size);
- return ERR_BAD_INPUT_ARG;
+ return;
}
}
}
size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
	/* This is DPTR len in case of SG mode */
- vq_cmd_w0.s.dlen = rte_cpu_to_be_16(size);
+ vq_cmd_w0.s.dlen = size;
m_vaddr = (uint8_t *)m_vaddr + size;
m_dma += size;
- m_size -= size;
/* cpt alternate completion address saved earlier */
req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
req->ist.ei2 = rptr_dma;
}
- /* First 16-bit swap then 64-bit swap */
- /* TODO: HACK: Reverse the vq_cmd and cpt_req bit field definitions
- * to eliminate all the swapping
- */
- vq_cmd_w0.u64 = rte_cpu_to_be_64(vq_cmd_w0.u64);
-
- ctx_dma = fc_params->ctx_buf.dma_addr +
- offsetof(struct cpt_ctx, fctx);
- /* vq command w3 */
- vq_cmd_w3.u64 = 0;
- vq_cmd_w3.s.grp = 0;
- vq_cmd_w3.s.cptr = ctx_dma;
-
/* 16 byte aligned cpt res address */
req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
*req->completion_addr = COMPLETION_CODE_INIT;
/* Fill microcode part of instruction */
req->ist.ei0 = vq_cmd_w0.u64;
- req->ist.ei3 = vq_cmd_w3.u64;
req->op = op;
*prep_req = req;
- return 0;
+ return;
}
-static __rte_always_inline int
+static __rte_always_inline void
cpt_dec_hmac_prep(uint32_t flags,
uint64_t d_offs,
uint64_t d_lens,
uint32_t iv_offset = 0, size;
int32_t inputlen, outputlen, enc_dlen, auth_dlen;
struct cpt_ctx *cpt_ctx;
- int32_t hash_type, mac_len, m_size;
+ int32_t hash_type, mac_len;
uint8_t iv_len = 16;
struct cpt_request_info *req;
buf_ptr_t *meta_p, *aad_buf = NULL;
uint32_t encr_data_len, auth_data_len, aad_len = 0;
uint32_t passthrough_len = 0;
void *m_vaddr, *offset_vaddr;
- uint64_t m_dma, offset_dma, ctx_dma;
- opcode_info_t opcode;
+ uint64_t m_dma, offset_dma;
vq_cmd_word0_t vq_cmd_w0;
- vq_cmd_word3_t vq_cmd_w3;
void *c_vaddr;
uint64_t c_dma;
meta_p = &fc_params->meta_buf;
m_vaddr = meta_p->vaddr;
m_dma = meta_p->dma_addr;
- m_size = meta_p->size;
encr_offset = ENCR_OFFSET(d_offs);
auth_offset = AUTH_OFFSET(d_offs);
hash_type = cpt_ctx->hash_type;
mac_len = cpt_ctx->mac_len;
- if (hash_type == GMAC_TYPE)
- encr_data_len = 0;
-
if (unlikely(!(flags & VALID_IV_BUF))) {
iv_len = 0;
iv_offset = ENCR_IV_OFFSET(d_offs);
m_vaddr = (uint8_t *)m_vaddr + size;
m_dma += size;
- m_size -= size;
/* start cpt request info structure at 8 byte alignment */
size = (uint8_t *)RTE_PTR_ALIGN(m_vaddr, 8) -
size += sizeof(struct cpt_request_info);
m_vaddr = (uint8_t *)m_vaddr + size;
m_dma += size;
- m_size -= size;
/* Decryption */
- opcode.s.major = CPT_MAJOR_OP_FC;
- opcode.s.minor = 1;
+ vq_cmd_w0.s.opcode.major = CPT_MAJOR_OP_FC;
+ vq_cmd_w0.s.opcode.minor = 1;
+
+ if (hash_type == GMAC_TYPE) {
+ encr_offset = 0;
+ encr_data_len = 0;
+ }
enc_dlen = encr_offset + encr_data_len;
auth_dlen = auth_offset + auth_data_len;
outputlen = enc_dlen;
}
- if (hash_type == GMAC_TYPE)
- encr_offset = inputlen;
-
- vq_cmd_w0.u64 = 0;
- vq_cmd_w0.s.param1 = rte_cpu_to_be_16(encr_data_len);
- vq_cmd_w0.s.param2 = rte_cpu_to_be_16(auth_data_len);
+ vq_cmd_w0.s.param1 = encr_data_len;
+ vq_cmd_w0.s.param2 = auth_data_len;
/*
* In 83XX since we have a limitation of
* hmac.
*/
- vq_cmd_w0.s.dlen = rte_cpu_to_be_16(inputlen + OFF_CTRL_LEN);
-
- vq_cmd_w0.s.opcode = rte_cpu_to_be_16(opcode.flags);
+ vq_cmd_w0.s.dlen = inputlen + OFF_CTRL_LEN;
if (likely(iv_len)) {
uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
m_vaddr = (uint8_t *)m_vaddr + size;
m_dma += size;
- m_size -= size;
- opcode.s.major |= CPT_DMA_MODE;
-
- vq_cmd_w0.s.opcode = rte_cpu_to_be_16(opcode.flags);
+ vq_cmd_w0.s.opcode.major |= CPT_DMA_MODE;
if (likely(iv_len)) {
uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
aad_buf,
aad_offset);
}
- if (size)
- return ERR_BAD_INPUT_ARG;
+ if (unlikely(size)) {
+ CPT_LOG_DP_ERR("Insufficient buffer"
+ " space, size %d needed",
+ size);
+ return;
+ }
}
/* mac data */
uint32_t aad_offset = aad_len ?
passthrough_len : 0;
- if (!fc_params->src_iov)
- return ERR_BAD_INPUT_ARG;
+ if (unlikely(!fc_params->src_iov)) {
+ CPT_LOG_DP_ERR("Bad input args");
+ return;
+ }
i = fill_sg_comp_from_iov(
gather_comp, i,
aad_offset);
}
- if (size)
- return ERR_BAD_INPUT_ARG;
+ if (unlikely(size)) {
+ CPT_LOG_DP_ERR("Insufficient buffer"
+ " space, size %d needed",
+ size);
+ return;
+ }
}
}
((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
uint32_t aad_offset = aad_len ?
passthrough_len : 0;
- if (!fc_params->dst_iov)
- return ERR_BAD_INPUT_ARG;
+ if (unlikely(!fc_params->dst_iov)) {
+ CPT_LOG_DP_ERR("Bad input args");
+ return;
+ }
i = fill_sg_comp_from_iov(scatter_comp, i,
fc_params->dst_iov, 0,
aad_offset);
}
- if (unlikely(size))
- return ERR_BAD_INPUT_ARG;
+ if (unlikely(size)) {
+ CPT_LOG_DP_ERR("Insufficient buffer space,"
+ " size %d needed", size);
+ return;
+ }
}
((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
	/* This is DPTR len in case of SG mode */
- vq_cmd_w0.s.dlen = rte_cpu_to_be_16(size);
+ vq_cmd_w0.s.dlen = size;
m_vaddr = (uint8_t *)m_vaddr + size;
m_dma += size;
- m_size -= size;
/* cpt alternate completion address saved earlier */
req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
req->ist.ei2 = rptr_dma;
}
- /* First 16-bit swap then 64-bit swap */
- /* TODO: HACK: Reverse the vq_cmd and cpt_req bit field definitions
- * to eliminate all the swapping
- */
- vq_cmd_w0.u64 = rte_cpu_to_be_64(vq_cmd_w0.u64);
-
- ctx_dma = fc_params->ctx_buf.dma_addr +
- offsetof(struct cpt_ctx, fctx);
- /* vq command w3 */
- vq_cmd_w3.u64 = 0;
- vq_cmd_w3.s.grp = 0;
- vq_cmd_w3.s.cptr = ctx_dma;
-
/* 16 byte aligned cpt res address */
req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
*req->completion_addr = COMPLETION_CODE_INIT;
/* Fill microcode part of instruction */
req->ist.ei0 = vq_cmd_w0.u64;
- req->ist.ei3 = vq_cmd_w3.u64;
req->op = op;
*prep_req = req;
- return 0;
+ return;
}
-static __rte_always_inline int
+static __rte_always_inline void
cpt_zuc_snow3g_enc_prep(uint32_t req_flags,
uint64_t d_offs,
uint64_t d_lens,
buf_ptr_t *buf_p;
uint32_t encr_offset = 0, auth_offset = 0;
uint32_t encr_data_len = 0, auth_data_len = 0;
- int flags, iv_len = 16, m_size;
+ int flags, iv_len = 16;
void *m_vaddr, *c_vaddr;
uint64_t m_dma, c_dma, offset_ctrl;
uint64_t *offset_vaddr, offset_dma;
uint32_t *iv_s, iv[4];
vq_cmd_word0_t vq_cmd_w0;
- vq_cmd_word3_t vq_cmd_w3;
- opcode_info_t opcode;
buf_p = ¶ms->meta_buf;
m_vaddr = buf_p->vaddr;
m_dma = buf_p->dma_addr;
- m_size = buf_p->size;
cpt_ctx = params->ctx_buf.vaddr;
flags = cpt_ctx->zsk_flags;
m_vaddr = (uint8_t *)m_vaddr + size;
m_dma += size;
- m_size -= size;
/* Reserve memory for cpt request info */
req = m_vaddr;
size = sizeof(struct cpt_request_info);
m_vaddr = (uint8_t *)m_vaddr + size;
m_dma += size;
- m_size -= size;
- opcode.s.major = CPT_MAJOR_OP_ZUC_SNOW3G;
+ vq_cmd_w0.s.opcode.major = CPT_MAJOR_OP_ZUC_SNOW3G;
/* indicates CPTR ctx, operation type, KEY & IV mode from DPTR */
- opcode.s.minor = ((1 << 6) | (snow3g << 5) | (0 << 4) |
+
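+	/*
+	 * Worked example: a SNOW 3G cipher-only op (snow3g == 1,
+	 * flags == 0) yields minor = 0x80 | 0x20 = 0xA0.
+	 */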
+ vq_cmd_w0.s.opcode.minor = ((1 << 7) | (snow3g << 5) | (0 << 4) |
(0 << 3) | (flags & 0x7));
if (flags == 0x1) {
/*
* GP op header, lengths are expected in bits.
*/
- vq_cmd_w0.u64 = 0;
- vq_cmd_w0.s.param1 = rte_cpu_to_be_16(encr_data_len);
- vq_cmd_w0.s.param2 = rte_cpu_to_be_16(auth_data_len);
+ vq_cmd_w0.s.param1 = encr_data_len;
+ vq_cmd_w0.s.param2 = auth_data_len;
/*
* In 83XX since we have a limitation of
req->alternate_caddr = (uint64_t *)((uint8_t *)dm_vaddr
+ outputlen - iv_len);
- vq_cmd_w0.s.dlen = rte_cpu_to_be_16(inputlen + OFF_CTRL_LEN);
-
- vq_cmd_w0.s.opcode = rte_cpu_to_be_16(opcode.flags);
+ vq_cmd_w0.s.dlen = inputlen + OFF_CTRL_LEN;
if (likely(iv_len)) {
uint32_t *iv_d = (uint32_t *)((uint8_t *)offset_vaddr
m_vaddr = (uint8_t *)m_vaddr + OFF_CTRL_LEN + iv_len;
m_dma += OFF_CTRL_LEN + iv_len;
- m_size -= OFF_CTRL_LEN + iv_len;
- opcode.s.major |= CPT_DMA_MODE;
-
- vq_cmd_w0.s.opcode = rte_cpu_to_be_16(opcode.flags);
+ vq_cmd_w0.s.opcode.major |= CPT_DMA_MODE;
/* DPTR has SG list */
in_buffer = m_vaddr;
i = fill_sg_comp_from_iov(gather_comp, i,
params->src_iov,
0, &size, NULL, 0);
- if (size)
- return ERR_BAD_INPUT_ARG;
+ if (unlikely(size)) {
+ CPT_LOG_DP_ERR("Insufficient buffer space,"
+ " size %d needed", size);
+ return;
+ }
}
((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
params->dst_iov, 0,
&size, NULL, 0);
- if (size)
- return ERR_BAD_INPUT_ARG;
+ if (unlikely(size)) {
+ CPT_LOG_DP_ERR("Insufficient buffer space,"
+ " size %d needed", size);
+ return;
+ }
}
/* mac data */
params->dst_iov, 0,
&size, NULL, 0);
- if (size)
- return ERR_BAD_INPUT_ARG;
+ if (unlikely(size)) {
+ CPT_LOG_DP_ERR("Insufficient buffer space,"
+ " size %d needed", size);
+ return;
+ }
}
}
((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
	/* This is DPTR len in case of SG mode */
- vq_cmd_w0.s.dlen = rte_cpu_to_be_16(size);
+ vq_cmd_w0.s.dlen = size;
m_vaddr = (uint8_t *)m_vaddr + size;
m_dma += size;
- m_size -= size;
/* cpt alternate completion address saved earlier */
req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
req->ist.ei2 = rptr_dma;
}
- /* First 16-bit swap then 64-bit swap */
- /* TODO: HACK: Reverse the vq_cmd and cpt_req bit field definitions
- * to eliminate all the swapping
- */
- vq_cmd_w0.u64 = rte_cpu_to_be_64(vq_cmd_w0.u64);
-
- /* vq command w3 */
- vq_cmd_w3.u64 = 0;
- vq_cmd_w3.s.grp = 0;
- vq_cmd_w3.s.cptr = params->ctx_buf.dma_addr +
- offsetof(struct cpt_ctx, zs_ctx);
-
/* 16 byte aligned cpt res address */
req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
*req->completion_addr = COMPLETION_CODE_INIT;
/* Fill microcode part of instruction */
req->ist.ei0 = vq_cmd_w0.u64;
- req->ist.ei3 = vq_cmd_w3.u64;
req->op = op;
*prep_req = req;
- return 0;
+ return;
}
-static __rte_always_inline int
+static __rte_always_inline void
cpt_zuc_snow3g_dec_prep(uint32_t req_flags,
uint64_t d_offs,
uint64_t d_lens,
buf_ptr_t *buf_p;
uint32_t encr_offset;
uint32_t encr_data_len;
- int flags, m_size;
+ int flags;
void *m_vaddr, *c_vaddr;
uint64_t m_dma, c_dma;
uint64_t *offset_vaddr, offset_dma;
uint32_t *iv_s, iv[4], j;
vq_cmd_word0_t vq_cmd_w0;
- vq_cmd_word3_t vq_cmd_w3;
- opcode_info_t opcode;
buf_p = ¶ms->meta_buf;
m_vaddr = buf_p->vaddr;
m_dma = buf_p->dma_addr;
- m_size = buf_p->size;
/*
* Microcode expects offsets in bytes
m_vaddr = (uint8_t *)m_vaddr + size;
m_dma += size;
- m_size -= size;
/* Reserve memory for cpt request info */
req = m_vaddr;
size = sizeof(struct cpt_request_info);
m_vaddr = (uint8_t *)m_vaddr + size;
m_dma += size;
- m_size -= size;
- opcode.s.major = CPT_MAJOR_OP_ZUC_SNOW3G;
+ vq_cmd_w0.u64 = 0;
+ vq_cmd_w0.s.opcode.major = CPT_MAJOR_OP_ZUC_SNOW3G;
/* indicates CPTR ctx, operation type, KEY & IV mode from DPTR */
- opcode.s.minor = ((1 << 6) | (snow3g << 5) | (0 << 4) |
+
+ vq_cmd_w0.s.opcode.minor = ((1 << 7) | (snow3g << 5) | (0 << 4) |
(0 << 3) | (flags & 0x7));
/* consider iv len */
/*
* GP op header, lengths are expected in bits.
*/
- vq_cmd_w0.u64 = 0;
- vq_cmd_w0.s.param1 = rte_cpu_to_be_16(encr_data_len);
+ vq_cmd_w0.s.param1 = encr_data_len;
/*
* In 83XX since we have a limitation of
req->alternate_caddr = (uint64_t *)((uint8_t *)dm_vaddr
+ outputlen - iv_len);
- vq_cmd_w0.s.dlen = rte_cpu_to_be_16(inputlen + OFF_CTRL_LEN);
-
- vq_cmd_w0.s.opcode = rte_cpu_to_be_16(opcode.flags);
+ vq_cmd_w0.s.dlen = inputlen + OFF_CTRL_LEN;
if (likely(iv_len)) {
uint32_t *iv_d = (uint32_t *)((uint8_t *)offset_vaddr
m_vaddr = (uint8_t *)m_vaddr + OFF_CTRL_LEN + iv_len;
m_dma += OFF_CTRL_LEN + iv_len;
- m_size -= OFF_CTRL_LEN + iv_len;
- opcode.s.major |= CPT_DMA_MODE;
-
- vq_cmd_w0.s.opcode = rte_cpu_to_be_16(opcode.flags);
+ vq_cmd_w0.s.opcode.major |= CPT_DMA_MODE;
/* DPTR has SG list */
in_buffer = m_vaddr;
i = fill_sg_comp_from_iov(gather_comp, i,
params->src_iov,
0, &size, NULL, 0);
- if (size)
- return ERR_BAD_INPUT_ARG;
+ if (unlikely(size)) {
+ CPT_LOG_DP_ERR("Insufficient buffer space,"
+ " size %d needed", size);
+ return;
+ }
}
((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
params->dst_iov, 0,
&size, NULL, 0);
- if (size)
- return ERR_BAD_INPUT_ARG;
+ if (unlikely(size)) {
+ CPT_LOG_DP_ERR("Insufficient buffer space,"
+ " size %d needed", size);
+ return;
+ }
}
((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
	/* This is DPTR len in case of SG mode */
- vq_cmd_w0.s.dlen = rte_cpu_to_be_16(size);
+ vq_cmd_w0.s.dlen = size;
m_vaddr = (uint8_t *)m_vaddr + size;
m_dma += size;
- m_size -= size;
/* cpt alternate completion address saved earlier */
req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
req->ist.ei2 = rptr_dma;
}
- /* First 16-bit swap then 64-bit swap */
- /* TODO: HACK: Reverse the vq_cmd and cpt_req bit field definitions
- * to eliminate all the swapping
- */
- vq_cmd_w0.u64 = rte_cpu_to_be_64(vq_cmd_w0.u64);
-
- /* vq command w3 */
- vq_cmd_w3.u64 = 0;
- vq_cmd_w3.s.grp = 0;
- vq_cmd_w3.s.cptr = params->ctx_buf.dma_addr +
- offsetof(struct cpt_ctx, zs_ctx);
-
/* 16 byte aligned cpt res address */
req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
*req->completion_addr = COMPLETION_CODE_INIT;
/* Fill microcode part of instruction */
req->ist.ei0 = vq_cmd_w0.u64;
- req->ist.ei3 = vq_cmd_w3.u64;
req->op = op;
*prep_req = req;
- return 0;
+ return;
}
-static __rte_always_inline int
+static __rte_always_inline void
cpt_kasumi_enc_prep(uint32_t req_flags,
uint64_t d_offs,
uint64_t d_lens,
buf_ptr_t *buf_p;
uint32_t encr_offset, auth_offset;
uint32_t encr_data_len, auth_data_len;
- int flags, m_size;
+ int flags;
uint8_t *iv_s, *iv_d, iv_len = 8;
uint8_t dir = 0;
void *m_vaddr, *c_vaddr;
uint64_t m_dma, c_dma;
uint64_t *offset_vaddr, offset_dma;
vq_cmd_word0_t vq_cmd_w0;
- vq_cmd_word3_t vq_cmd_w3;
- opcode_info_t opcode;
uint8_t *in_buffer;
uint32_t g_size_bytes, s_size_bytes;
uint64_t dptr_dma, rptr_dma;
buf_p = ¶ms->meta_buf;
m_vaddr = buf_p->vaddr;
m_dma = buf_p->dma_addr;
- m_size = buf_p->size;
encr_offset = ENCR_OFFSET(d_offs) / 8;
auth_offset = AUTH_OFFSET(d_offs) / 8;
m_vaddr = (uint8_t *)m_vaddr + size;
m_dma += size;
- m_size -= size;
/* Reserve memory for cpt request info */
req = m_vaddr;
size = sizeof(struct cpt_request_info);
m_vaddr = (uint8_t *)m_vaddr + size;
m_dma += size;
- m_size -= size;
- opcode.s.major = CPT_MAJOR_OP_KASUMI | CPT_DMA_MODE;
+ vq_cmd_w0.s.opcode.major = CPT_MAJOR_OP_KASUMI | CPT_DMA_MODE;
/* indicates ECB/CBC, direction, ctx from cptr, iv from dptr */
- opcode.s.minor = ((1 << 6) | (cpt_ctx->k_ecb << 5) |
+ vq_cmd_w0.s.opcode.minor = ((1 << 6) | (cpt_ctx->k_ecb << 5) |
(dir << 4) | (0 << 3) | (flags & 0x7));
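+	/*
+	 * Worked example: with k_ecb == 1, dir == 0 and flags == 0,
+	 * minor = 0x40 | 0x20 = 0x60.
+	 */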
/*
* GP op header, lengths are expected in bits.
*/
- vq_cmd_w0.u64 = 0;
- vq_cmd_w0.s.param1 = rte_cpu_to_be_16(encr_data_len);
- vq_cmd_w0.s.param2 = rte_cpu_to_be_16(auth_data_len);
- vq_cmd_w0.s.opcode = rte_cpu_to_be_16(opcode.flags);
+ vq_cmd_w0.s.param1 = encr_data_len;
+ vq_cmd_w0.s.param2 = auth_data_len;
/* consider iv len */
if (flags == 0x0) {
m_vaddr = (uint8_t *)m_vaddr + OFF_CTRL_LEN + iv_len;
m_dma += OFF_CTRL_LEN + iv_len;
- m_size -= OFF_CTRL_LEN + iv_len;
/* DPTR has SG list */
in_buffer = m_vaddr;
params->src_iov, 0,
&size, NULL, 0);
- if (size)
- return ERR_BAD_INPUT_ARG;
+ if (unlikely(size)) {
+ CPT_LOG_DP_ERR("Insufficient buffer space,"
+ " size %d needed", size);
+ return;
+ }
}
((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
params->dst_iov, 0,
&size, NULL, 0);
- if (size)
- return ERR_BAD_INPUT_ARG;
+ if (unlikely(size)) {
+ CPT_LOG_DP_ERR("Insufficient buffer space,"
+ " size %d needed", size);
+ return;
+ }
}
/* mac data */
params->dst_iov, 0,
&size, NULL, 0);
- if (size)
- return ERR_BAD_INPUT_ARG;
+ if (unlikely(size)) {
+ CPT_LOG_DP_ERR("Insufficient buffer space,"
+ " size %d needed", size);
+ return;
+ }
}
}
((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
	/* This is DPTR len in case of SG mode */
- vq_cmd_w0.s.dlen = rte_cpu_to_be_16(size);
+ vq_cmd_w0.s.dlen = size;
m_vaddr = (uint8_t *)m_vaddr + size;
m_dma += size;
- m_size -= size;
/* cpt alternate completion address saved earlier */
req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
req->ist.ei1 = dptr_dma;
req->ist.ei2 = rptr_dma;
- /* First 16-bit swap then 64-bit swap */
- /* TODO: HACK: Reverse the vq_cmd and cpt_req bit field definitions
- * to eliminate all the swapping
- */
- vq_cmd_w0.u64 = rte_cpu_to_be_64(vq_cmd_w0.u64);
-
- /* vq command w3 */
- vq_cmd_w3.u64 = 0;
- vq_cmd_w3.s.grp = 0;
- vq_cmd_w3.s.cptr = params->ctx_buf.dma_addr +
- offsetof(struct cpt_ctx, k_ctx);
-
/* 16 byte aligned cpt res address */
req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
*req->completion_addr = COMPLETION_CODE_INIT;
/* Fill microcode part of instruction */
req->ist.ei0 = vq_cmd_w0.u64;
- req->ist.ei3 = vq_cmd_w3.u64;
req->op = op;
*prep_req = req;
- return 0;
+ return;
}
-static __rte_always_inline int
+static __rte_always_inline void
cpt_kasumi_dec_prep(uint64_t d_offs,
uint64_t d_lens,
fc_params_t *params,
buf_ptr_t *buf_p;
uint32_t encr_offset;
uint32_t encr_data_len;
- int flags, m_size;
+ int flags;
uint8_t dir = 0;
void *m_vaddr, *c_vaddr;
uint64_t m_dma, c_dma;
uint64_t *offset_vaddr, offset_dma;
vq_cmd_word0_t vq_cmd_w0;
- vq_cmd_word3_t vq_cmd_w3;
- opcode_info_t opcode;
uint8_t *in_buffer;
uint32_t g_size_bytes, s_size_bytes;
uint64_t dptr_dma, rptr_dma;
buf_p = ¶ms->meta_buf;
m_vaddr = buf_p->vaddr;
m_dma = buf_p->dma_addr;
- m_size = buf_p->size;
encr_offset = ENCR_OFFSET(d_offs) / 8;
encr_data_len = ENCR_DLEN(d_lens);
m_vaddr = (uint8_t *)m_vaddr + size;
m_dma += size;
- m_size -= size;
/* Reserve memory for cpt request info */
req = m_vaddr;
size = sizeof(struct cpt_request_info);
m_vaddr = (uint8_t *)m_vaddr + size;
m_dma += size;
- m_size -= size;
- opcode.s.major = CPT_MAJOR_OP_KASUMI | CPT_DMA_MODE;
+ vq_cmd_w0.u64 = 0;
+ vq_cmd_w0.s.opcode.major = CPT_MAJOR_OP_KASUMI | CPT_DMA_MODE;
/* indicates ECB/CBC, direction, ctx from cptr, iv from dptr */
- opcode.s.minor = ((1 << 6) | (cpt_ctx->k_ecb << 5) |
+ vq_cmd_w0.s.opcode.minor = ((1 << 6) | (cpt_ctx->k_ecb << 5) |
(dir << 4) | (0 << 3) | (flags & 0x7));
/*
* GP op header, lengths are expected in bits.
*/
- vq_cmd_w0.u64 = 0;
- vq_cmd_w0.s.param1 = rte_cpu_to_be_16(encr_data_len);
- vq_cmd_w0.s.opcode = rte_cpu_to_be_16(opcode.flags);
+ vq_cmd_w0.s.param1 = encr_data_len;
/* consider iv len */
encr_offset += iv_len;
m_vaddr = (uint8_t *)m_vaddr + OFF_CTRL_LEN + iv_len;
m_dma += OFF_CTRL_LEN + iv_len;
- m_size -= OFF_CTRL_LEN + iv_len;
/* DPTR has SG list */
in_buffer = m_vaddr;
i = fill_sg_comp_from_iov(gather_comp, i,
params->src_iov,
0, &size, NULL, 0);
- if (size)
- return ERR_BAD_INPUT_ARG;
+ if (unlikely(size)) {
+ CPT_LOG_DP_ERR("Insufficient buffer space,"
+ " size %d needed", size);
+ return;
+ }
}
((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
i = fill_sg_comp_from_iov(scatter_comp, i,
params->dst_iov, 0,
&size, NULL, 0);
- if (size)
- return ERR_BAD_INPUT_ARG;
+ if (unlikely(size)) {
+ CPT_LOG_DP_ERR("Insufficient buffer space,"
+ " size %d needed", size);
+ return;
+ }
}
((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
	/* This is DPTR len in case of SG mode */
- vq_cmd_w0.s.dlen = rte_cpu_to_be_16(size);
+ vq_cmd_w0.s.dlen = size;
m_vaddr = (uint8_t *)m_vaddr + size;
m_dma += size;
- m_size -= size;
/* cpt alternate completion address saved earlier */
req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
req->ist.ei1 = dptr_dma;
req->ist.ei2 = rptr_dma;
- /* First 16-bit swap then 64-bit swap */
- /* TODO: HACK: Reverse the vq_cmd and cpt_req bit field definitions
- * to eliminate all the swapping
- */
- vq_cmd_w0.u64 = rte_cpu_to_be_64(vq_cmd_w0.u64);
-
- /* vq command w3 */
- vq_cmd_w3.u64 = 0;
- vq_cmd_w3.s.grp = 0;
- vq_cmd_w3.s.cptr = params->ctx_buf.dma_addr +
- offsetof(struct cpt_ctx, k_ctx);
-
/* 16 byte aligned cpt res address */
req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
*req->completion_addr = COMPLETION_CODE_INIT;
/* Fill microcode part of instruction */
req->ist.ei0 = vq_cmd_w0.u64;
- req->ist.ei3 = vq_cmd_w3.u64;
req->op = op;
*prep_req = req;
- return 0;
+ return;
}
static __rte_always_inline void *
uint64_t d_offs,
uint64_t d_lens,
fc_params_t *fc_params,
- void *op, int *ret_val)
+ void *op)
{
struct cpt_ctx *ctx = fc_params->ctx_buf.vaddr;
uint8_t fc_type;
void *prep_req = NULL;
- int ret;
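+	/*
+	 * The prep helpers now return void; failure is signalled by
+	 * leaving prep_req NULL, which callers map to an errno.
+	 */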
fc_type = ctx->fc_type;
if (likely(fc_type == FC_GEN)) {
- ret = cpt_dec_hmac_prep(flags, d_offs, d_lens,
- fc_params, op, &prep_req);
+ cpt_dec_hmac_prep(flags, d_offs, d_lens, fc_params, op,
+ &prep_req);
} else if (fc_type == ZUC_SNOW3G) {
- ret = cpt_zuc_snow3g_dec_prep(flags, d_offs, d_lens,
- fc_params, op, &prep_req);
+ cpt_zuc_snow3g_dec_prep(flags, d_offs, d_lens, fc_params, op,
+ &prep_req);
} else if (fc_type == KASUMI) {
- ret = cpt_kasumi_dec_prep(d_offs, d_lens, fc_params, op,
- &prep_req);
- } else {
- /*
- * For AUTH_ONLY case,
- * MC only supports digest generation and verification
- * should be done in software by memcmp()
- */
-
- ret = ERR_EIO;
+ cpt_kasumi_dec_prep(d_offs, d_lens, fc_params, op, &prep_req);
}
- if (unlikely(!prep_req))
- *ret_val = ret;
+	/*
+	 * For the AUTH_ONLY case, MC only supports digest generation;
+	 * verification should be done in software by memcmp().
+	 */
+
return prep_req;
}
-static __rte_always_inline void *__hot
+static __rte_always_inline void *__rte_hot
cpt_fc_enc_hmac_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens,
- fc_params_t *fc_params, void *op, int *ret_val)
+ fc_params_t *fc_params, void *op)
{
struct cpt_ctx *ctx = fc_params->ctx_buf.vaddr;
uint8_t fc_type;
void *prep_req = NULL;
- int ret;
fc_type = ctx->fc_type;
/* Common api for rest of the ops */
if (likely(fc_type == FC_GEN)) {
- ret = cpt_enc_hmac_prep(flags, d_offs, d_lens,
- fc_params, op, &prep_req);
+ cpt_enc_hmac_prep(flags, d_offs, d_lens, fc_params, op,
+ &prep_req);
} else if (fc_type == ZUC_SNOW3G) {
- ret = cpt_zuc_snow3g_enc_prep(flags, d_offs, d_lens,
- fc_params, op, &prep_req);
+ cpt_zuc_snow3g_enc_prep(flags, d_offs, d_lens, fc_params, op,
+ &prep_req);
} else if (fc_type == KASUMI) {
- ret = cpt_kasumi_enc_prep(flags, d_offs, d_lens,
- fc_params, op, &prep_req);
+ cpt_kasumi_enc_prep(flags, d_offs, d_lens, fc_params, op,
+ &prep_req);
} else if (fc_type == HASH_HMAC) {
- ret = cpt_digest_gen_prep(flags, d_lens, fc_params, op,
- &prep_req);
- } else {
- ret = ERR_EIO;
+ cpt_digest_gen_prep(flags, d_lens, fc_params, op, &prep_req);
}
- if (unlikely(!prep_req))
- *ret_val = ret;
return prep_req;
}
static __rte_always_inline int
-cpt_fc_auth_set_key(void *ctx, auth_type_t type, uint8_t *key,
- uint16_t key_len, uint16_t mac_len)
+cpt_fc_auth_set_key(struct cpt_ctx *cpt_ctx, auth_type_t type,
+ const uint8_t *key, uint16_t key_len, uint16_t mac_len)
{
- struct cpt_ctx *cpt_ctx = ctx;
- mc_fc_context_t *fctx = &cpt_ctx->fctx;
- uint64_t *ctrl_flags = NULL;
+ mc_fc_context_t *fctx = &cpt_ctx->mc_ctx.fctx;
+ mc_zuc_snow3g_ctx_t *zs_ctx = &cpt_ctx->mc_ctx.zs_ctx;
+ mc_kasumi_ctx_t *k_ctx = &cpt_ctx->mc_ctx.k_ctx;
if ((type >= ZUC_EIA3) && (type <= KASUMI_F9_ECB)) {
uint32_t keyx[4];
case SNOW3G_UIA2:
cpt_ctx->snow3g = 1;
gen_key_snow3g(key, keyx);
- memcpy(cpt_ctx->zs_ctx.ci_key, keyx, key_len);
+ memcpy(zs_ctx->ci_key, keyx, key_len);
cpt_ctx->fc_type = ZUC_SNOW3G;
cpt_ctx->zsk_flags = 0x1;
break;
case ZUC_EIA3:
cpt_ctx->snow3g = 0;
- memcpy(cpt_ctx->zs_ctx.ci_key, key, key_len);
- memcpy(cpt_ctx->zs_ctx.zuc_const, zuc_d, 32);
+ memcpy(zs_ctx->ci_key, key, key_len);
+ memcpy(zs_ctx->zuc_const, zuc_d, 32);
cpt_ctx->fc_type = ZUC_SNOW3G;
cpt_ctx->zsk_flags = 0x1;
break;
case KASUMI_F9_ECB:
/* Kasumi ECB mode */
cpt_ctx->k_ecb = 1;
- memcpy(cpt_ctx->k_ctx.ci_key, key, key_len);
+ memcpy(k_ctx->ci_key, key, key_len);
cpt_ctx->fc_type = KASUMI;
cpt_ctx->zsk_flags = 0x1;
break;
case KASUMI_F9_CBC:
- memcpy(cpt_ctx->k_ctx.ci_key, key, key_len);
+ memcpy(k_ctx->ci_key, key, key_len);
cpt_ctx->fc_type = KASUMI;
cpt_ctx->zsk_flags = 0x1;
break;
cpt_ctx->fc_type = HASH_HMAC;
}
- ctrl_flags = (uint64_t *)&fctx->enc.enc_ctrl.flags;
- *ctrl_flags = rte_be_to_cpu_64(*ctrl_flags);
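+	/*
+	 * fctx->hmac.opad holds the raw auth key and is assumed to be one
+	 * SHA block (64B) wide; longer keys would need pre-hashing, which
+	 * this path does not do, so reject them.
+	 */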
+ if (cpt_ctx->fc_type == FC_GEN && key_len > 64)
+ return -1;
/* For GMAC auth, cipher must be NULL */
if (type == GMAC_TYPE)
- CPT_P_ENC_CTRL(fctx).enc_cipher = 0;
+ fctx->enc.enc_cipher = 0;
- CPT_P_ENC_CTRL(fctx).hash_type = cpt_ctx->hash_type = type;
- CPT_P_ENC_CTRL(fctx).mac_len = cpt_ctx->mac_len = mac_len;
+ fctx->enc.hash_type = cpt_ctx->hash_type = type;
+ fctx->enc.mac_len = cpt_ctx->mac_len = mac_len;
if (key_len) {
cpt_ctx->hmac = 1;
cpt_ctx->auth_key_len = key_len;
memset(fctx->hmac.ipad, 0, sizeof(fctx->hmac.ipad));
memset(fctx->hmac.opad, 0, sizeof(fctx->hmac.opad));
- memcpy(fctx->hmac.opad, key, key_len);
- CPT_P_ENC_CTRL(fctx).auth_input_type = 1;
+
+ if (key_len <= 64)
+ memcpy(fctx->hmac.opad, key, key_len);
+ fctx->enc.auth_input_type = 1;
}
- *ctrl_flags = rte_cpu_to_be_64(*ctrl_flags);
return 0;
}
cipher_type_t enc_type = 0; /* NULL Cipher type */
auth_type_t auth_type = 0; /* NULL Auth type */
uint32_t cipher_key_len = 0;
- uint8_t zsk_flag = 0, aes_gcm = 0;
+ uint8_t aes_gcm = 0;
aead_form = &xform->aead;
- void *ctx;
+ void *ctx = SESS_PRIV(sess);
- if (aead_form->op == RTE_CRYPTO_AEAD_OP_ENCRYPT &&
- aead_form->algo == RTE_CRYPTO_AEAD_AES_GCM) {
+ if (aead_form->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
sess->cpt_op |= CPT_OP_CIPHER_ENCRYPT;
sess->cpt_op |= CPT_OP_AUTH_GENERATE;
- } else if (aead_form->op == RTE_CRYPTO_AEAD_OP_DECRYPT &&
- aead_form->algo == RTE_CRYPTO_AEAD_AES_GCM) {
+ } else if (aead_form->op == RTE_CRYPTO_AEAD_OP_DECRYPT) {
sess->cpt_op |= CPT_OP_CIPHER_DECRYPT;
sess->cpt_op |= CPT_OP_AUTH_VERIFY;
} else {
- CPT_LOG_DP_ERR("Unknown cipher operation\n");
+		CPT_LOG_DP_ERR("Unknown aead operation");
return -1;
}
switch (aead_form->algo) {
CPT_LOG_DP_ERR("Crypto: Unsupported cipher algo %u",
aead_form->algo);
return -1;
+ case RTE_CRYPTO_AEAD_CHACHA20_POLY1305:
+ enc_type = CHACHA20;
+ auth_type = POLY1305;
+ cipher_key_len = 32;
+ sess->chacha_poly = 1;
+ break;
default:
CPT_LOG_DP_ERR("Crypto: Undefined cipher algo %u specified",
aead_form->algo);
(unsigned int long)aead_form->key.length);
return -1;
}
- sess->zsk_flag = zsk_flag;
+ sess->zsk_flag = 0;
sess->aes_gcm = aes_gcm;
sess->mac_len = aead_form->digest_length;
sess->iv_offset = aead_form->iv.offset;
sess->iv_length = aead_form->iv.length;
sess->aad_length = aead_form->aad_length;
- ctx = (void *)((uint8_t *)sess + sizeof(struct cpt_sess_misc)),
- cpt_fc_ciph_set_key(ctx, enc_type, aead_form->key.data,
- aead_form->key.length, NULL);
+ if (unlikely(cpt_fc_ciph_set_key(ctx, enc_type, aead_form->key.data,
+ aead_form->key.length, NULL)))
+ return -1;
- cpt_fc_auth_set_key(ctx, auth_type, NULL, 0, aead_form->digest_length);
+ if (unlikely(cpt_fc_auth_set_key(ctx, auth_type, NULL, 0,
+ aead_form->digest_length)))
+ return -1;
return 0;
}
struct rte_crypto_cipher_xform *c_form;
cipher_type_t enc_type = 0; /* NULL Cipher type */
uint32_t cipher_key_len = 0;
- uint8_t zsk_flag = 0, aes_gcm = 0, aes_ctr = 0, is_null = 0;
-
- if (xform->type != RTE_CRYPTO_SYM_XFORM_CIPHER)
- return -1;
+ uint8_t zsk_flag = 0, aes_ctr = 0, is_null = 0;
c_form = &xform->cipher;
}
sess->zsk_flag = zsk_flag;
- sess->aes_gcm = aes_gcm;
+ sess->aes_gcm = 0;
sess->aes_ctr = aes_ctr;
sess->iv_offset = c_form->iv.offset;
sess->iv_length = c_form->iv.length;
sess->is_null = is_null;
- cpt_fc_ciph_set_key(SESS_PRIV(sess), enc_type, c_form->key.data,
- c_form->key.length, NULL);
+ if (unlikely(cpt_fc_ciph_set_key(SESS_PRIV(sess), enc_type,
+ c_form->key.data, c_form->key.length, NULL)))
+ return -1;
return 0;
}
auth_type_t auth_type = 0; /* NULL Auth type */
uint8_t zsk_flag = 0, aes_gcm = 0, is_null = 0;
- if (xform->type != RTE_CRYPTO_SYM_XFORM_AUTH)
- goto error_out;
-
a_form = &xform->auth;
if (a_form->op == RTE_CRYPTO_AUTH_OP_VERIFY)
return -1;
}
- if (a_form->key.length > 64) {
- CPT_LOG_DP_ERR("Auth key length is big");
- return -1;
- }
-
switch (a_form->algo) {
case RTE_CRYPTO_AUTH_SHA1_HMAC:
/* Fall through */
case RTE_CRYPTO_AUTH_AES_CBC_MAC:
CPT_LOG_DP_ERR("Crypto: Unsupported hash algo %u",
a_form->algo);
- goto error_out;
+ return -1;
default:
CPT_LOG_DP_ERR("Crypto: Undefined Hash algo %u specified",
a_form->algo);
- goto error_out;
+ return -1;
}
sess->zsk_flag = zsk_flag;
sess->auth_iv_offset = a_form->iv.offset;
sess->auth_iv_length = a_form->iv.length;
}
- cpt_fc_auth_set_key(SESS_PRIV(sess), auth_type, a_form->key.data,
- a_form->key.length, a_form->digest_length);
+ if (unlikely(cpt_fc_auth_set_key(SESS_PRIV(sess), auth_type,
+ a_form->key.data, a_form->key.length,
+ a_form->digest_length)))
+ return -1;
return 0;
-
-error_out:
- return -1;
}
static __rte_always_inline int
struct rte_crypto_auth_xform *a_form;
cipher_type_t enc_type = 0; /* NULL Cipher type */
auth_type_t auth_type = 0; /* NULL Auth type */
- uint8_t zsk_flag = 0, aes_gcm = 0;
- void *ctx;
-
- if (xform->type != RTE_CRYPTO_SYM_XFORM_AUTH)
- return -1;
+ void *ctx = SESS_PRIV(sess);
a_form = &xform->auth;
return -1;
}
- sess->zsk_flag = zsk_flag;
- sess->aes_gcm = aes_gcm;
+ sess->zsk_flag = 0;
+ sess->aes_gcm = 0;
sess->is_gmac = 1;
sess->iv_offset = a_form->iv.offset;
sess->iv_length = a_form->iv.length;
sess->mac_len = a_form->digest_length;
- ctx = (void *)((uint8_t *)sess + sizeof(struct cpt_sess_misc)),
- cpt_fc_ciph_set_key(ctx, enc_type, a_form->key.data,
- a_form->key.length, NULL);
- cpt_fc_auth_set_key(ctx, auth_type, NULL, 0, a_form->digest_length);
+ if (unlikely(cpt_fc_ciph_set_key(ctx, enc_type, a_form->key.data,
+ a_form->key.length, NULL)))
+ return -1;
+
+ if (unlikely(cpt_fc_auth_set_key(ctx, auth_type, NULL, 0,
+ a_form->digest_length)))
+ return -1;
return 0;
}
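+	/*
+	 * Fast path: when the mbuf has enough tailroom, carve the op
+	 * metadata from the end of its data buffer (keeping 8B of slack,
+	 * presumably for alignment) instead of doing a mempool alloc.
+	 */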
tailroom = rte_pktmbuf_tailroom(m_src);
if (likely(tailroom > len + 8)) {
mdata = (uint8_t *)m_src->buf_addr + m_src->buf_len;
- mphys = m_src->buf_physaddr + m_src->buf_len;
+ mphys = m_src->buf_iova + m_src->buf_len;
mdata -= len;
mphys -= len;
buf->vaddr = mdata;
if (!start_offset) {
seg_data = rte_pktmbuf_mtod(pkt, void *);
- seg_phys = rte_pktmbuf_mtophys(pkt);
+ seg_phys = rte_pktmbuf_iova(pkt);
seg_size = pkt->data_len;
} else {
while (start_offset >= pkt->data_len) {
}
seg_data = rte_pktmbuf_mtod_offset(pkt, void *, start_offset);
- seg_phys = rte_pktmbuf_mtophys_offset(pkt, start_offset);
+ seg_phys = rte_pktmbuf_iova_offset(pkt, start_offset);
seg_size = pkt->data_len - start_offset;
if (!seg_size)
return 1;
while (unlikely(pkt != NULL)) {
seg_data = rte_pktmbuf_mtod(pkt, void *);
- seg_phys = rte_pktmbuf_mtophys(pkt);
+ seg_phys = rte_pktmbuf_iova(pkt);
seg_size = pkt->data_len;
if (!seg_size)
break;
iov_ptr_t *iovec;
seg_data = rte_pktmbuf_mtod(pkt, void *);
- seg_phys = rte_pktmbuf_mtophys(pkt);
+ seg_phys = rte_pktmbuf_iova(pkt);
seg_size = pkt->data_len;
/* first seg */
while (unlikely(pkt != NULL)) {
seg_data = rte_pktmbuf_mtod(pkt, void *);
- seg_phys = rte_pktmbuf_mtophys(pkt);
+ seg_phys = rte_pktmbuf_iova(pkt);
seg_size = pkt->data_len;
if (!seg_size)
return 0;
}
-static __rte_always_inline void *
+static __rte_always_inline int
fill_fc_params(struct rte_crypto_op *cop,
struct cpt_sess_misc *sess_misc,
+ struct cpt_qp_meta_info *m_info,
void **mdata_ptr,
- int *op_ret)
+ void **prep_req)
{
uint32_t space = 0;
struct rte_crypto_sym_op *sym_op = cop->sym;
- void *mdata;
+ void *mdata = NULL;
uintptr_t *op;
uint32_t mc_hash_off;
uint32_t flags = 0;
uint64_t d_offs, d_lens;
- void *prep_req = NULL;
struct rte_mbuf *m_src, *m_dst;
uint8_t cpt_op = sess_misc->cpt_op;
- uint8_t zsk_flag = sess_misc->zsk_flag;
- uint8_t aes_gcm = sess_misc->aes_gcm;
- uint16_t mac_len = sess_misc->mac_len;
#ifdef CPT_ALWAYS_USE_SG_MODE
uint8_t inplace = 0;
#else
char src[SRC_IOV_SIZE];
char dst[SRC_IOV_SIZE];
uint32_t iv_buf[4];
- struct cptvf_meta_info *cpt_m_info =
- (struct cptvf_meta_info *)(*mdata_ptr);
+ int ret;
if (likely(sess_misc->iv_length)) {
flags |= VALID_IV_BUF;
}
}
- if (zsk_flag) {
+ if (sess_misc->zsk_flag) {
fc_params.auth_iv_buf = rte_crypto_op_ctod_offset(cop,
uint8_t *,
sess_misc->auth_iv_offset);
- if (zsk_flag == K_F9) {
- CPT_LOG_DP_ERR("Should not reach here for "
- "kasumi F9\n");
- }
- if (zsk_flag != ZS_EA)
+ if (sess_misc->zsk_flag != ZS_EA)
inplace = 0;
}
m_src = sym_op->m_src;
m_dst = sym_op->m_dst;
- if (aes_gcm) {
+ if (sess_misc->aes_gcm || sess_misc->chacha_poly) {
uint8_t *salt;
uint8_t *aad_data;
uint16_t aad_len;
sess_misc->salt = *(uint32_t *)salt;
}
fc_params.iv_buf = salt + 4;
- if (likely(mac_len)) {
+ if (likely(sess_misc->mac_len)) {
struct rte_mbuf *m = (cpt_op & CPT_OP_ENCODE) ? m_dst :
m_src;
}
fc_params.iv_buf = salt + 4;
}
- if (likely(mac_len)) {
+ if (likely(sess_misc->mac_len)) {
struct rte_mbuf *m;
m = (cpt_op & CPT_OP_ENCODE) ? m_dst : m_src;
&fc_params,
&flags))) {
CPT_LOG_DP_ERR("Prepare inplace src iov failed");
- *op_ret = -1;
- return NULL;
+ ret = -EINVAL;
+ goto err_exit;
}
} else {
/* Store SG I/O in the api for reuse */
if (prepare_iov_from_pkt(m_src, fc_params.src_iov, 0)) {
CPT_LOG_DP_ERR("Prepare src iov failed");
- *op_ret = -1;
- return NULL;
+ ret = -EINVAL;
+ goto err_exit;
}
if (unlikely(m_dst != NULL)) {
uint32_t pkt_len;
/* Try to make room as much as src has */
- m_dst = sym_op->m_dst;
pkt_len = rte_pktmbuf_pkt_len(m_dst);
if (unlikely(pkt_len < rte_pktmbuf_pkt_len(m_src))) {
"m_dst %p, need %u"
" more",
m_dst, pkt_len);
- return NULL;
+ ret = -EINVAL;
+ goto err_exit;
}
}
if (prepare_iov_from_pkt(m_dst, fc_params.dst_iov, 0)) {
CPT_LOG_DP_ERR("Prepare dst iov failed for "
"m_dst %p", m_dst);
- return NULL;
+ ret = -EINVAL;
+ goto err_exit;
}
} else {
fc_params.dst_iov = (void *)src;
}
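+	/*
+	 * Ops contained in a single mbuf with head/tailroom use the
+	 * smaller direct-mode meta length (lb_mlen); SG-mode ops need
+	 * the larger sg_mlen allocation.
+	 */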
if (likely(flags & SINGLE_BUF_HEADTAILROOM))
- mdata = alloc_op_meta(m_src,
- &fc_params.meta_buf,
- cpt_m_info->cptvf_op_sb_mlen,
- cpt_m_info->cptvf_meta_pool);
+ mdata = alloc_op_meta(m_src, &fc_params.meta_buf,
+ m_info->lb_mlen, m_info->pool);
else
- mdata = alloc_op_meta(NULL,
- &fc_params.meta_buf,
- cpt_m_info->cptvf_op_mlen,
- cpt_m_info->cptvf_meta_pool);
+ mdata = alloc_op_meta(NULL, &fc_params.meta_buf,
+ m_info->sg_mlen, m_info->pool);
if (unlikely(mdata == NULL)) {
CPT_LOG_DP_ERR("Error allocating meta buffer for request");
- return NULL;
+ ret = -ENOMEM;
+ goto err_exit;
}
op = (uintptr_t *)((uintptr_t)mdata & (uintptr_t)~1ull);
/* Finally prepare the instruction */
if (cpt_op & CPT_OP_ENCODE)
- prep_req = cpt_fc_enc_hmac_prep(flags, d_offs, d_lens,
- &fc_params, op, op_ret);
+ *prep_req = cpt_fc_enc_hmac_prep(flags, d_offs, d_lens,
+ &fc_params, op);
else
- prep_req = cpt_fc_dec_hmac_prep(flags, d_offs, d_lens,
- &fc_params, op, op_ret);
+ *prep_req = cpt_fc_dec_hmac_prep(flags, d_offs, d_lens,
+ &fc_params, op);
+
+ if (unlikely(*prep_req == NULL)) {
+ CPT_LOG_DP_ERR("Preparing request failed due to bad input arg");
+ ret = -EINVAL;
+ goto free_mdata_and_exit;
+ }
- if (unlikely(!prep_req))
- free_op_meta(mdata, cpt_m_info->cptvf_meta_pool);
*mdata_ptr = mdata;
- return prep_req;
+
+ return 0;
+
+free_mdata_and_exit:
+ free_op_meta(mdata, m_info->pool);
+err_exit:
+ return ret;
}
static __rte_always_inline void
op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
}
-static __rte_always_inline int
-instance_session_cfg(struct rte_crypto_sym_xform *xform, void *sess)
-{
- struct rte_crypto_sym_xform *chain;
-
- CPT_PMD_INIT_FUNC_TRACE();
-
- if (cpt_is_algo_supported(xform))
- goto err;
-
- chain = xform;
- while (chain) {
- switch (chain->type) {
- case RTE_CRYPTO_SYM_XFORM_AEAD:
- if (fill_sess_aead(chain, sess))
- goto err;
- break;
- case RTE_CRYPTO_SYM_XFORM_CIPHER:
- if (fill_sess_cipher(chain, sess))
- goto err;
- break;
- case RTE_CRYPTO_SYM_XFORM_AUTH:
- if (chain->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) {
- if (fill_sess_gmac(chain, sess))
- goto err;
- } else {
- if (fill_sess_auth(chain, sess))
- goto err;
- }
- break;
- default:
- CPT_LOG_DP_ERR("Invalid crypto xform type");
- break;
- }
- chain = chain->next;
- }
-
- return 0;
-
-err:
- return -1;
-}
-
static __rte_always_inline void
find_kasumif9_direction_and_length(uint8_t *src,
uint32_t counter_num_bytes,
/*
 * This handles all auth-only cases except AES_GMAC
*/
-static __rte_always_inline void *
+static __rte_always_inline int
fill_digest_params(struct rte_crypto_op *cop,
struct cpt_sess_misc *sess,
+ struct cpt_qp_meta_info *m_info,
void **mdata_ptr,
- int *op_ret)
+ void **prep_req)
{
uint32_t space = 0;
struct rte_crypto_sym_op *sym_op = cop->sym;
uint32_t auth_range_off;
uint32_t flags = 0;
uint64_t d_offs = 0, d_lens;
- void *prep_req = NULL;
struct rte_mbuf *m_src, *m_dst;
uint16_t auth_op = sess->cpt_op & CPT_OP_AUTH_MASK;
- uint8_t zsk_flag = sess->zsk_flag;
uint16_t mac_len = sess->mac_len;
fc_params_t params;
char src[SRC_IOV_SIZE];
uint8_t iv_buf[16];
+ int ret;
+
memset(¶ms, 0, sizeof(fc_params_t));
- struct cptvf_meta_info *cpt_m_info =
- (struct cptvf_meta_info *)(*mdata_ptr);
m_src = sym_op->m_src;
	/* For the digest-only case, force mempool alloc */
- mdata = alloc_op_meta(NULL, ¶ms.meta_buf, cpt_m_info->cptvf_op_mlen,
- cpt_m_info->cptvf_meta_pool);
+ mdata = alloc_op_meta(NULL, ¶ms.meta_buf, m_info->sg_mlen,
+ m_info->pool);
if (mdata == NULL) {
- CPT_LOG_DP_ERR("Error allocating meta buffer for request");
- *op_ret = -ENOMEM;
- return NULL;
+ ret = -ENOMEM;
+ goto err_exit;
}
mphys = params.meta_buf.dma_addr;
flags = VALID_MAC_BUF;
params.src_iov = (void *)src;
- if (unlikely(zsk_flag)) {
+ if (unlikely(sess->zsk_flag)) {
/*
* Since for Zuc, Kasumi, Snow3g offsets are in bits
* we will send pass through even for auth only case,
auth_range_off = 0;
params.auth_iv_buf = rte_crypto_op_ctod_offset(cop,
uint8_t *, sess->auth_iv_offset);
- if (zsk_flag == K_F9) {
+ if (sess->zsk_flag == K_F9) {
uint32_t length_in_bits, num_bytes;
uint8_t *src, direction = 0;
- uint32_t counter_num_bytes;
memcpy(iv_buf, rte_pktmbuf_mtod(cop->sym->m_src,
uint8_t *), 8);
*/
length_in_bits = cop->sym->auth.data.length;
num_bytes = (length_in_bits >> 3);
- counter_num_bytes = num_bytes;
src = rte_pktmbuf_mtod(cop->sym->m_src, uint8_t *);
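+		/*
+		 * KASUMI F9 embeds the direction bit and padding in the
+		 * message tail; recover the true bit length and direction
+		 * from the last bytes, then drop the 64-bit COUNT/FRESH
+		 * header that was copied into iv_buf above.
+		 */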
find_kasumif9_direction_and_length(src,
- counter_num_bytes,
+ num_bytes,
&length_in_bits,
&direction);
length_in_bits -= 64;
if (!rte_pktmbuf_append(m_dst, space)) {
CPT_LOG_DP_ERR("Failed to extend "
"mbuf by %uB", space);
- goto err;
+ ret = -EINVAL;
+ goto free_mdata_and_exit;
}
params.mac_buf.vaddr =
rte_pktmbuf_mtod_offset(m_dst, void *, off);
params.mac_buf.dma_addr =
- rte_pktmbuf_mtophys_offset(m_dst, off);
+ rte_pktmbuf_iova_offset(m_dst, off);
params.mac_buf.size = mac_len;
}
} else {
	/* Store SG I/O in the api for reuse */
if (prepare_iov_from_pkt(m_src, params.src_iov, auth_range_off)) {
CPT_LOG_DP_ERR("Prepare src iov failed");
- *op_ret = -1;
- goto err;
+ ret = -EINVAL;
+ goto free_mdata_and_exit;
+ }
+
+ *prep_req = cpt_fc_enc_hmac_prep(flags, d_offs, d_lens, ¶ms, op);
+ if (unlikely(*prep_req == NULL)) {
+ ret = -EINVAL;
+ goto free_mdata_and_exit;
}
- prep_req = cpt_fc_enc_hmac_prep(flags, d_offs, d_lens,
- ¶ms, op, op_ret);
*mdata_ptr = mdata;
- return prep_req;
-err:
- if (unlikely(!prep_req))
- free_op_meta(mdata, cpt_m_info->cptvf_meta_pool);
- return NULL;
+
+ return 0;
+
+free_mdata_and_exit:
+ free_op_meta(mdata, m_info->pool);
+err_exit:
+ return ret;
}
#endif /*_CPT_UCODE_H_ */