}
static __rte_always_inline void
-cpt_fc_salt_update(void *ctx,
+cpt_fc_salt_update(struct cpt_ctx *cpt_ctx,
uint8_t *salt)
{
- struct cpt_ctx *cpt_ctx = ctx;
- memcpy(&cpt_ctx->fctx.enc.encr_iv, salt, 4);
+ mc_fc_context_t *fctx = &cpt_ctx->mc_ctx.fctx;
+ memcpy(fctx->enc.encr_iv, salt, 4);
}
static __rte_always_inline int
cpt_fc_ciph_set_key_snow3g_uea2(struct cpt_ctx *cpt_ctx, const uint8_t *key,
uint16_t key_len)
{
+ mc_zuc_snow3g_ctx_t *zs_ctx = &cpt_ctx->mc_ctx.zs_ctx;
uint32_t keyx[4];
+
cpt_ctx->snow3g = 1;
gen_key_snow3g(key, keyx);
- memcpy(cpt_ctx->zs_ctx.ci_key, keyx, key_len);
+ memcpy(zs_ctx->ci_key, keyx, key_len);
cpt_ctx->zsk_flags = 0;
}
cpt_fc_ciph_set_key_zuc_eea3(struct cpt_ctx *cpt_ctx, const uint8_t *key,
uint16_t key_len)
{
+ mc_zuc_snow3g_ctx_t *zs_ctx = &cpt_ctx->mc_ctx.zs_ctx;
+
cpt_ctx->snow3g = 0;
- memcpy(cpt_ctx->zs_ctx.ci_key, key, key_len);
- memcpy(cpt_ctx->zs_ctx.zuc_const, zuc_d, 32);
+ memcpy(zs_ctx->ci_key, key, key_len);
+ memcpy(zs_ctx->zuc_const, zuc_d, 32);
cpt_ctx->zsk_flags = 0;
}
cpt_fc_ciph_set_key_kasumi_f8_ecb(struct cpt_ctx *cpt_ctx, const uint8_t *key,
uint16_t key_len)
{
+ mc_kasumi_ctx_t *k_ctx = &cpt_ctx->mc_ctx.k_ctx;
+
cpt_ctx->k_ecb = 1;
- memcpy(cpt_ctx->k_ctx.ci_key, key, key_len);
+ memcpy(k_ctx->ci_key, key, key_len);
cpt_ctx->zsk_flags = 0;
}
cpt_fc_ciph_set_key_kasumi_f8_cbc(struct cpt_ctx *cpt_ctx, const uint8_t *key,
uint16_t key_len)
{
- memcpy(cpt_ctx->k_ctx.ci_key, key, key_len);
+ mc_kasumi_ctx_t *k_ctx = &cpt_ctx->mc_ctx.k_ctx;
+
+ memcpy(k_ctx->ci_key, key, key_len);
cpt_ctx->zsk_flags = 0;
}
static __rte_always_inline int
-cpt_fc_ciph_set_key(void *ctx, cipher_type_t type, const uint8_t *key,
- uint16_t key_len, uint8_t *salt)
+cpt_fc_ciph_set_key(struct cpt_ctx *cpt_ctx, cipher_type_t type,
+ const uint8_t *key, uint16_t key_len, uint8_t *salt)
{
- struct cpt_ctx *cpt_ctx = ctx;
- mc_fc_context_t *fctx = &cpt_ctx->fctx;
+ mc_fc_context_t *fctx = &cpt_ctx->mc_ctx.fctx;
int ret;
ret = cpt_fc_ciph_set_type(type, cpt_ctx, key_len);
uint32_t g_size_bytes, s_size_bytes;
uint64_t dptr_dma, rptr_dma;
vq_cmd_word0_t vq_cmd_w0;
- vq_cmd_word3_t vq_cmd_w3;
void *c_vaddr, *m_vaddr;
uint64_t c_dma, m_dma;
opcode_info_t opcode;
req->ist.ei1 = dptr_dma;
req->ist.ei2 = rptr_dma;
- /* vq command w3 */
- vq_cmd_w3.u64 = 0;
-
/* 16 byte aligned cpt res address */
req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
*req->completion_addr = COMPLETION_CODE_INIT;
/* Fill microcode part of instruction */
req->ist.ei0 = vq_cmd_w0.u64;
- req->ist.ei3 = vq_cmd_w3.u64;
req->op = op;
uint32_t encr_data_len, auth_data_len, aad_len = 0;
uint32_t passthrough_len = 0;
void *m_vaddr, *offset_vaddr;
- uint64_t m_dma, offset_dma, ctx_dma;
+ uint64_t m_dma, offset_dma;
vq_cmd_word0_t vq_cmd_w0;
- vq_cmd_word3_t vq_cmd_w3;
void *c_vaddr;
uint64_t c_dma;
opcode_info_t opcode;
req->ist.ei2 = rptr_dma;
}
- ctx_dma = fc_params->ctx_buf.dma_addr +
- offsetof(struct cpt_ctx, fctx);
- /* vq command w3 */
- vq_cmd_w3.u64 = 0;
- vq_cmd_w3.s.grp = 0;
- vq_cmd_w3.s.cptr = ctx_dma;
-
/* 16 byte aligned cpt res address */
req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
*req->completion_addr = COMPLETION_CODE_INIT;
/* Fill microcode part of instruction */
req->ist.ei0 = vq_cmd_w0.u64;
- req->ist.ei3 = vq_cmd_w3.u64;
req->op = op;
uint32_t encr_data_len, auth_data_len, aad_len = 0;
uint32_t passthrough_len = 0;
void *m_vaddr, *offset_vaddr;
- uint64_t m_dma, offset_dma, ctx_dma;
+ uint64_t m_dma, offset_dma;
opcode_info_t opcode;
vq_cmd_word0_t vq_cmd_w0;
- vq_cmd_word3_t vq_cmd_w3;
void *c_vaddr;
uint64_t c_dma;
req->ist.ei2 = rptr_dma;
}
- ctx_dma = fc_params->ctx_buf.dma_addr +
- offsetof(struct cpt_ctx, fctx);
- /* vq command w3 */
- vq_cmd_w3.u64 = 0;
- vq_cmd_w3.s.grp = 0;
- vq_cmd_w3.s.cptr = ctx_dma;
-
/* 16 byte aligned cpt res address */
req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
*req->completion_addr = COMPLETION_CODE_INIT;
/* Fill microcode part of instruction */
req->ist.ei0 = vq_cmd_w0.u64;
- req->ist.ei3 = vq_cmd_w3.u64;
req->op = op;
uint64_t *offset_vaddr, offset_dma;
uint32_t *iv_s, iv[4];
vq_cmd_word0_t vq_cmd_w0;
- vq_cmd_word3_t vq_cmd_w3;
opcode_info_t opcode;
buf_p = &params->meta_buf;
req->ist.ei2 = rptr_dma;
}
- /* vq command w3 */
- vq_cmd_w3.u64 = 0;
- vq_cmd_w3.s.grp = 0;
- vq_cmd_w3.s.cptr = params->ctx_buf.dma_addr +
- offsetof(struct cpt_ctx, zs_ctx);
-
/* 16 byte aligned cpt res address */
req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
*req->completion_addr = COMPLETION_CODE_INIT;
/* Fill microcode part of instruction */
req->ist.ei0 = vq_cmd_w0.u64;
- req->ist.ei3 = vq_cmd_w3.u64;
req->op = op;
uint64_t *offset_vaddr, offset_dma;
uint32_t *iv_s, iv[4], j;
vq_cmd_word0_t vq_cmd_w0;
- vq_cmd_word3_t vq_cmd_w3;
opcode_info_t opcode;
buf_p = &params->meta_buf;
req->ist.ei2 = rptr_dma;
}
- /* vq command w3 */
- vq_cmd_w3.u64 = 0;
- vq_cmd_w3.s.grp = 0;
- vq_cmd_w3.s.cptr = params->ctx_buf.dma_addr +
- offsetof(struct cpt_ctx, zs_ctx);
-
/* 16 byte aligned cpt res address */
req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
*req->completion_addr = COMPLETION_CODE_INIT;
/* Fill microcode part of instruction */
req->ist.ei0 = vq_cmd_w0.u64;
- req->ist.ei3 = vq_cmd_w3.u64;
req->op = op;
uint64_t m_dma, c_dma;
uint64_t *offset_vaddr, offset_dma;
vq_cmd_word0_t vq_cmd_w0;
- vq_cmd_word3_t vq_cmd_w3;
opcode_info_t opcode;
uint8_t *in_buffer;
uint32_t g_size_bytes, s_size_bytes;
req->ist.ei1 = dptr_dma;
req->ist.ei2 = rptr_dma;
- /* vq command w3 */
- vq_cmd_w3.u64 = 0;
- vq_cmd_w3.s.grp = 0;
- vq_cmd_w3.s.cptr = params->ctx_buf.dma_addr +
- offsetof(struct cpt_ctx, k_ctx);
-
/* 16 byte aligned cpt res address */
req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
*req->completion_addr = COMPLETION_CODE_INIT;
/* Fill microcode part of instruction */
req->ist.ei0 = vq_cmd_w0.u64;
- req->ist.ei3 = vq_cmd_w3.u64;
req->op = op;
uint64_t m_dma, c_dma;
uint64_t *offset_vaddr, offset_dma;
vq_cmd_word0_t vq_cmd_w0;
- vq_cmd_word3_t vq_cmd_w3;
opcode_info_t opcode;
uint8_t *in_buffer;
uint32_t g_size_bytes, s_size_bytes;
req->ist.ei1 = dptr_dma;
req->ist.ei2 = rptr_dma;
- /* vq command w3 */
- vq_cmd_w3.u64 = 0;
- vq_cmd_w3.s.grp = 0;
- vq_cmd_w3.s.cptr = params->ctx_buf.dma_addr +
- offsetof(struct cpt_ctx, k_ctx);
-
/* 16 byte aligned cpt res address */
req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
*req->completion_addr = COMPLETION_CODE_INIT;
/* Fill microcode part of instruction */
req->ist.ei0 = vq_cmd_w0.u64;
- req->ist.ei3 = vq_cmd_w3.u64;
req->op = op;
}
static __rte_always_inline int
-cpt_fc_auth_set_key(void *ctx, auth_type_t type, const uint8_t *key,
- uint16_t key_len, uint16_t mac_len)
+cpt_fc_auth_set_key(struct cpt_ctx *cpt_ctx, auth_type_t type,
+ const uint8_t *key, uint16_t key_len, uint16_t mac_len)
{
- struct cpt_ctx *cpt_ctx = ctx;
- mc_fc_context_t *fctx = &cpt_ctx->fctx;
+ mc_fc_context_t *fctx = &cpt_ctx->mc_ctx.fctx;
+ mc_zuc_snow3g_ctx_t *zs_ctx = &cpt_ctx->mc_ctx.zs_ctx;
+ mc_kasumi_ctx_t *k_ctx = &cpt_ctx->mc_ctx.k_ctx;
if ((type >= ZUC_EIA3) && (type <= KASUMI_F9_ECB)) {
uint32_t keyx[4];
case SNOW3G_UIA2:
cpt_ctx->snow3g = 1;
gen_key_snow3g(key, keyx);
- memcpy(cpt_ctx->zs_ctx.ci_key, keyx, key_len);
+ memcpy(zs_ctx->ci_key, keyx, key_len);
cpt_ctx->fc_type = ZUC_SNOW3G;
cpt_ctx->zsk_flags = 0x1;
break;
case ZUC_EIA3:
cpt_ctx->snow3g = 0;
- memcpy(cpt_ctx->zs_ctx.ci_key, key, key_len);
- memcpy(cpt_ctx->zs_ctx.zuc_const, zuc_d, 32);
+ memcpy(zs_ctx->ci_key, key, key_len);
+ memcpy(zs_ctx->zuc_const, zuc_d, 32);
cpt_ctx->fc_type = ZUC_SNOW3G;
cpt_ctx->zsk_flags = 0x1;
break;
case KASUMI_F9_ECB:
/* Kasumi ECB mode */
cpt_ctx->k_ecb = 1;
- memcpy(cpt_ctx->k_ctx.ci_key, key, key_len);
+ memcpy(k_ctx->ci_key, key, key_len);
cpt_ctx->fc_type = KASUMI;
cpt_ctx->zsk_flags = 0x1;
break;
case KASUMI_F9_CBC:
- memcpy(cpt_ctx->k_ctx.ci_key, key, key_len);
+ memcpy(k_ctx->ci_key, key, key_len);
cpt_ctx->fc_type = KASUMI;
cpt_ctx->zsk_flags = 0x1;
break;
{
struct rte_crypto_sym_xform *temp_xform = xform;
struct cpt_sess_misc *misc;
+ vq_cmd_word3_t vq_cmd_w3;
void *priv;
int ret;
}
memset(priv, 0, sizeof(struct cpt_sess_misc) +
- offsetof(struct cpt_ctx, fctx));
+ offsetof(struct cpt_ctx, mc_ctx));
misc = priv;
misc->ctx_dma_addr = rte_mempool_virt2iova(misc) +
sizeof(struct cpt_sess_misc);
+ vq_cmd_w3.u64 = 0;
+ vq_cmd_w3.s.grp = 0;
+ vq_cmd_w3.s.cptr = misc->ctx_dma_addr + offsetof(struct cpt_ctx,
+ mc_ctx);
+
+ misc->cpt_inst_w7 = vq_cmd_w3.u64;
+
return 0;
priv_put:
return ret;
}
+ priv->cpt_inst_w7 = 0;
+
set_asym_session_private_data(sess, dev->driver_id, priv);
return 0;
}
static __rte_always_inline int32_t __rte_hot
otx_cpt_request_enqueue(struct cpt_instance *instance,
struct pending_queue *pqueue,
- void *req)
+ void *req, uint64_t cpt_inst_w7)
{
struct cpt_request_info *user_req = (struct cpt_request_info *)req;
if (unlikely(pqueue->pending_count >= DEFAULT_CMD_QLEN))
return -EAGAIN;
- fill_cpt_inst(instance, req);
+ fill_cpt_inst(instance, req, cpt_inst_w7);
CPT_LOG_DP_DEBUG("req: %p op: %p ", req, user_req->op);
goto req_fail;
}
- ret = otx_cpt_request_enqueue(instance, pqueue, params.req);
+ ret = otx_cpt_request_enqueue(instance, pqueue, params.req,
+ sess->cpt_inst_w7);
if (unlikely(ret)) {
CPT_LOG_DP_ERR("Could not enqueue crypto req");
{
struct cpt_sess_misc *sess;
struct rte_crypto_sym_op *sym_op = op->sym;
- void *prep_req, *mdata = NULL;
+ struct cpt_request_info *prep_req;
+ void *mdata = NULL;
int ret = 0;
uint64_t cpt_op;
if (likely(cpt_op & CPT_OP_CIPHER_MASK))
ret = fill_fc_params(op, sess, &instance->meta_info, &mdata,
- &prep_req);
+ (void **)&prep_req);
else
ret = fill_digest_params(op, sess, &instance->meta_info,
- &mdata, &prep_req);
+ &mdata, (void **)&prep_req);
if (unlikely(ret)) {
CPT_LOG_DP_ERR("prep cryto req : op %p, cpt_op 0x%x "
}
/* Enqueue prepared instruction to h/w */
- ret = otx_cpt_request_enqueue(instance, pqueue, prep_req);
+ ret = otx_cpt_request_enqueue(instance, pqueue, prep_req,
+ sess->cpt_inst_w7);
if (unlikely(ret)) {
/* Buffer allocated for request preparation need to be freed */
{
struct rte_crypto_sym_xform *temp_xform = xform;
struct cpt_sess_misc *misc;
+ vq_cmd_word3_t vq_cmd_w3;
void *priv;
int ret;
}
memset(priv, 0, sizeof(struct cpt_sess_misc) +
- offsetof(struct cpt_ctx, fctx));
+ offsetof(struct cpt_ctx, mc_ctx));
misc = priv;
misc->ctx_dma_addr = rte_mempool_virt2iova(misc) +
sizeof(struct cpt_sess_misc);
+ vq_cmd_w3.u64 = 0;
+ vq_cmd_w3.s.cptr = misc->ctx_dma_addr + offsetof(struct cpt_ctx,
+ mc_ctx);
+
/*
* IE engines support IPsec operations
* SE engines support IPsec operations, Chacha-Poly and
* Air-Crypto operations
*/
if (misc->zsk_flag || misc->chacha_poly)
- misc->egrp = OTX2_CPT_EGRP_SE;
+ vq_cmd_w3.s.grp = OTX2_CPT_EGRP_SE;
else
- misc->egrp = OTX2_CPT_EGRP_SE_IE;
+ vq_cmd_w3.s.grp = OTX2_CPT_EGRP_SE_IE;
+
+ misc->cpt_inst_w7 = vq_cmd_w3.u64;
return 0;
static __rte_always_inline void __rte_hot
otx2_ca_enqueue_req(const struct otx2_cpt_qp *qp,
struct cpt_request_info *req,
- void *lmtline)
+ void *lmtline,
+ uint64_t cpt_inst_w7)
{
union cpt_inst_s inst;
uint64_t lmt_status;
inst.s9x.ei0 = req->ist.ei0;
inst.s9x.ei1 = req->ist.ei1;
inst.s9x.ei2 = req->ist.ei2;
- inst.s9x.ei3 = req->ist.ei3;
+ inst.s9x.ei3 = cpt_inst_w7;
inst.s9x.qord = 1;
inst.s9x.grp = qp->ev.queue_id;
static __rte_always_inline int32_t __rte_hot
otx2_cpt_enqueue_req(const struct otx2_cpt_qp *qp,
struct pending_queue *pend_q,
- struct cpt_request_info *req)
+ struct cpt_request_info *req,
+ uint64_t cpt_inst_w7)
{
void *lmtline = qp->lmtline;
union cpt_inst_s inst;
uint64_t lmt_status;
if (qp->ca_enable) {
- otx2_ca_enqueue_req(qp, req, lmtline);
+ otx2_ca_enqueue_req(qp, req, lmtline, cpt_inst_w7);
return 0;
}
inst.s9x.ei0 = req->ist.ei0;
inst.s9x.ei1 = req->ist.ei1;
inst.s9x.ei2 = req->ist.ei2;
- inst.s9x.ei3 = req->ist.ei3;
+ inst.s9x.ei3 = cpt_inst_w7;
req->time_out = rte_get_timer_cycles() +
DEFAULT_COMMAND_TIMEOUT * rte_get_timer_hz();
struct rte_crypto_asym_op *asym_op = op->asym;
struct asym_op_params params = {0};
struct cpt_asym_sess_misc *sess;
- vq_cmd_word3_t *w3;
uintptr_t *cop;
void *mdata;
int ret;
goto req_fail;
}
- /* Set engine group of AE */
- w3 = (vq_cmd_word3_t *)&params.req->ist.ei3;
- w3->s.grp = OTX2_CPT_EGRP_AE;
-
- ret = otx2_cpt_enqueue_req(qp, pend_q, params.req);
+ ret = otx2_cpt_enqueue_req(qp, pend_q, params.req, sess->cpt_inst_w7);
if (unlikely(ret)) {
CPT_LOG_DP_ERR("Could not enqueue crypto req");
struct rte_crypto_sym_op *sym_op = op->sym;
struct cpt_request_info *req;
struct cpt_sess_misc *sess;
- vq_cmd_word3_t *w3;
uint64_t cpt_op;
void *mdata;
int ret;
return ret;
}
- w3 = ((vq_cmd_word3_t *)(&req->ist.ei3));
- w3->s.grp = sess->egrp;
-
- ret = otx2_cpt_enqueue_req(qp, pend_q, req);
+ ret = otx2_cpt_enqueue_req(qp, pend_q, req, sess->cpt_inst_w7);
if (unlikely(ret)) {
/* Free buffer allocated by fill params routines */
return ret;
}
- ret = otx2_cpt_enqueue_req(qp, pend_q, req);
+ ret = otx2_cpt_enqueue_req(qp, pend_q, req, sess->cpt_inst_w7);
return ret;
}
struct rte_mempool *pool)
{
struct cpt_asym_sess_misc *priv;
+ vq_cmd_word3_t vq_cmd_w3;
int ret;
CPT_PMD_INIT_FUNC_TRACE();
return ret;
}
+ vq_cmd_w3.u64 = 0;
+ vq_cmd_w3.s.grp = OTX2_CPT_EGRP_AE;
+ priv->cpt_inst_w7 = vq_cmd_w3.u64;
+
set_asym_session_private_data(sess, dev->driver_id, priv);
+
return 0;
}