X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fcrypto%2Fcnxk%2Fcn10k_cryptodev_ops.c;h=1905ea394a6900849f94b079558264eb9857b7e2;hb=5c374e9d7442744e302eb7d349307a4d2951c9f1;hp=ec301f4af7269ee1853de8d7d3ff382658f81484;hpb=a21db2f7c5ca34178a22ce26e6ac78fd8daa0199;p=dpdk.git

diff --git a/drivers/crypto/cnxk/cn10k_cryptodev_ops.c b/drivers/crypto/cnxk/cn10k_cryptodev_ops.c
index ec301f4af7..1905ea394a 100644
--- a/drivers/crypto/cnxk/cn10k_cryptodev_ops.c
+++ b/drivers/crypto/cnxk/cn10k_cryptodev_ops.c
@@ -3,14 +3,21 @@
  */
 
 #include <rte_cryptodev.h>
-#include <rte_cryptodev_pmd.h>
+#include <cryptodev_pmd.h>
+#include <rte_event_crypto_adapter.h>
+#include <rte_ip.h>
 
 #include "cn10k_cryptodev.h"
 #include "cn10k_cryptodev_ops.h"
+#include "cn10k_ipsec_la_ops.h"
+#include "cn10k_ipsec.h"
+#include "cnxk_ae.h"
 #include "cnxk_cryptodev.h"
 #include "cnxk_cryptodev_ops.h"
 #include "cnxk_se.h"
 
+#include "roc_api.h"
+
 static inline struct cnxk_se_sess *
 cn10k_cpt_sym_temp_sess_create(struct cnxk_cpt_qp *qp, struct rte_crypto_op *op)
 {
@@ -41,30 +48,73 @@ sess_put:
 	return NULL;
 }
 
+static __rte_always_inline int __rte_hot
+cpt_sec_inst_fill(struct cnxk_cpt_qp *qp, struct rte_crypto_op *op,
+		  struct cn10k_sec_session *sess,
+		  struct cpt_inflight_req *infl_req, struct cpt_inst_s *inst)
+{
+	struct rte_crypto_sym_op *sym_op = op->sym;
+	struct cn10k_ipsec_sa *sa;
+	int ret;
+
+	if (unlikely(sym_op->m_dst && sym_op->m_dst != sym_op->m_src)) {
+		plt_dp_err("Out of place is not supported");
+		return -ENOTSUP;
+	}
+
+	if (unlikely(!rte_pktmbuf_is_contiguous(sym_op->m_src))) {
+		plt_dp_err("Scatter Gather mode is not supported");
+		return -ENOTSUP;
+	}
+
+	sa = &sess->sa;
+
+	if (sa->is_outbound)
+		ret = process_outb_sa(&qp->lf, op, sa, inst);
+	else {
+		infl_req->op_flags |= CPT_OP_FLAGS_IPSEC_DIR_INBOUND;
+		ret = process_inb_sa(op, sa, inst);
+	}
+
+	return ret;
+}
+
 static __rte_always_inline int __rte_hot
 cpt_sym_inst_fill(struct cnxk_cpt_qp *qp, struct rte_crypto_op *op,
 		  struct cnxk_se_sess *sess, struct cpt_inflight_req *infl_req,
 		  struct cpt_inst_s *inst)
 {
-	RTE_SET_USED(qp);
-	RTE_SET_USED(op);
-	RTE_SET_USED(sess);
-	RTE_SET_USED(infl_req);
-	RTE_SET_USED(inst);
+	uint64_t cpt_op;
+	int ret;
 
-	return -ENOTSUP;
+	cpt_op = sess->cpt_op;
+
+	if (cpt_op & ROC_SE_OP_CIPHER_MASK)
+		ret = fill_fc_params(op, sess, &qp->meta_info, infl_req, inst);
+	else
+		ret = fill_digest_params(op, sess, &qp->meta_info, infl_req,
+					 inst);
+
+	return ret;
 }
 
 static inline int
 cn10k_cpt_fill_inst(struct cnxk_cpt_qp *qp, struct rte_crypto_op *ops[],
 		    struct cpt_inst_s inst[], struct cpt_inflight_req *infl_req)
 {
+	struct cn10k_sec_session *sec_sess;
+	struct rte_crypto_asym_op *asym_op;
 	struct rte_crypto_sym_op *sym_op;
+	struct cnxk_ae_sess *ae_sess;
 	struct cnxk_se_sess *sess;
 	struct rte_crypto_op *op;
 	uint64_t w7;
 	int ret;
 
+	const union cpt_res_s res = {
+		.cn10k.compcode = CPT_COMP_NOT_DONE,
+	};
+
 	op = ops[0];
 	inst[0].w0.u64 = 0;
@@ -74,7 +124,15 @@ cn10k_cpt_fill_inst(struct cnxk_cpt_qp *qp, struct rte_crypto_op *ops[],
 	sym_op = op->sym;
 
 	if (op->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
-		if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
+		if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
+			sec_sess = get_sec_session_private_data(
+				sym_op->sec_session);
+			ret = cpt_sec_inst_fill(qp, op, sec_sess, infl_req,
+						&inst[0]);
+			if (unlikely(ret))
+				return 0;
+			w7 = sec_sess->sa.inst.w7;
+		} else if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
 			sess = get_sym_session_private_data(
 				sym_op->session, cn10k_cryptodev_driver_id);
 			ret = cpt_sym_inst_fill(qp, op, sess, infl_req,
@@ -99,13 +157,28 @@ cn10k_cpt_fill_inst(struct cnxk_cpt_qp *qp, struct rte_crypto_op *ops[],
 			}
 			w7 = sess->cpt_inst_w7;
 		}
+	} else if (op->type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
+
+		if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
+			asym_op = op->asym;
+			ae_sess = get_asym_session_private_data(
+				asym_op->session, cn10k_cryptodev_driver_id);
+			ret = cnxk_ae_enqueue(qp, op, infl_req, &inst[0],
+					      ae_sess);
+			if (unlikely(ret))
+				return 0;
+			w7 = ae_sess->cpt_inst_w7;
+		} else {
+			plt_dp_err("Not supported Asym op without session");
+			return 0;
+		}
 	} else {
 		plt_dp_err("Unsupported op type");
 		return 0;
 	}
 
 	inst[0].res_addr = (uint64_t)&infl_req->res;
-	infl_req->res.cn10k.compcode = CPT_COMP_NOT_DONE;
+	__atomic_store_n(&infl_req->res.u64[0], res.u64[0], __ATOMIC_RELAXED);
 	infl_req->cop = op;
 
 	inst[0].w7.u64 = w7;
@@ -126,11 +199,15 @@ cn10k_cpt_enqueue_burst(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
 	struct pending_queue *pend_q;
 	struct cpt_inst_s *inst;
 	uint16_t lmt_id;
+	uint64_t head;
 	int ret, i;
 
 	pend_q = &qp->pend_q;
 
-	nb_allowed = qp->lf.nb_desc - pend_q->pending_count;
+	const uint64_t pq_mask = pend_q->pq_mask;
+
+	head = pend_q->head;
+	nb_allowed = pending_queue_free_cnt(head, pend_q->tail, pq_mask);
 	nb_ops = RTE_MIN(nb_ops, nb_allowed);
 
 	if (unlikely(nb_ops == 0))
@@ -144,18 +221,18 @@ cn10k_cpt_enqueue_burst(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
 
 again:
 	for (i = 0; i < RTE_MIN(PKTS_PER_LOOP, nb_ops); i++) {
-		infl_req = &pend_q->req_queue[pend_q->enq_tail];
+		infl_req = &pend_q->req_queue[head];
 		infl_req->op_flags = 0;
 
 		ret = cn10k_cpt_fill_inst(qp, ops + i, &inst[2 * i], infl_req);
 		if (unlikely(ret != 1)) {
 			plt_dp_err("Could not process op: %p", ops + i);
 			if (i == 0)
-				goto update_pending;
+				goto pend_q_commit;
 			break;
 		}
 
-		MOD_INC(pend_q->enq_tail, qp->lf.nb_desc);
+		pending_queue_advance(&head, pq_mask);
 	}
 
 	if (i > PKTS_PER_STEORL) {
@@ -181,27 +258,182 @@ again:
 		goto again;
 	}
 
-update_pending:
-	pend_q->pending_count += count + i;
+pend_q_commit:
+	rte_atomic_thread_fence(__ATOMIC_RELEASE);
+
+	pend_q->head = head;
 	pend_q->time_out = rte_get_timer_cycles() +
 			   DEFAULT_COMMAND_TIMEOUT * rte_get_timer_hz();
 
 	return count + i;
 }
 
+uint16_t
+cn10k_cpt_crypto_adapter_enqueue(uintptr_t tag_op, struct rte_crypto_op *op)
+{
+	union rte_event_crypto_metadata *ec_mdata;
+	struct cpt_inflight_req *infl_req;
+	struct rte_event *rsp_info;
+	uint64_t lmt_base, lmt_arg;
+	struct cpt_inst_s *inst;
+	struct cnxk_cpt_qp *qp;
+	uint8_t cdev_id;
+	uint16_t lmt_id;
+	uint16_t qp_id;
+	int ret;
+
+	ec_mdata = cnxk_event_crypto_mdata_get(op);
+	if (!ec_mdata) {
+		rte_errno = EINVAL;
+		return 0;
+	}
+
+	cdev_id = ec_mdata->request_info.cdev_id;
+	qp_id = ec_mdata->request_info.queue_pair_id;
+	qp = rte_cryptodevs[cdev_id].data->queue_pairs[qp_id];
+	rsp_info = &ec_mdata->response_info;
+
+	if (unlikely(!qp->ca.enabled)) {
+		rte_errno = EINVAL;
+		return 0;
+	}
+
+	if (unlikely(rte_mempool_get(qp->ca.req_mp, (void **)&infl_req))) {
+		rte_errno = ENOMEM;
+		return 0;
+	}
+	infl_req->op_flags = 0;
+
+	lmt_base = qp->lmtline.lmt_base;
+	ROC_LMT_BASE_ID_GET(lmt_base, lmt_id);
+	inst = (struct cpt_inst_s *)lmt_base;
+
+	ret = cn10k_cpt_fill_inst(qp, &op, inst, infl_req);
+	if (unlikely(ret != 1)) {
+		plt_dp_err("Could not process op: %p", op);
+		rte_mempool_put(qp->ca.req_mp, infl_req);
+		return 0;
+	}
+
+	infl_req->cop = op;
+	infl_req->res.cn10k.compcode = CPT_COMP_NOT_DONE;
+	infl_req->qp = qp;
+	inst->w0.u64 = 0;
+	inst->res_addr = (uint64_t)&infl_req->res;
+	inst->w2.u64 = CNXK_CPT_INST_W2(
+		(RTE_EVENT_TYPE_CRYPTODEV << 28) | rsp_info->flow_id,
+		rsp_info->sched_type, rsp_info->queue_id, 0);
+	inst->w3.u64 = CNXK_CPT_INST_W3(1, infl_req);
+
+	if (roc_cpt_is_iq_full(&qp->lf)) {
+		rte_mempool_put(qp->ca.req_mp, infl_req);
+		rte_errno = EAGAIN;
+		return 0;
+	}
+
+	if (!rsp_info->sched_type)
+		roc_sso_hws_head_wait(tag_op);
+
+	lmt_arg = ROC_CN10K_CPT_LMT_ARG | (uint64_t)lmt_id;
+	roc_lmt_submit_steorl(lmt_arg, qp->lmtline.io_addr);
+
+	rte_io_wmb();
+
+	return 1;
+}
+
+static inline void
+cn10k_cpt_sec_post_process(struct rte_crypto_op *cop,
+			   struct cpt_cn10k_res_s *res)
+{
+	struct rte_mbuf *m = cop->sym->m_src;
+	const uint16_t m_len = res->rlen;
+
+	m->data_len = m_len;
+	m->pkt_len = m_len;
+}
+
+static inline void
+cn10k_cpt_sec_ucc_process(struct rte_crypto_op *cop,
+			  struct cpt_inflight_req *infl_req,
+			  const uint8_t uc_compcode)
+{
+	struct cn10k_sec_session *sess;
+	struct cn10k_ipsec_sa *sa;
+	struct rte_mbuf *mbuf;
+
+	if (uc_compcode == ROC_IE_OT_UCC_SUCCESS_SA_SOFTEXP_FIRST)
+		cop->aux_flags = RTE_CRYPTO_OP_AUX_FLAGS_IPSEC_SOFT_EXPIRY;
+
+	if (!(infl_req->op_flags & CPT_OP_FLAGS_IPSEC_DIR_INBOUND))
+		return;
+
+	sess = get_sec_session_private_data(cop->sym->sec_session);
+	sa = &sess->sa;
+
+	mbuf = cop->sym->m_src;
+
+	switch (uc_compcode) {
+	case ROC_IE_OT_UCC_SUCCESS:
+		if (sa->ip_csum_enable)
+			mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
+		break;
+	case ROC_IE_OT_UCC_SUCCESS_PKT_IP_BADCSUM:
+		mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
+		break;
+	case ROC_IE_OT_UCC_SUCCESS_PKT_L4_GOODCSUM:
+		mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
+		if (sa->ip_csum_enable)
+			mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
+		break;
+	case ROC_IE_OT_UCC_SUCCESS_PKT_L4_BADCSUM:
+		mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
+		if (sa->ip_csum_enable)
+			mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
+		break;
+	default:
+		break;
+	}
+}
+
 static inline void
 cn10k_cpt_dequeue_post_process(struct cnxk_cpt_qp *qp,
 			       struct rte_crypto_op *cop,
-			       struct cpt_inflight_req *infl_req)
+			       struct cpt_inflight_req *infl_req,
+			       struct cpt_cn10k_res_s *res)
 {
-	struct cpt_cn10k_res_s *res = (struct cpt_cn10k_res_s *)&infl_req->res;
+	const uint8_t uc_compcode = res->uc_compcode;
+	const uint8_t compcode = res->compcode;
 	unsigned int sz;
 
-	if (likely(res->compcode == CPT_COMP_GOOD ||
-		   res->compcode == CPT_COMP_WARN)) {
-		if (unlikely(res->uc_compcode)) {
+	cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+
+	if (cop->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC &&
+	    cop->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
+		if (likely(compcode == CPT_COMP_WARN)) {
+			/* Success with additional info */
+			cn10k_cpt_sec_ucc_process(cop, infl_req, uc_compcode);
+			cn10k_cpt_sec_post_process(cop, res);
+		} else {
 			cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
+			plt_dp_info("HW completion code 0x%x", res->compcode);
+			if (compcode == CPT_COMP_GOOD) {
+				plt_dp_info(
+					"Request failed with microcode error");
+				plt_dp_info("MC completion code 0x%x",
+					    uc_compcode);
+			}
+		}
+
+		return;
+	}
+
+	if (likely(compcode == CPT_COMP_GOOD || compcode == CPT_COMP_WARN)) {
+		if (unlikely(uc_compcode)) {
+			if (uc_compcode == ROC_SE_ERR_GC_ICV_MISCOMPARE)
+				cop->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+			else
+				cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
 			plt_dp_info("Request failed with microcode error");
 			plt_dp_info("MC completion code 0x%x",
@@ -209,12 +441,29 @@ cn10k_cpt_dequeue_post_process(struct cnxk_cpt_qp *qp,
 			goto temp_sess_free;
 		}
 
-		cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+		if (cop->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
+			/* Verify authentication data if required */
+			if (unlikely(infl_req->op_flags &
+				     CPT_OP_FLAGS_AUTH_VERIFY)) {
+				uintptr_t *rsp = infl_req->mdata;
+				compl_auth_verify(cop, (uint8_t *)rsp[0],
+						  rsp[1]);
+			}
+		} else if (cop->type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
+			struct rte_crypto_asym_op *op = cop->asym;
+			uintptr_t *mdata = infl_req->mdata;
+			struct cnxk_ae_sess *sess;
+
+			sess = get_asym_session_private_data(
+				op->session, cn10k_cryptodev_driver_id);
+
+			cnxk_ae_post_process(cop, sess, (uint8_t *)mdata[0]);
+		}
 	} else {
 		cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
 		plt_dp_info("HW completion code 0x%x", res->compcode);
 
-		switch (res->compcode) {
+		switch (compcode) {
 		case CPT_COMP_INSTERR:
 			plt_dp_err("Request failed with instruction error");
 			break;
@@ -244,32 +493,62 @@ temp_sess_free:
 	}
 }
 
+uintptr_t
+cn10k_cpt_crypto_adapter_dequeue(uintptr_t get_work1)
+{
+	struct cpt_inflight_req *infl_req;
+	struct rte_crypto_op *cop;
+	struct cnxk_cpt_qp *qp;
+	union cpt_res_s res;
+
+	infl_req = (struct cpt_inflight_req *)(get_work1);
+	cop = infl_req->cop;
+	qp = infl_req->qp;
+
+	res.u64[0] = __atomic_load_n(&infl_req->res.u64[0], __ATOMIC_RELAXED);
+
+	cn10k_cpt_dequeue_post_process(qp, infl_req->cop, infl_req, &res.cn10k);
+
+	if (unlikely(infl_req->op_flags & CPT_OP_FLAGS_METABUF))
+		rte_mempool_put(qp->meta_info.pool, infl_req->mdata);
+
+	rte_mempool_put(qp->ca.req_mp, infl_req);
+	return (uintptr_t)cop;
+}
+
 static uint16_t
 cn10k_cpt_dequeue_burst(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
 {
 	struct cpt_inflight_req *infl_req;
 	struct cnxk_cpt_qp *qp = qptr;
 	struct pending_queue *pend_q;
-	struct cpt_cn10k_res_s *res;
+	uint64_t infl_cnt, pq_tail;
 	struct rte_crypto_op *cop;
-	int i, nb_pending;
+	union cpt_res_s res;
+	int i;
 
 	pend_q = &qp->pend_q;
 
-	nb_pending = pend_q->pending_count;
+	const uint64_t pq_mask = pend_q->pq_mask;
+
+	pq_tail = pend_q->tail;
+	infl_cnt = pending_queue_infl_cnt(pend_q->head, pq_tail, pq_mask);
+	nb_ops = RTE_MIN(nb_ops, infl_cnt);
 
-	if (nb_ops > nb_pending)
-		nb_ops = nb_pending;
+	/* Ensure infl_cnt isn't read before data lands */
+	rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
 
 	for (i = 0; i < nb_ops; i++) {
-		infl_req = &pend_q->req_queue[pend_q->deq_head];
+		infl_req = &pend_q->req_queue[pq_tail];
 
-		res = (struct cpt_cn10k_res_s *)&infl_req->res;
+		res.u64[0] = __atomic_load_n(&infl_req->res.u64[0],
+					     __ATOMIC_RELAXED);
 
-		if (unlikely(res->compcode == CPT_COMP_NOT_DONE)) {
+		if (unlikely(res.cn10k.compcode == CPT_COMP_NOT_DONE)) {
 			if (unlikely(rte_get_timer_cycles() >
 				     pend_q->time_out)) {
 				plt_err("Request timed out");
+				cnxk_cpt_dump_on_err(qp);
 				pend_q->time_out = rte_get_timer_cycles() +
 						   DEFAULT_COMMAND_TIMEOUT *
 							   rte_get_timer_hz();
@@ -277,19 +556,19 @@ cn10k_cpt_dequeue_burst(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
 			break;
 		}
 
-		MOD_INC(pend_q->deq_head, qp->lf.nb_desc);
+		pending_queue_advance(&pq_tail, pq_mask);
 
 		cop = infl_req->cop;
 
 		ops[i] = cop;
 
-		cn10k_cpt_dequeue_post_process(qp, cop, infl_req);
+		cn10k_cpt_dequeue_post_process(qp, cop, infl_req, &res.cn10k);
 
 		if (unlikely(infl_req->op_flags & CPT_OP_FLAGS_METABUF))
 			rte_mempool_put(qp->meta_info.pool, infl_req->mdata);
 	}
 
-	pend_q->pending_count -= i;
+	pend_q->tail = pq_tail;
 
 	return i;
 }
@@ -332,8 +611,8 @@ struct rte_cryptodev_ops cn10k_cpt_ops = {
 	.sym_session_clear = cnxk_cpt_sym_session_clear,
 
 	/* Asymmetric crypto ops */
-	.asym_session_get_size = NULL,
-	.asym_session_configure = NULL,
-	.asym_session_clear = NULL,
+	.asym_session_get_size = cnxk_ae_session_size_get,
+	.asym_session_configure = cnxk_ae_session_cfg,
+	.asym_session_clear = cnxk_ae_session_clear,
 
 };