diff --git a/drivers/crypto/octeontx2/otx2_cryptodev_ops.c b/drivers/crypto/octeontx2/otx2_cryptodev_ops.c
index 8cd2fc7ee4..cec20b5c6d 100644
--- a/drivers/crypto/octeontx2/otx2_cryptodev_ops.c
+++ b/drivers/crypto/octeontx2/otx2_cryptodev_ops.c
@@ -14,6 +14,7 @@
 #include "otx2_cryptodev_mbox.h"
 #include "otx2_cryptodev_ops.h"
 #include "otx2_cryptodev_ops_helper.h"
+#include "otx2_ipsec_anti_replay.h"
 #include "otx2_ipsec_po_ops.h"
 #include "otx2_mbox.h"
 #include "otx2_sec_idev.h"
@@ -192,7 +193,7 @@ otx2_cpt_qp_create(const struct rte_cryptodev *dev, uint16_t qp_id,
 	size_div40 = (iq_len + 40 - 1) / 40 + 1;
 
 	/* For pending queue */
-	len = iq_len * RTE_ALIGN(sizeof(struct rid), 8);
+	len = iq_len * sizeof(uintptr_t);
 
 	/* Space for instruction group memory */
 	len += size_div40 * 16;
@@ -229,19 +230,20 @@ otx2_cpt_qp_create(const struct rte_cryptodev *dev, uint16_t qp_id,
 	}
 
 	/* Initialize pending queue */
-	qp->pend_q.rid_queue = (struct rid *)va;
+	qp->pend_q.req_queue = (uintptr_t *)va;
 	qp->pend_q.enq_tail = 0;
 	qp->pend_q.deq_head = 0;
 	qp->pend_q.pending_count = 0;
 
-	used_len = iq_len * RTE_ALIGN(sizeof(struct rid), 8);
+	used_len = iq_len * sizeof(uintptr_t);
 	used_len += size_div40 * 16;
 	used_len = RTE_ALIGN(used_len, pg_sz);
 	iova += used_len;
 
 	qp->iq_dma_addr = iova;
 	qp->id = qp_id;
-	qp->base = OTX2_CPT_LF_BAR2(vf, qp_id);
+	qp->blkaddr = vf->lf_blkaddr[qp_id];
+	qp->base = OTX2_CPT_LF_BAR2(vf, qp->blkaddr, qp_id);
 
 	lmtline = vf->otx2_dev.bar2 +
 		  (RVU_BLOCK_ADDR_LMT << 20 | qp_id << 12) +
@@ -520,7 +522,7 @@ otx2_cpt_enqueue_req(const struct otx2_cpt_qp *qp,
 		lmt_status = otx2_lmt_submit(qp->lf_nq_reg);
 	} while (lmt_status == 0);
 
-	pend_q->rid_queue[pend_q->enq_tail].rid = (uintptr_t)req;
+	pend_q->req_queue[pend_q->enq_tail] = (uintptr_t)req;
 
 	/* We will use soft queue length here to limit requests */
 	MOD_INC(pend_q->enq_tail, OTX2_CPT_DEFAULT_CMD_QLEN);
@@ -650,21 +652,55 @@ static __rte_always_inline int __rte_hot
 otx2_cpt_enqueue_sec(struct otx2_cpt_qp *qp, struct rte_crypto_op *op,
 		     struct pending_queue *pend_q)
 {
+	uint32_t winsz, esn_low = 0, esn_hi = 0, seql = 0, seqh = 0;
+	struct rte_mbuf *m_src = op->sym->m_src;
 	struct otx2_sec_session_ipsec_lp *sess;
 	struct otx2_ipsec_po_sa_ctl *ctl_wrd;
+	struct otx2_ipsec_po_in_sa *sa;
 	struct otx2_sec_session *priv;
 	struct cpt_request_info *req;
+	uint64_t seq_in_sa, seq = 0;
+	uint8_t esn;
 	int ret;
 
 	priv = get_sec_session_private_data(op->sym->sec_session);
 	sess = &priv->ipsec.lp;
+	sa = &sess->in_sa;
 
-	ctl_wrd = &sess->in_sa.ctl;
+	ctl_wrd = &sa->ctl;
+	esn = ctl_wrd->esn_en;
+	winsz = sa->replay_win_sz;
 
 	if (ctl_wrd->direction == OTX2_IPSEC_PO_SA_DIRECTION_OUTBOUND)
 		ret = process_outb_sa(op, sess, &qp->meta_info, (void **)&req);
-	else
+	else {
+		if (winsz) {
+			esn_low = rte_be_to_cpu_32(sa->esn_low);
+			esn_hi = rte_be_to_cpu_32(sa->esn_hi);
+			seql = *rte_pktmbuf_mtod_offset(m_src, uint32_t *,
+				sizeof(struct rte_ipv4_hdr) + 4);
+			seql = rte_be_to_cpu_32(seql);
+
+			if (!esn)
+				seq = (uint64_t)seql;
+			else {
+				seqh = anti_replay_get_seqh(winsz, seql, esn_hi,
+						esn_low);
+				seq = ((uint64_t)seqh << 32) | seql;
+			}
+
+			if (unlikely(seq == 0))
+				return IPSEC_ANTI_REPLAY_FAILED;
+
+			ret = anti_replay_check(sa->replay, seq, winsz);
+			if (unlikely(ret)) {
otx2_err("Anti replay check failed"); + return IPSEC_ANTI_REPLAY_FAILED; + } + } + ret = process_inb_sa(op, sess, &qp->meta_info, (void **)&req); + } if (unlikely(ret)) { otx2_err("Crypto req : op %p, ret 0x%x", op, ret); @@ -673,6 +709,14 @@ otx2_cpt_enqueue_sec(struct otx2_cpt_qp *qp, struct rte_crypto_op *op, ret = otx2_cpt_enqueue_req(qp, pend_q, req, sess->cpt_inst_w7); + if (winsz && esn) { + seq_in_sa = ((uint64_t)esn_hi << 32) | esn_low; + if (seq > seq_in_sa) { + sa->esn_low = rte_cpu_to_be_32(seql); + sa->esn_hi = rte_cpu_to_be_32(seqh); + } + } + return ret; } @@ -823,7 +867,8 @@ otx2_cpt_asym_dequeue_ecdsa_op(struct rte_crypto_ecdsa_op_param *ecdsa, /* Separate out sign r and s components */ memcpy(ecdsa->r.data, req->rptr, prime_len); - memcpy(ecdsa->s.data, req->rptr + ROUNDUP8(prime_len), prime_len); + memcpy(ecdsa->s.data, req->rptr + RTE_ALIGN_CEIL(prime_len, 8), + prime_len); ecdsa->r.length = prime_len; ecdsa->s.length = prime_len; } @@ -836,7 +881,8 @@ otx2_cpt_asym_dequeue_ecpm_op(struct rte_crypto_ecpm_op_param *ecpm, int prime_len = ec_grp[ec->curveid].prime.length; memcpy(ecpm->r.x.data, req->rptr, prime_len); - memcpy(ecpm->r.y.data, req->rptr + ROUNDUP8(prime_len), prime_len); + memcpy(ecpm->r.y.data, req->rptr + RTE_ALIGN_CEIL(prime_len, 8), + prime_len); ecpm->r.x.length = prime_len; ecpm->r.y.length = prime_len; } @@ -975,7 +1021,6 @@ otx2_cpt_dequeue_burst(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops) struct cpt_request_info *req; struct rte_crypto_op *cop; uint8_t cc[nb_ops]; - struct rid *rid; uintptr_t *rsp; void *metabuf; @@ -987,8 +1032,8 @@ otx2_cpt_dequeue_burst(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops) nb_ops = nb_pending; for (i = 0; i < nb_ops; i++) { - rid = &pend_q->rid_queue[pend_q->deq_head]; - req = (struct cpt_request_info *)(rid->rid); + req = (struct cpt_request_info *) + pend_q->req_queue[pend_q->deq_head]; cc[i] = otx2_cpt_compcode_get(req);