#include "otx2_cryptodev_mbox.h"
#include "otx2_cryptodev_ops.h"
#include "otx2_cryptodev_ops_helper.h"
+#include "otx2_ipsec_anti_replay.h"
#include "otx2_ipsec_po_ops.h"
#include "otx2_mbox.h"
#include "otx2_sec_idev.h"
/* Instruction queue is carved into groups of 40 entries; round up and reserve one spare group */
size_div40 = (iq_len + 40 - 1) / 40 + 1;
/* For pending queue */
- len = iq_len * RTE_ALIGN(sizeof(struct rid), 8);
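+ /* One pointer-sized slot per instruction queue entry */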
+ len = iq_len * sizeof(uintptr_t);
/* Space for instruction group memory */
len += size_div40 * 16;
}
/* Initialize pending queue */
- qp->pend_q.rid_queue = (struct rid *)va;
+ qp->pend_q.req_queue = (uintptr_t *)va;
qp->pend_q.enq_tail = 0;
qp->pend_q.deq_head = 0;
qp->pend_q.pending_count = 0;
- used_len = iq_len * RTE_ALIGN(sizeof(struct rid), 8);
+ used_len = iq_len * sizeof(uintptr_t);
used_len += size_div40 * 16;
used_len = RTE_ALIGN(used_len, pg_sz);
iova += used_len;
qp->iq_dma_addr = iova;
qp->id = qp_id;
- qp->base = OTX2_CPT_LF_BAR2(vf, qp_id);
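+ /* Derive the BAR2 base from the block address this LF is attached to */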
+ qp->blkaddr = vf->lf_blkaddr[qp_id];
+ qp->base = OTX2_CPT_LF_BAR2(vf, qp->blkaddr, qp_id);
lmtline = vf->otx2_dev.bar2 +
(RVU_BLOCK_ADDR_LMT << 20 | qp_id << 12) +
lmt_status = otx2_lmt_submit(qp->lf_nq_reg);
} while (lmt_status == 0);
- pend_q->rid_queue[pend_q->enq_tail].rid = (uintptr_t)req;
+ pend_q->req_queue[pend_q->enq_tail] = (uintptr_t)req;
/* Use the soft queue length here to limit requests */
MOD_INC(pend_q->enq_tail, OTX2_CPT_DEFAULT_CMD_QLEN);
otx2_cpt_enqueue_sec(struct otx2_cpt_qp *qp, struct rte_crypto_op *op,
struct pending_queue *pend_q)
{
+ uint32_t winsz, esn_low = 0, esn_hi = 0, seql = 0, seqh = 0;
+ struct rte_mbuf *m_src = op->sym->m_src;
struct otx2_sec_session_ipsec_lp *sess;
struct otx2_ipsec_po_sa_ctl *ctl_wrd;
+ struct otx2_ipsec_po_in_sa *sa;
struct otx2_sec_session *priv;
struct cpt_request_info *req;
+ uint64_t seq_in_sa, seq = 0;
+ uint8_t esn;
int ret;
priv = get_sec_session_private_data(op->sym->sec_session);
sess = &priv->ipsec.lp;
+ sa = &sess->in_sa;
- ctl_wrd = &sess->in_sa.ctl;
+ ctl_wrd = &sa->ctl;
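+ /* Cache the ESN enable flag and replay window size from the inbound SA */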
+ esn = ctl_wrd->esn_en;
+ winsz = sa->replay_win_sz;
if (ctl_wrd->direction == OTX2_IPSEC_PO_SA_DIRECTION_OUTBOUND)
ret = process_outb_sa(op, sess, &qp->meta_info, (void **)&req);
- else
+ else {
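+ /* Run the software anti-replay check before submitting to CPT when a replay window is configured */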
+ if (winsz) {
+ esn_low = rte_be_to_cpu_32(sa->esn_low);
+ esn_hi = rte_be_to_cpu_32(sa->esn_hi);
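+ /* The low 32 bits of the ESP sequence number follow the 4-byte SPI after the IPv4 header */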
+ seql = *rte_pktmbuf_mtod_offset(m_src, uint32_t *,
+ sizeof(struct rte_ipv4_hdr) + 4);
+ seql = rte_be_to_cpu_32(seql);
+
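+ /* Without ESN, use the 32-bit sequence number as-is; with ESN, recover the high 32 bits from the SA state and the replay window */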
+ if (!esn) {
+ seq = (uint64_t)seql;
+ } else {
+ seqh = anti_replay_get_seqh(winsz, seql, esn_hi,
+ esn_low);
+ seq = ((uint64_t)seqh << 32) | seql;
+ }
+
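+ /* ESP sequence numbers start at 1, so zero is rejected */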
+ if (unlikely(seq == 0))
+ return IPSEC_ANTI_REPLAY_FAILED;
+
+ ret = anti_replay_check(sa->replay, seq, winsz);
+ if (unlikely(ret)) {
+ otx2_err("Anti replay check failed");
+ return IPSEC_ANTI_REPLAY_FAILED;
+ }
+ }
+
ret = process_inb_sa(op, sess, &qp->meta_info, (void **)&req);
+ }
if (unlikely(ret)) {
otx2_err("Crypto req : op %p, ret 0x%x", op, ret);
return ret;
}
ret = otx2_cpt_enqueue_req(qp, pend_q, req, sess->cpt_inst_w7);
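+ /* On success, move the SA's ESN forward if this packet advanced the sequence number */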
+ if (winsz && esn) {
+ seq_in_sa = ((uint64_t)esn_hi << 32) | esn_low;
+ if (seq > seq_in_sa) {
+ sa->esn_low = rte_cpu_to_be_32(seql);
+ sa->esn_hi = rte_cpu_to_be_32(seqh);
+ }
+ }
+
return ret;
}
struct cpt_request_info *req;
struct rte_crypto_op *cop;
uint8_t cc[nb_ops];
- struct rid *rid;
uintptr_t *rsp;
void *metabuf;
nb_ops = nb_pending;
for (i = 0; i < nb_ops; i++) {
- rid = &pend_q->rid_queue[pend_q->deq_head];
- req = (struct cpt_request_info *)(rid->rid);
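+ /* Pending queue entries are the raw request pointers */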
+ req = (struct cpt_request_info *)
+ pend_q->req_queue[pend_q->deq_head];
cc[i] = otx2_cpt_compcode_get(req);