diff --git a/drivers/crypto/octeontx2/otx2_cryptodev_ops.c b/drivers/crypto/octeontx2/otx2_cryptodev_ops.c
index be37d3c..0cf8e80 100644
--- a/drivers/crypto/octeontx2/otx2_cryptodev_ops.c
+++ b/drivers/crypto/octeontx2/otx2_cryptodev_ops.c
@@ -7,6 +7,7 @@
 #include <rte_cryptodev_pmd.h>
 #include <rte_errno.h>
 #include <rte_ethdev.h>
+#include <rte_event_crypto_adapter.h>
 
 #include "otx2_cryptodev.h"
 #include "otx2_cryptodev_capabilities.h"
@@ -14,6 +15,7 @@
 #include "otx2_cryptodev_mbox.h"
 #include "otx2_cryptodev_ops.h"
 #include "otx2_cryptodev_ops_helper.h"
+#include "otx2_ipsec_anti_replay.h"
 #include "otx2_ipsec_po_ops.h"
 #include "otx2_mbox.h"
 #include "otx2_sec_idev.h"
@@ -192,7 +194,7 @@ otx2_cpt_qp_create(const struct rte_cryptodev *dev, uint16_t qp_id,
        size_div40 = (iq_len + 40 - 1) / 40 + 1;
 
        /* For pending queue */
-       len = iq_len * RTE_ALIGN(sizeof(struct rid), 8);
+       len = iq_len * sizeof(uintptr_t);
 
        /* Space for instruction group memory */
        len += size_div40 * 16;
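A note on the sizing above: the pending queue now stores bare request pointers rather than `struct rid` entries. On a 64-bit target this changes the type, not the footprint, since `struct rid` in the common cpt code wraps a single `uintptr_t`. A minimal compile-time check, assuming that definition:

```c
#include <stdint.h>
#include <rte_common.h>

/* struct rid as defined in the common cpt code: a wrapped uintptr_t */
struct rid { uintptr_t rid; };

/* Per-slot footprint is unchanged on 64-bit; only the wrapper goes away */
_Static_assert(RTE_ALIGN(sizeof(struct rid), 8) == sizeof(uintptr_t),
	       "pending queue slot size unchanged");
```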
@@ -229,19 +231,20 @@ otx2_cpt_qp_create(const struct rte_cryptodev *dev, uint16_t qp_id,
        }
 
        /* Initialize pending queue */
-       qp->pend_q.rid_queue = (struct rid *)va;
+       qp->pend_q.req_queue = (uintptr_t *)va;
        qp->pend_q.enq_tail = 0;
        qp->pend_q.deq_head = 0;
        qp->pend_q.pending_count = 0;
 
-       used_len = iq_len * RTE_ALIGN(sizeof(struct rid), 8);
+       used_len = iq_len * sizeof(uintptr_t);
        used_len += size_div40 * 16;
        used_len = RTE_ALIGN(used_len, pg_sz);
        iova += used_len;
 
        qp->iq_dma_addr = iova;
        qp->id = qp_id;
-       qp->base = OTX2_CPT_LF_BAR2(vf, qp_id);
+       qp->blkaddr = vf->lf_blkaddr[qp_id];
+       qp->base = OTX2_CPT_LF_BAR2(vf, qp->blkaddr, qp_id);
 
        lmtline = vf->otx2_dev.bar2 +
                  (RVU_BLOCK_ADDR_LMT << 20 | qp_id << 12) +
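The queue pair's base address is now derived from a per-LF block address (`vf->lf_blkaddr[qp_id]`), so a queue can be backed by either CPT block. A sketch of the RVU BAR2 address composition implied by the shift constants visible above; the driver's authoritative macro is `OTX2_CPT_LF_BAR2()`:

```c
#include <stdint.h>

/* Sketch: an RVU BAR2 slot address, assuming the layout implied by the
 * shifts above (block address at bit 20, LF slot at bit 12). */
static inline void *
rvu_bar2_slot(void *bar2, uint64_t blkaddr, uint64_t slot)
{
	return (uint8_t *)bar2 + ((blkaddr << 20) | (slot << 12));
}
```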
@@ -320,12 +323,16 @@ sym_xform_verify(struct rte_crypto_sym_xform *xform)
        if (xform->next) {
                if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
                    xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
-                   xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
+                   xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT &&
+                   (xform->auth.algo != RTE_CRYPTO_AUTH_SHA1_HMAC ||
+                    xform->next->cipher.algo != RTE_CRYPTO_CIPHER_AES_CBC))
                        return -ENOTSUP;
 
                if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
                    xform->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT &&
-                   xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
+                   xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+                   (xform->cipher.algo != RTE_CRYPTO_CIPHER_AES_CBC ||
+                    xform->next->auth.algo != RTE_CRYPTO_AUTH_SHA1_HMAC))
                        return -ENOTSUP;
 
                if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
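These two checks previously rejected every auth-then-encrypt and decrypt-then-auth chain; they now carve out the one ordering the hardware supports, SHA1-HMAC combined with AES-CBC. An illustrative chain that passes the new verification; key, IV and digest setup are application placeholders:

```c
#include <rte_cryptodev.h>

/* Illustrative auth-generate followed by AES-CBC encrypt, the chain
 * sym_xform_verify() now accepts. Key/IV setup is omitted. */
static struct rte_crypto_sym_xform cipher_xform = {
	.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
	.next = NULL,
	.cipher = {
		.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
		.algo = RTE_CRYPTO_CIPHER_AES_CBC,
	},
};

static struct rte_crypto_sym_xform auth_xform = {
	.type = RTE_CRYPTO_SYM_XFORM_AUTH,
	.next = &cipher_xform,
	.auth = {
		.op = RTE_CRYPTO_AUTH_OP_GENERATE,
		.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
	},
};
```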
@@ -356,6 +363,7 @@ sym_session_configure(int driver_id, struct rte_crypto_sym_xform *xform,
 {
        struct rte_crypto_sym_xform *temp_xform = xform;
        struct cpt_sess_misc *misc;
+       vq_cmd_word3_t vq_cmd_w3;
        void *priv;
        int ret;
 
@@ -369,7 +377,7 @@ sym_session_configure(int driver_id, struct rte_crypto_sym_xform *xform,
        }
 
        memset(priv, 0, sizeof(struct cpt_sess_misc) +
-                       offsetof(struct cpt_ctx, fctx));
+                       offsetof(struct cpt_ctx, mc_ctx));
 
        misc = priv;
 
@@ -407,15 +415,21 @@ sym_session_configure(int driver_id, struct rte_crypto_sym_xform *xform,
        misc->ctx_dma_addr = rte_mempool_virt2iova(misc) +
                             sizeof(struct cpt_sess_misc);
 
+       vq_cmd_w3.u64 = 0;
+       vq_cmd_w3.s.cptr = misc->ctx_dma_addr + offsetof(struct cpt_ctx,
+                                                        mc_ctx);
+
        /*
         * IE engines support IPsec operations
         * SE engines support IPsec operations, Chacha-Poly and
         * Air-Crypto operations
         */
        if (misc->zsk_flag || misc->chacha_poly)
-               misc->egrp = OTX2_CPT_EGRP_SE;
+               vq_cmd_w3.s.grp = OTX2_CPT_EGRP_SE;
        else
-               misc->egrp = OTX2_CPT_EGRP_SE_IE;
+               vq_cmd_w3.s.grp = OTX2_CPT_EGRP_SE_IE;
+
+       misc->cpt_inst_w7 = vq_cmd_w3.u64;
 
        return 0;
 
@@ -425,14 +439,35 @@ priv_put:
        return -ENOTSUP;
 }
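The engine group is now folded into a precomputed instruction word 7 at session-configure time: `vq_cmd_w3` carries both the microcode context IOVA and the group (`OTX2_CPT_EGRP_SE` for Chacha-Poly and air-crypto, `OTX2_CPT_EGRP_SE_IE` otherwise), and the datapath copies the word as-is. An illustrative little-endian view of that word; the authoritative layout is `vq_cmd_word3_t` in the common cpt code:

```c
#include <stdint.h>

/* Illustrative view of the precomputed CPT instruction word 7
 * (little-endian bitfield order assumed). */
union cpt_inst_w7_view {
	uint64_t u64;
	struct {
		uint64_t cptr : 61; /* IOVA of the microcode context */
		uint64_t grp  :  3; /* engine group: SE, SE_IE or AE */
	} s;
};
```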
 
-static __rte_always_inline void __rte_hot
+static __rte_always_inline int32_t __rte_hot
 otx2_ca_enqueue_req(const struct otx2_cpt_qp *qp,
                    struct cpt_request_info *req,
-                   void *lmtline)
+                   void *lmtline,
+                   struct rte_crypto_op *op,
+                   uint64_t cpt_inst_w7)
 {
+       union rte_event_crypto_metadata *m_data;
        union cpt_inst_s inst;
        uint64_t lmt_status;
 
+       if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
+               m_data = rte_cryptodev_sym_session_get_user_data(
+                                               op->sym->session);
+               if (m_data == NULL) {
+                       rte_pktmbuf_free(op->sym->m_src);
+                       rte_crypto_op_free(op);
+                       rte_errno = EINVAL;
+                       return -EINVAL;
+               }
+       } else if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS &&
+                  op->private_data_offset) {
+               m_data = (union rte_event_crypto_metadata *)
+                        ((uint8_t *)op +
+                         op->private_data_offset);
+       } else {
+               return -EINVAL;
+       }
+
        inst.u[0] = 0;
        inst.s9x.res_addr = req->comp_baddr;
        inst.u[2] = 0;
@@ -441,14 +476,13 @@ otx2_ca_enqueue_req(const struct otx2_cpt_qp *qp,
        inst.s9x.ei0 = req->ist.ei0;
        inst.s9x.ei1 = req->ist.ei1;
        inst.s9x.ei2 = req->ist.ei2;
-       inst.s9x.ei3 = req->ist.ei3;
-
-       inst.s9x.qord = 1;
-       inst.s9x.grp = qp->ev.queue_id;
-       inst.s9x.tt = qp->ev.sched_type;
-       inst.s9x.tag = (RTE_EVENT_TYPE_CRYPTODEV << 28) |
-                       qp->ev.flow_id;
-       inst.s9x.wq_ptr = (uint64_t)req >> 3;
+       inst.s9x.ei3 = cpt_inst_w7;
+
+       inst.u[2] = (((RTE_EVENT_TYPE_CRYPTODEV << 28) |
+                     m_data->response_info.flow_id) |
+                    ((uint64_t)m_data->response_info.sched_type << 32) |
+                    ((uint64_t)m_data->response_info.queue_id << 34));
+       inst.u[3] = 1 | (((uint64_t)req >> 3) << 3);
        req->qp = qp;
 
        do {
@@ -465,21 +499,22 @@ otx2_ca_enqueue_req(const struct otx2_cpt_qp *qp,
                lmt_status = otx2_lmt_submit(qp->lf_nq_reg);
        } while (lmt_status == 0);
 
+       return 0;
 }
 
 static __rte_always_inline int32_t __rte_hot
 otx2_cpt_enqueue_req(const struct otx2_cpt_qp *qp,
                     struct pending_queue *pend_q,
-                    struct cpt_request_info *req)
+                    struct cpt_request_info *req,
+                    struct rte_crypto_op *op,
+                    uint64_t cpt_inst_w7)
 {
        void *lmtline = qp->lmtline;
        union cpt_inst_s inst;
        uint64_t lmt_status;
 
-       if (qp->ca_enable) {
-               otx2_ca_enqueue_req(qp, req, lmtline);
-               return 0;
-       }
+       if (qp->ca_enable)
+               return otx2_ca_enqueue_req(qp, req, lmtline, op, cpt_inst_w7);
 
        if (unlikely(pend_q->pending_count >= OTX2_CPT_DEFAULT_CMD_QLEN))
                return -EAGAIN;
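In the adapter path above, the response event is no longer taken from per-queue-pair state (`qp->ev`) but from metadata the application attaches to the session, or places at `op->private_data_offset` for sessionless ops. A sketch of the application side, assuming an existing session `sess` and a target event queue `ev_qid`:

```c
#include <string.h>

#include <rte_cryptodev.h>
#include <rte_event_crypto_adapter.h>
#include <rte_eventdev.h>

/* Sketch: attach the response-event metadata that
 * otx2_ca_enqueue_req() packs into inst.u[2]. `sess' and `ev_qid'
 * are assumed to exist in the application. */
static int
attach_response_info(struct rte_cryptodev_sym_session *sess, uint8_t ev_qid)
{
	union rte_event_crypto_metadata m_data;

	memset(&m_data, 0, sizeof(m_data));
	m_data.response_info.sched_type = RTE_SCHED_TYPE_ATOMIC;
	m_data.response_info.queue_id = ev_qid;
	m_data.response_info.flow_id = 0;

	return rte_cryptodev_sym_session_set_user_data(sess, &m_data,
						       sizeof(m_data));
}
```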
@@ -492,7 +527,7 @@ otx2_cpt_enqueue_req(const struct otx2_cpt_qp *qp,
        inst.s9x.ei0 = req->ist.ei0;
        inst.s9x.ei1 = req->ist.ei1;
        inst.s9x.ei2 = req->ist.ei2;
-       inst.s9x.ei3 = req->ist.ei3;
+       inst.s9x.ei3 = cpt_inst_w7;
 
        req->time_out = rte_get_timer_cycles() +
                        DEFAULT_COMMAND_TIMEOUT * rte_get_timer_hz();
@@ -511,7 +546,7 @@ otx2_cpt_enqueue_req(const struct otx2_cpt_qp *qp,
                lmt_status = otx2_lmt_submit(qp->lf_nq_reg);
        } while (lmt_status == 0);
 
-       pend_q->rid_queue[pend_q->enq_tail].rid = (uintptr_t)req;
+       pend_q->req_queue[pend_q->enq_tail] = (uintptr_t)req;
 
        /* We will use soft queue length here to limit requests */
        MOD_INC(pend_q->enq_tail, OTX2_CPT_DEFAULT_CMD_QLEN);
@@ -529,7 +564,6 @@ otx2_cpt_enqueue_asym(struct otx2_cpt_qp *qp,
        struct rte_crypto_asym_op *asym_op = op->asym;
        struct asym_op_params params = {0};
        struct cpt_asym_sess_misc *sess;
-       vq_cmd_word3_t *w3;
        uintptr_t *cop;
        void *mdata;
        int ret;
@@ -584,11 +618,8 @@ otx2_cpt_enqueue_asym(struct otx2_cpt_qp *qp,
                goto req_fail;
        }
 
-       /* Set engine group of AE */
-       w3 = (vq_cmd_word3_t *)&params.req->ist.ei3;
-       w3->s.grp = OTX2_CPT_EGRP_AE;
-
-       ret = otx2_cpt_enqueue_req(qp, pend_q, params.req);
+       ret = otx2_cpt_enqueue_req(qp, pend_q, params.req, op,
+                                  sess->cpt_inst_w7);
 
        if (unlikely(ret)) {
                CPT_LOG_DP_ERR("Could not enqueue crypto req");
@@ -610,7 +641,6 @@ otx2_cpt_enqueue_sym(struct otx2_cpt_qp *qp, struct rte_crypto_op *op,
        struct rte_crypto_sym_op *sym_op = op->sym;
        struct cpt_request_info *req;
        struct cpt_sess_misc *sess;
-       vq_cmd_word3_t *w3;
        uint64_t cpt_op;
        void *mdata;
        int ret;
@@ -633,10 +663,7 @@ otx2_cpt_enqueue_sym(struct otx2_cpt_qp *qp, struct rte_crypto_op *op,
                return ret;
        }
 
-       w3 = ((vq_cmd_word3_t *)(&req->ist.ei3));
-       w3->s.grp = sess->egrp;
-
-       ret = otx2_cpt_enqueue_req(qp, pend_q, req);
+       ret = otx2_cpt_enqueue_req(qp, pend_q, req, op, sess->cpt_inst_w7);
 
        if (unlikely(ret)) {
                /* Free buffer allocated by fill params routines */
@@ -650,28 +677,70 @@ static __rte_always_inline int __rte_hot
 otx2_cpt_enqueue_sec(struct otx2_cpt_qp *qp, struct rte_crypto_op *op,
                     struct pending_queue *pend_q)
 {
+       uint32_t winsz, esn_low = 0, esn_hi = 0, seql = 0, seqh = 0;
+       struct rte_mbuf *m_src = op->sym->m_src;
        struct otx2_sec_session_ipsec_lp *sess;
        struct otx2_ipsec_po_sa_ctl *ctl_wrd;
+       struct otx2_ipsec_po_in_sa *sa;
        struct otx2_sec_session *priv;
        struct cpt_request_info *req;
+       uint64_t seq_in_sa, seq = 0;
+       uint8_t esn;
        int ret;
 
        priv = get_sec_session_private_data(op->sym->sec_session);
        sess = &priv->ipsec.lp;
+       sa = &sess->in_sa;
 
-       ctl_wrd = &sess->in_sa.ctl;
+       ctl_wrd = &sa->ctl;
+       esn = ctl_wrd->esn_en;
+       winsz = sa->replay_win_sz;
 
        if (ctl_wrd->direction == OTX2_IPSEC_PO_SA_DIRECTION_OUTBOUND)
                ret = process_outb_sa(op, sess, &qp->meta_info, (void **)&req);
-       else
+       else {
+               if (winsz) {
+                       esn_low = rte_be_to_cpu_32(sa->esn_low);
+                       esn_hi = rte_be_to_cpu_32(sa->esn_hi);
+                       seql = *rte_pktmbuf_mtod_offset(m_src, uint32_t *,
+                               sizeof(struct rte_ipv4_hdr) + 4);
+                       seql = rte_be_to_cpu_32(seql);
+
+                       if (!esn)
+                               seq = (uint64_t)seql;
+                       else {
+                               seqh = anti_replay_get_seqh(winsz, seql, esn_hi,
+                                               esn_low);
+                               seq = ((uint64_t)seqh << 32) | seql;
+                       }
+
+                       if (unlikely(seq == 0))
+                               return IPSEC_ANTI_REPLAY_FAILED;
+
+                       ret = anti_replay_check(sa->replay, seq, winsz);
+                       if (unlikely(ret)) {
+                               otx2_err("Anti replay check failed");
+                               return IPSEC_ANTI_REPLAY_FAILED;
+                       }
+               }
+
                ret = process_inb_sa(op, sess, &qp->meta_info, (void **)&req);
+       }
 
        if (unlikely(ret)) {
                otx2_err("Crypto req : op %p, ret 0x%x", op, ret);
                return ret;
        }
 
-       ret = otx2_cpt_enqueue_req(qp, pend_q, req);
+       ret = otx2_cpt_enqueue_req(qp, pend_q, req, op, sess->cpt_inst_w7);
+
+       if (winsz && esn) {
+               seq_in_sa = ((uint64_t)esn_hi << 32) | esn_low;
+               if (seq > seq_in_sa) {
+                       sa->esn_low = rte_cpu_to_be_32(seql);
+                       sa->esn_hi = rte_cpu_to_be_32(seqh);
+               }
+       }
 
        return ret;
 }
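The inbound anti-replay check needs the full 64-bit ESN, but only the low 32 bits arrive in the ESP header, so the high half is inferred from the SA's current window top. A sketch of what `anti_replay_get_seqh()` (from otx2_ipsec_anti_replay.h) computes, following the RFC 4303 Appendix A reasoning:

```c
#include <stdint.h>

/* Sketch of ESN high-half inference per RFC 4303 Appendix A. seql is
 * the 32-bit sequence number from the packet, esn_hi/esn_low the SA's
 * window top, winsz the replay window size. */
static inline uint32_t
esn_high_guess(uint32_t winsz, uint32_t seql,
	       uint32_t esn_hi, uint32_t esn_low)
{
	/* Wraps around when the window straddles a 2^32 boundary */
	uint32_t win_low = esn_low - winsz + 1;

	if (esn_low > winsz - 1) {
		/* Window lies inside one 2^32 subspace */
		return (seql > win_low) ? esn_hi : esn_hi + 1;
	}
	/* Window straddles the wrap: a large seql belongs to the
	 * previous subspace */
	return (seql > win_low) ? esn_hi - 1 : esn_hi;
}
```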
@@ -823,7 +892,8 @@ otx2_cpt_asym_dequeue_ecdsa_op(struct rte_crypto_ecdsa_op_param *ecdsa,
 
        /* Separate out sign r and s components */
        memcpy(ecdsa->r.data, req->rptr, prime_len);
-       memcpy(ecdsa->s.data, req->rptr + ROUNDUP8(prime_len), prime_len);
+       memcpy(ecdsa->s.data, req->rptr + RTE_ALIGN_CEIL(prime_len, 8),
+              prime_len);
        ecdsa->r.length = prime_len;
        ecdsa->s.length = prime_len;
 }
@@ -836,7 +906,8 @@ otx2_cpt_asym_dequeue_ecpm_op(struct rte_crypto_ecpm_op_param *ecpm,
        int prime_len = ec_grp[ec->curveid].prime.length;
 
        memcpy(ecpm->r.x.data, req->rptr, prime_len);
-       memcpy(ecpm->r.y.data, req->rptr + ROUNDUP8(prime_len), prime_len);
+       memcpy(ecpm->r.y.data, req->rptr + RTE_ALIGN_CEIL(prime_len, 8),
+              prime_len);
        ecpm->r.x.length = prime_len;
        ecpm->r.y.length = prime_len;
 }
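Both dequeue helpers drop the local `ROUNDUP8()` in favour of the standard `RTE_ALIGN_CEIL()`: the second output component still starts on the next 8-byte boundary after an odd-sized prime. A compile-time illustration:

```c
#include <rte_common.h>

/* RTE_ALIGN_CEIL(v, 8) rounds v up to the next multiple of 8, exactly
 * what ROUNDUP8() did; e.g. a P-521 prime (66 bytes) puts the second
 * component at offset 72. */
_Static_assert(RTE_ALIGN_CEIL(66, 8) == 72, "66 rounds up to 72");
_Static_assert(RTE_ALIGN_CEIL(32, 8) == 32, "aligned values unchanged");
```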
@@ -882,21 +953,22 @@ otx2_cpt_sec_post_process(struct rte_crypto_op *cop, uintptr_t *rsp)
        struct rte_mbuf *m = sym_op->m_src;
        struct rte_ipv6_hdr *ip6;
        struct rte_ipv4_hdr *ip;
-       uint16_t m_len;
+       uint16_t m_len = 0;
        int mdata_len;
        char *data;
 
        mdata_len = (int)rsp[3];
        rte_pktmbuf_trim(m, mdata_len);
 
-       if ((word0->s.opcode & 0xff) == OTX2_IPSEC_PO_PROCESS_IPSEC_INB) {
+       if (word0->s.opcode.major == OTX2_IPSEC_PO_PROCESS_IPSEC_INB) {
                data = rte_pktmbuf_mtod(m, char *);
 
-               if (rsp[4] == RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
+               if (rsp[4] == OTX2_IPSEC_PO_TRANSPORT ||
+                   rsp[4] == OTX2_IPSEC_PO_TUNNEL_IPV4) {
                        ip = (struct rte_ipv4_hdr *)(data +
                                OTX2_IPSEC_PO_INB_RPTR_HDR);
                        m_len = rte_be_to_cpu_16(ip->total_length);
-               } else {
+               } else if (rsp[4] == OTX2_IPSEC_PO_TUNNEL_IPV6) {
                        ip6 = (struct rte_ipv6_hdr *)(data +
                                OTX2_IPSEC_PO_INB_RPTR_HDR);
                        m_len = rte_be_to_cpu_16(ip6->payload_len) +
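The branch now keys off the mode the microcode reports in `rsp[4]` (transport, tunnel IPv4, tunnel IPv6) rather than the security API's tunnel type, and `m_len` is pre-initialised because an unrecognised mode leaves it untouched. The addend on the IPv6 side exists because `payload_len` excludes the fixed 40-byte header, unlike IPv4's `total_length`; a small illustration, assuming a parsed header `ip6`:

```c
#include <rte_ip.h>

/* IPv6 payload_len excludes the fixed 40-byte header, so the full
 * packet length must add it back (illustration only). */
static inline uint16_t
ipv6_pkt_len(const struct rte_ipv6_hdr *ip6)
{
	return rte_be_to_cpu_16(ip6->payload_len) +
	       sizeof(struct rte_ipv6_hdr);
}
```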
@@ -975,7 +1047,6 @@ otx2_cpt_dequeue_burst(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
        struct cpt_request_info *req;
        struct rte_crypto_op *cop;
        uint8_t cc[nb_ops];
-       struct rid *rid;
        uintptr_t *rsp;
        void *metabuf;
 
@@ -987,8 +1058,8 @@ otx2_cpt_dequeue_burst(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
                nb_ops = nb_pending;
 
        for (i = 0; i < nb_ops; i++) {
-               rid = &pend_q->rid_queue[pend_q->deq_head];
-               req = (struct cpt_request_info *)(rid->rid);
+               req = (struct cpt_request_info *)
+                               pend_q->req_queue[pend_q->deq_head];
 
                cc[i] = otx2_cpt_compcode_get(req);
 
@@ -1266,6 +1337,7 @@ otx2_cpt_asym_session_cfg(struct rte_cryptodev *dev,
                          struct rte_mempool *pool)
 {
        struct cpt_asym_sess_misc *priv;
+       vq_cmd_word3_t vq_cmd_w3;
        int ret;
 
        CPT_PMD_INIT_FUNC_TRACE();
@@ -1286,7 +1358,12 @@ otx2_cpt_asym_session_cfg(struct rte_cryptodev *dev,
                return ret;
        }
 
+       vq_cmd_w3.u64 = 0;
+       vq_cmd_w3.s.grp = OTX2_CPT_EGRP_AE;
+       priv->cpt_inst_w7 = vq_cmd_w3.u64;
+
        set_asym_session_private_data(sess, dev->driver_id, priv);
+
        return 0;
 }