crypto/cnxk: add IPsec datapath
authorTejasree Kondoj <ktejasree@marvell.com>
Tue, 29 Jun 2021 07:34:31 +0000 (13:04 +0530)
committerAkhil Goyal <gakhil@marvell.com>
Wed, 7 Jul 2021 19:15:08 +0000 (21:15 +0200)
Add rte_security handling in cn10k crypto enqueue/dequeue ops to
support IPsec protocol offload.
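
For reference, a minimal application-side sketch of the lookaside
datapath this patch enables (illustrative only; enqueue_ipsec_pkt,
dev_id, qp_id, sec_sess and op_pool are hypothetical names, and the
security session is assumed to have been created earlier with
rte_security_session_create() using an IPsec lookaside-protocol
configuration):

    #include <rte_crypto.h>
    #include <rte_cryptodev.h>
    #include <rte_mbuf.h>
    #include <rte_security.h>

    /* Enqueue one packet for lookaside IPsec processing. The mbuf must
     * be a single, contiguous segment and processed in place, since the
     * PMD rejects out-of-place and scatter-gather buffers.
     */
    static int
    enqueue_ipsec_pkt(uint8_t dev_id, uint16_t qp_id,
                      struct rte_security_session *sec_sess,
                      struct rte_mbuf *m, struct rte_mempool *op_pool)
    {
            struct rte_crypto_op *op;

            op = rte_crypto_op_alloc(op_pool, RTE_CRYPTO_OP_TYPE_SYMMETRIC);
            if (op == NULL)
                    return -ENOMEM;

            /* Protocol offload: only the packet and the security session
             * are needed, no cipher/auth transform in the op.
             */
            op->sym->m_src = m;
            rte_security_attach_session(op, sec_sess);

            if (rte_cryptodev_enqueue_burst(dev_id, qp_id, &op, 1) != 1) {
                    rte_crypto_op_free(op);
                    return -EAGAIN;
            }

            return 0;
    }

On dequeue, for inbound (decrypt) packets the PMD fixes up the mbuf
pkt_len/data_len from the inner IP header, so the application gets the
plain packet back in the same mbuf.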

Signed-off-by: Anoob Joseph <anoobj@marvell.com>
Signed-off-by: Srujana Challa <schalla@marvell.com>
Signed-off-by: Tejasree Kondoj <ktejasree@marvell.com>
Acked-by: Akhil Goyal <gakhil@marvell.com>
drivers/crypto/cnxk/cn10k_cryptodev_ops.c
drivers/crypto/cnxk/cn10k_ipsec_la_ops.h [new file with mode: 0644]

index 1a30908..8005a25 100644
@@ -4,9 +4,12 @@
 
 #include <rte_cryptodev.h>
 #include <rte_cryptodev_pmd.h>
+#include <rte_ip.h>
 
 #include "cn10k_cryptodev.h"
 #include "cn10k_cryptodev_ops.h"
+#include "cn10k_ipsec_la_ops.h"
+#include "cn10k_ipsec.h"
 #include "cnxk_cryptodev.h"
 #include "cnxk_cryptodev_ops.h"
 #include "cnxk_se.h"
@@ -41,6 +44,38 @@ sess_put:
        return NULL;
 }
 
+static __rte_always_inline int __rte_hot
+cpt_sec_inst_fill(struct rte_crypto_op *op, struct cn10k_sec_session *sess,
+                 struct cpt_inflight_req *infl_req, struct cpt_inst_s *inst)
+{
+       struct rte_crypto_sym_op *sym_op = op->sym;
+       union roc_ot_ipsec_sa_word2 *w2;
+       struct cn10k_ipsec_sa *sa;
+       int ret;
+
+       if (unlikely(sym_op->m_dst && sym_op->m_dst != sym_op->m_src)) {
+               plt_dp_err("Out of place is not supported");
+               return -ENOTSUP;
+       }
+
+       if (unlikely(!rte_pktmbuf_is_contiguous(sym_op->m_src))) {
+               plt_dp_err("Scatter Gather mode is not supported");
+               return -ENOTSUP;
+       }
+
+       sa = &sess->sa;
+       w2 = (union roc_ot_ipsec_sa_word2 *)&sa->in_sa.w2;
+
+       if (w2->s.dir == ROC_IE_OT_SA_DIR_OUTBOUND)
+               ret = process_outb_sa(op, sa, inst);
+       else {
+               infl_req->op_flags |= CPT_OP_FLAGS_IPSEC_DIR_INBOUND;
+               ret = process_inb_sa(op, sa, inst);
+       }
+
+       return ret;
+}
+
 static __rte_always_inline int __rte_hot
 cpt_sym_inst_fill(struct cnxk_cpt_qp *qp, struct rte_crypto_op *op,
                  struct cnxk_se_sess *sess, struct cpt_inflight_req *infl_req,
@@ -64,6 +99,7 @@ static inline int
 cn10k_cpt_fill_inst(struct cnxk_cpt_qp *qp, struct rte_crypto_op *ops[],
                    struct cpt_inst_s inst[], struct cpt_inflight_req *infl_req)
 {
+       struct cn10k_sec_session *sec_sess;
        struct rte_crypto_sym_op *sym_op;
        struct cnxk_se_sess *sess;
        struct rte_crypto_op *op;
@@ -79,7 +115,15 @@ cn10k_cpt_fill_inst(struct cnxk_cpt_qp *qp, struct rte_crypto_op *ops[],
        sym_op = op->sym;
 
        if (op->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
-               if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
+               if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
+                       sec_sess = get_sec_session_private_data(
+                               sym_op->sec_session);
+                       ret = cpt_sec_inst_fill(op, sec_sess, infl_req,
+                                               &inst[0]);
+                       if (unlikely(ret))
+                               return 0;
+                       w7 = sec_sess->sa.inst.w7;
+               } else if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
                        sess = get_sym_session_private_data(
                                sym_op->session, cn10k_cryptodev_driver_id);
                        ret = cpt_sym_inst_fill(qp, op, sess, infl_req,
@@ -195,6 +239,34 @@ update_pending:
        return count + i;
 }
 
+static inline void
+cn10k_cpt_sec_post_process(struct rte_crypto_op *cop,
+                          struct cpt_inflight_req *infl_req)
+{
+       struct rte_crypto_sym_op *sym_op = cop->sym;
+       struct rte_mbuf *m = sym_op->m_src;
+       struct rte_ipv6_hdr *ip6;
+       struct rte_ipv4_hdr *ip;
+       uint16_t m_len;
+
+       if (infl_req->op_flags & CPT_OP_FLAGS_IPSEC_DIR_INBOUND) {
+               ip = (struct rte_ipv4_hdr *)rte_pktmbuf_mtod(m, char *);
+
+               if (((ip->version_ihl & 0xf0) >> RTE_IPV4_IHL_MULTIPLIER) ==
+                   IPVERSION) {
+                       m_len = rte_be_to_cpu_16(ip->total_length);
+               } else {
+                       PLT_ASSERT(((ip->version_ihl & 0xf0) >>
+                                   RTE_IPV4_IHL_MULTIPLIER) == IPV6_VERSION);
+                       ip6 = (struct rte_ipv6_hdr *)ip;
+                       m_len = rte_be_to_cpu_16(ip6->payload_len) +
+                               sizeof(struct rte_ipv6_hdr);
+               }
+               m->data_len = m_len;
+               m->pkt_len = m_len;
+       }
+}
+
 static inline void
 cn10k_cpt_dequeue_post_process(struct cnxk_cpt_qp *qp,
                               struct rte_crypto_op *cop,
@@ -219,6 +291,10 @@ cn10k_cpt_dequeue_post_process(struct cnxk_cpt_qp *qp,
 
                cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
                if (cop->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
+                       if (cop->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
+                               cn10k_cpt_sec_post_process(cop, infl_req);
+                               return;
+                       }
 
                        /* Verify authentication data if required */
                        if (unlikely(infl_req->op_flags &
diff --git a/drivers/crypto/cnxk/cn10k_ipsec_la_ops.h b/drivers/crypto/cnxk/cn10k_ipsec_la_ops.h
new file mode 100644
index 0000000..1e9ebb5
--- /dev/null
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2021 Marvell.
+ */
+
+#ifndef __CN10K_IPSEC_LA_OPS_H__
+#define __CN10K_IPSEC_LA_OPS_H__
+
+#include <rte_crypto_sym.h>
+#include <rte_security.h>
+
+#include "cn10k_cryptodev.h"
+#include "cn10k_ipsec.h"
+#include "cnxk_cryptodev.h"
+
+static __rte_always_inline int32_t
+ipsec_po_out_rlen_get(struct cn10k_ipsec_sa *sess, uint32_t plen)
+{
+       uint32_t enc_payload_len;
+
+       enc_payload_len =
+               RTE_ALIGN_CEIL(plen + sess->roundup_len, sess->roundup_byte);
+
+       return sess->partial_len + enc_payload_len;
+}
+
+static __rte_always_inline int
+process_outb_sa(struct rte_crypto_op *cop, struct cn10k_ipsec_sa *sess,
+               struct cpt_inst_s *inst)
+{
+       struct rte_crypto_sym_op *sym_op = cop->sym;
+       struct rte_mbuf *m_src = sym_op->m_src;
+       uint32_t dlen, rlen, extend_tail;
+       char *mdata;
+
+       dlen = rte_pktmbuf_pkt_len(m_src);
+       rlen = ipsec_po_out_rlen_get(sess, dlen);
+
+       extend_tail = rlen - dlen;
+
+       mdata = rte_pktmbuf_append(m_src, extend_tail);
+       if (unlikely(mdata == NULL)) {
+               plt_dp_err("Not enough tail room");
+               return -ENOMEM;
+       }
+
+       /* Prepare CPT instruction */
+       inst->w4.u64 = sess->inst.w4;
+       inst->w4.s.dlen = dlen;
+       inst->dptr = rte_pktmbuf_iova(m_src);
+       inst->rptr = inst->dptr;
+
+       return 0;
+}
+
+static __rte_always_inline int
+process_inb_sa(struct rte_crypto_op *cop, struct cn10k_ipsec_sa *sa,
+              struct cpt_inst_s *inst)
+{
+       struct rte_crypto_sym_op *sym_op = cop->sym;
+       struct rte_mbuf *m_src = sym_op->m_src;
+       uint32_t dlen;
+
+       dlen = rte_pktmbuf_pkt_len(m_src);
+
+       /* Prepare CPT instruction */
+       inst->w4.u64 = sa->inst.w4;
+       inst->w4.s.dlen = dlen;
+       inst->dptr = rte_pktmbuf_iova(m_src);
+       inst->rptr = inst->dptr;
+
+       return 0;
+}
+
+#endif /* __CN10K_IPSEC_LA_OPS_H__ */