2 /* SPDX-License-Identifier: BSD-3-Clause
3 * Copyright(C) 2019 Marvell International Ltd.
6 #ifndef __OTX2_IPSEC_PO_OPS_H__
7 #define __OTX2_IPSEC_PO_OPS_H__
9 #include <rte_crypto_sym.h>
10 #include <rte_security.h>
12 #include "otx2_cryptodev.h"
13 #include "otx2_security.h"
/*
 * Compute the expected result (output) length of an outbound IPsec
 * packet: the session's fixed per-packet overhead (partial_len) plus
 * the payload length rounded up by the session's roundup parameters
 * (cipher block / padding alignment).
 *
 * NOTE(review): the second parameter and the RTE_ALIGN_CEIL alignment
 * argument are elided in this view — presumably `uint32_t plen` and
 * the session roundup byte count; confirm against the full source.
 */
15 static __rte_always_inline int32_t
16 otx2_ipsec_po_out_rlen_get(struct otx2_sec_session_ipsec_lp *sess,
19 uint32_t enc_payload_len;
/* Round the payload (plus per-session roundup slack) up to alignment. */
21 enc_payload_len = RTE_ALIGN_CEIL(plen + sess->roundup_len,
/* Fixed header/trailer overhead + aligned encrypted payload. */
24 return sess->partial_len + enc_payload_len;
/*
 * Carve a CPT request structure out of the per-packet metadata area
 * at @maddr and initialize its completion bookkeeping.
 *
 * The metadata region is laid out as a 16-byte-aligned
 * struct cpt_meta_info holding the request, the hardware completion
 * word (cpt_res) and the dequeue op-info array.
 *
 * NOTE(review): the assignments of `req`, and the declarations of
 * `op`/`resp_addr`, are elided in this view — `req` presumably points
 * into `meta` before it is dereferenced below; confirm against the
 * full source. @mdata_len and @mode_type are also consumed by elided
 * lines.
 */
27 static __rte_always_inline struct cpt_request_info *
28 alloc_request_struct(char *maddr, void *cop, int mdata_len,
29 enum otx2_ipsec_po_mode_type mode_type)
31 struct cpt_request_info *req;
32 struct cpt_meta_info *meta;
/* Metadata must be 16-byte aligned for the hardware. */
36 meta = (void *)RTE_PTR_ALIGN((uint8_t *)maddr, 16);
38 op = (uintptr_t *)meta->deq_op_info;
/* Completion word lives inside the metadata; hardware writes it. */
40 resp_addr = (uint8_t *)&meta->cpt_res;
42 req->completion_addr = (uint64_t *)((uint8_t *)resp_addr);
/* Pre-set to the "not yet completed" marker polled at dequeue time. */
43 *req->completion_addr = COMPLETION_CODE_INIT;
/* Hardware needs the IOVA (bus address) of the completion word. */
44 req->comp_baddr = rte_mem_virt2iova(resp_addr);
/*
 * Dequeue bookkeeping: metadata pointer (low bit set as a tag),
 * the crypto op, and the request itself.
 */
47 op[0] = (uintptr_t)((uint64_t)meta | 1ull);
48 op[1] = (uintptr_t)cop;
49 op[2] = (uintptr_t)req;
/*
 * Prepare a CPT instruction for outbound (encrypt) lookaside IPsec
 * processing of @cop's source mbuf.
 *
 * Grows the mbuf tail to hold the encryption expansion plus request
 * metadata, grows the head for the outbound IPsec header (and Ethernet
 * header relocation), fills the per-packet IV/sequence fields and the
 * CPT instruction words, and hands the built request back through
 * @prep_req (final store elided in this view).
 *
 * Returns 0 on success, negative on failure (error paths elided in
 * this view).
 */
56 static __rte_always_inline int
57 process_outb_sa(struct rte_crypto_op *cop,
58 struct otx2_sec_session_ipsec_lp *sess,
59 struct cpt_qp_meta_info *m_info, void **prep_req)
61 uint32_t dlen, rlen, extend_head, extend_tail;
62 struct rte_crypto_sym_op *sym_op = cop->sym;
63 struct rte_mbuf *m_src = sym_op->m_src;
64 struct otx2_ipsec_po_sa_ctl *ctl_wrd;
65 struct cpt_request_info *req = NULL;
66 struct otx2_ipsec_po_out_hdr *hdr;
67 struct otx2_ipsec_po_out_sa *sa;
68 int hdr_len, mdata_len, ret = 0;
74 hdr_len = sizeof(*hdr);
/* Input length to CPT: whole packet plus the outbound header. */
76 dlen = rte_pktmbuf_pkt_len(m_src) + hdr_len;
/* Expected output length for the original (headerless) payload. */
77 rlen = otx2_ipsec_po_out_rlen_get(sess, dlen - hdr_len);
79 extend_head = hdr_len + RTE_ETHER_HDR_LEN;
/* Tail growth = encryption expansion (padding/ICV/etc.). */
80 extend_tail = rlen - dlen;
/* +8 presumably for alignment slack of the metadata — confirm. */
81 mdata_len = m_info->lb_mlen + 8;
/* Reserve room for ciphertext expansion and request metadata. */
83 mdata = rte_pktmbuf_append(m_src, extend_tail + mdata_len);
84 if (unlikely(mdata == NULL)) {
85 otx2_err("Not enough tail room\n");
90 mdata += extend_tail; /* mdata follows encrypted data */
91 req = alloc_request_struct(mdata, (void *)cop, mdata_len,
/* Make head room for the outbound header in front of the packet. */
94 data = rte_pktmbuf_prepend(m_src, extend_head);
95 if (unlikely(data == NULL)) {
96 otx2_err("Not enough head room\n");
102 * Move the Ethernet header, to insert otx2_ipsec_po_out_hdr prior
/* Relocate the L2 header to the new packet start. */
105 memcpy(data, data + hdr_len, RTE_ETHER_HDR_LEN);
107 hdr = (struct otx2_ipsec_po_out_hdr *)rte_pktmbuf_adj(m_src,
/*
 * Per-packet IV: for AES-GCM the first 4 bytes are the SA salt
 * (nonce) followed by the op-provided IV; for SHA1-auth modes the
 * whole IV comes from the op.
 */
110 if (ctl_wrd->enc_type == OTX2_IPSEC_FP_SA_ENC_AES_GCM) {
111 memcpy(&hdr->iv[0], &sa->iv.gcm.nonce, 4);
112 memcpy(&hdr->iv[4], rte_crypto_op_ctod_offset(cop, uint8_t *,
113 sess->iv_offset), sess->iv_length);
114 } else if (ctl_wrd->auth_type == OTX2_IPSEC_PO_SA_AUTH_SHA1) {
115 memcpy(&hdr->iv[0], rte_crypto_op_ctod_offset(cop, uint8_t *,
116 sess->iv_offset), sess->iv_length);
119 /* Prepare CPT instruction */
/* word0 carries the precomputed microcode command for this session. */
120 word0.u64 = sess->ucmd_w0;
123 req->ist.ei0 = word0.u64;
/* In-place operation: input and output buffers are the same mbuf. */
124 req->ist.ei1 = rte_pktmbuf_iova(m_src);
125 req->ist.ei2 = req->ist.ei1;
/* Refresh extended sequence number state from the session. */
127 sa->esn_hi = sess->seq_hi;
129 hdr->seq = rte_cpu_to_be_32(sess->seq_lo);
130 hdr->ip_id = rte_cpu_to_be_32(sess->ip_id);
/*
 * Prepare a CPT instruction for inbound (decrypt) lookaside IPsec
 * processing of @cop's source mbuf.
 *
 * Inbound needs no head/tail packet reshaping beyond appending room
 * for the request metadata; the packet is processed in place. The
 * built request is returned through @prep_req (final store elided in
 * this view).
 *
 * Returns 0 on success, negative on failure (error paths elided in
 * this view).
 */
141 static __rte_always_inline int
142 process_inb_sa(struct rte_crypto_op *cop,
143 struct otx2_sec_session_ipsec_lp *sess,
144 struct cpt_qp_meta_info *m_info, void **prep_req)
146 struct rte_crypto_sym_op *sym_op = cop->sym;
147 struct rte_mbuf *m_src = sym_op->m_src;
148 struct cpt_request_info *req = NULL;
149 int mdata_len, ret = 0;
150 vq_cmd_word0_t word0;
154 dlen = rte_pktmbuf_pkt_len(m_src);
/* +8 presumably for alignment slack of the metadata — confirm. */
155 mdata_len = m_info->lb_mlen + 8;
/* Reserve tail room for the request metadata only. */
157 mdata = rte_pktmbuf_append(m_src, mdata_len);
158 if (unlikely(mdata == NULL)) {
159 otx2_err("Not enough tail room\n");
164 req = alloc_request_struct(mdata, (void *)cop, mdata_len,
167 /* Prepare CPT instruction */
/* word0 carries the precomputed microcode command for this session. */
168 word0.u64 = sess->ucmd_w0;
171 req->ist.ei0 = word0.u64;
/* In-place operation: input and output buffers are the same mbuf. */
172 req->ist.ei1 = rte_pktmbuf_iova(m_src);
173 req->ist.ei2 = req->ist.ei1;
179 #endif /* __OTX2_IPSEC_PO_OPS_H__ */