2 /* SPDX-License-Identifier: BSD-3-Clause
3 * Copyright(C) 2019 Marvell International Ltd.
6 #ifndef __OTX2_IPSEC_PO_OPS_H__
7 #define __OTX2_IPSEC_PO_OPS_H__
9 #include <rte_crypto_sym.h>
10 #include <rte_security.h>
12 #include "otx2_cryptodev.h"
13 #include "otx2_security.h"
/*
 * Compute the expected output (post-encryption) length for an outbound
 * IPsec packet of payload length @plen on session @sess.
 * The payload is rounded up by the session's roundup_len and aligned
 * (alignment argument not visible in this view -- presumably the cipher
 * block / padding boundary; confirm against the full source), then the
 * session's precomputed fixed overhead (partial_len) is added.
 */
15 static __rte_always_inline int32_t
16 otx2_ipsec_po_out_rlen_get(struct otx2_sec_session_ipsec_lp *sess,
19 uint32_t enc_payload_len;
21 enc_payload_len = RTE_ALIGN_CEIL(plen + sess->roundup_len,
/* partial_len: per-SA constant overhead (headers/ICV) set up at
 * session creation -- TODO confirm in session-init code. */
24 return sess->partial_len + enc_payload_len;
/*
 * Carve a CPT request and its meta info out of the caller-supplied
 * metadata buffer @maddr (@mdata_len bytes, following the packet data)
 * and record the bookkeeping needed when the operation is dequeued.
 * Returns the prepared cpt_request_info (return statement elided from
 * this view).
 */
27 static __rte_always_inline struct cpt_request_info *
28 alloc_request_struct(char *maddr, void *cop, int mdata_len)
30 struct cpt_request_info *req;
31 struct cpt_meta_info *meta;
/* Meta info lives at the first 16-byte aligned address inside maddr */
35 meta = (void *)RTE_PTR_ALIGN((uint8_t *)maddr, 16);
/* deq_op_info: slots read back at dequeue time -- see op[0..2] below */
37 op = (uintptr_t *)meta->deq_op_info;
/* Hardware writes its result/completion word into meta->cpt_res */
39 resp_addr = (uint8_t *)&meta->cpt_res;
41 req->completion_addr = (uint64_t *)((uint8_t *)resp_addr);
/* Pre-seed so software can poll for the HW overwriting it */
42 *req->completion_addr = COMPLETION_CODE_INIT;
/* HW needs the IO virtual address of the completion word */
43 req->comp_baddr = rte_mem_virt2iova(resp_addr);
/* op[0] = meta pointer with bit 0 set as a tag, op[1] = crypto op,
 * op[2] = request -- presumably unpacked by the dequeue path. */
46 op[0] = (uintptr_t)((uint64_t)meta | 1ull);
47 op[1] = (uintptr_t)cop;
48 op[2] = (uintptr_t)req;
/*
 * Prepare an outbound (encrypt) IPsec lookaside request for @cop.
 * Grows the mbuf at both ends (tail: ESP expansion + request metadata,
 * head: outbound header + room to keep the Ethernet header in front),
 * fills the IV into the inserted header and builds the CPT instruction.
 * The prepared request is handed back through @prep_req (final
 * assignment/return elided from this view).
 */
54 static __rte_always_inline int
55 process_outb_sa(struct rte_crypto_op *cop,
56 struct otx2_sec_session_ipsec_lp *sess,
57 struct cpt_qp_meta_info *m_info, void **prep_req)
59 uint32_t dlen, rlen, extend_head, extend_tail;
60 struct rte_crypto_sym_op *sym_op = cop->sym;
61 struct rte_mbuf *m_src = sym_op->m_src;
62 struct otx2_ipsec_po_sa_ctl *ctl_wrd;
63 struct cpt_request_info *req = NULL;
64 struct otx2_ipsec_po_out_hdr *hdr;
65 struct otx2_ipsec_po_out_sa *sa;
66 int hdr_len, mdata_len, ret = 0;
72 hdr_len = sizeof(*hdr);
/* dlen: input to HW = packet plus the inserted outbound header;
 * rlen: expected encrypted output length for the bare packet. */
74 dlen = rte_pktmbuf_pkt_len(m_src) + hdr_len;
75 rlen = otx2_ipsec_po_out_rlen_get(sess, dlen - hdr_len);
77 extend_head = hdr_len + RTE_ETHER_HDR_LEN;
/* Tail growth needed for padding/ICV added by encryption */
78 extend_tail = rlen - dlen;
/* +8: extra slack beyond the queue-pair meta length -- TODO confirm
 * what the 8 bytes cover (alignment of the meta area?). */
79 mdata_len = m_info->lb_mlen + 8;
81 mdata = rte_pktmbuf_append(m_src, extend_tail + mdata_len);
82 if (unlikely(mdata == NULL)) {
83 otx2_err("Not enough tail room\n");
88 mdata += extend_tail; /* mdata follows encrypted data */
89 req = alloc_request_struct(mdata, (void *)cop, mdata_len);
/* Make room at the front for otx2_ipsec_po_out_hdr while keeping the
 * Ethernet header ahead of it. */
91 data = rte_pktmbuf_prepend(m_src, extend_head);
92 if (unlikely(data == NULL)) {
93 otx2_err("Not enough head room\n");
99 * Move the Ethernet header, to insert otx2_ipsec_po_out_hdr prior
/* Regions cannot overlap here since extend_head > hdr_len (it also
 * covers RTE_ETHER_HDR_LEN), so plain memcpy is safe. */
102 memcpy(data, data + hdr_len, RTE_ETHER_HDR_LEN);
104 hdr = (struct otx2_ipsec_po_out_hdr *)rte_pktmbuf_adj(m_src,
/* GCM: IV = 4-byte salt/nonce from the SA + per-op IV from the crypto
 * op at sess->iv_offset. */
107 if (ctl_wrd->enc_type == OTX2_IPSEC_FP_SA_ENC_AES_GCM) {
108 memcpy(&hdr->iv[0], &sa->iv.gcm.nonce, 4);
109 memcpy(&hdr->iv[4], rte_crypto_op_ctod_offset(cop, uint8_t *,
110 sess->iv_offset), sess->iv_length);
/* NOTE(review): auth_type is compared against an *_ENC_* constant
 * (AES_CBC); looks like it should test enc_type -- verify against the
 * SA control-word layout before changing. */
111 } else if (ctl_wrd->auth_type == OTX2_IPSEC_FP_SA_ENC_AES_CBC) {
112 memcpy(&hdr->iv[0], rte_crypto_op_ctod_offset(cop, uint8_t *,
113 sess->iv_offset), sess->iv_length);
116 /* Prepare CPT instruction */
/* ucmd_w0/w3 are precomputed microcode command words from the session */
117 word0.u64 = sess->ucmd_w0;
120 req->ist.ei0 = word0.u64;
/* In-place operation: output IOVA == input IOVA */
121 req->ist.ei1 = rte_pktmbuf_iova(m_src);
122 req->ist.ei2 = req->ist.ei1;
123 req->ist.ei3 = sess->ucmd_w3;
/* Per-packet sequence number and IP ID, stored big-endian in the
 * outbound header. */
125 hdr->seq = rte_cpu_to_be_32(sess->seq_lo);
126 hdr->ip_id = rte_cpu_to_be_32(sess->ip_id);
/*
 * Prepare an inbound (decrypt) IPsec lookaside request for @cop.
 * Simpler than the outbound path: no header insertion -- only the
 * request metadata is appended to the mbuf tail and the CPT
 * instruction is built from the session's precomputed command words.
 * The prepared request is handed back through @prep_req (final
 * assignment/return elided from this view).
 */
137 static __rte_always_inline int
138 process_inb_sa(struct rte_crypto_op *cop,
139 struct otx2_sec_session_ipsec_lp *sess,
140 struct cpt_qp_meta_info *m_info, void **prep_req)
142 struct rte_crypto_sym_op *sym_op = cop->sym;
143 struct rte_mbuf *m_src = sym_op->m_src;
144 struct cpt_request_info *req = NULL;
145 int mdata_len, ret = 0;
146 vq_cmd_word0_t word0;
150 dlen = rte_pktmbuf_pkt_len(m_src);
/* +8: same slack as the outbound path -- TODO confirm purpose */
151 mdata_len = m_info->lb_mlen + 8;
/* Metadata area goes straight after the packet data */
153 mdata = rte_pktmbuf_append(m_src, mdata_len);
154 if (unlikely(mdata == NULL)) {
155 otx2_err("Not enough tail room\n");
160 req = alloc_request_struct(mdata, (void *)cop, mdata_len);
162 /* Prepare CPT instruction */
163 word0.u64 = sess->ucmd_w0;
166 req->ist.ei0 = word0.u64;
/* In-place operation: output IOVA == input IOVA */
167 req->ist.ei1 = rte_pktmbuf_iova(m_src);
168 req->ist.ei2 = req->ist.ei1;
169 req->ist.ei3 = sess->ucmd_w3;
175 #endif /* __OTX2_IPSEC_PO_OPS_H__ */