1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2021 Marvell.
5 #ifndef __CN9K_IPSEC_LA_OPS_H__
6 #define __CN9K_IPSEC_LA_OPS_H__
8 #include <rte_crypto_sym.h>
10 #include <rte_security.h>
12 #include "cn9k_ipsec.h"
13 #include "cnxk_security_ar.h"
/*
 * Compute the expected outbound (post-encryption) result length for @plen
 * bytes of plain payload on @sa: the payload is rounded up per the SA's
 * roundup parameters (presumably the cipher block size — confirm against
 * the rlens definition in cn9k_ipsec.h) and the SA's fixed per-packet
 * overhead (partial_len) is added.
 *
 * NOTE(review): the listing is truncated (embedded source line numbers
 * skip); code lines below are kept byte-identical to the source shown.
 */
15 static __rte_always_inline int32_t
16 ipsec_po_out_rlen_get(struct cn9k_ipsec_sa *sa, uint32_t plen)
18 uint32_t enc_payload_len;
/* Round payload length up to the SA's roundup_byte boundary. */
20 enc_payload_len = RTE_ALIGN_CEIL(plen + sa->rlens.roundup_len,
21 sa->rlens.roundup_byte);
/* Fixed overhead plus rounded payload = total output length. */
23 return sa->rlens.partial_len + enc_payload_len;
/*
 * Software anti-replay check for an inbound packet against @sa with a
 * replay window of @win_sz. Reads the ESP sequence number from the mbuf,
 * reconstructs the 64-bit (ESN) sequence, runs the shared cnxk window
 * check, and on success advances the ESN stored in the SA.
 *
 * Returns the result of cnxk_on_anti_replay_check(), or
 * IPSEC_ANTI_REPLAY_FAILED for a zero sequence number.
 *
 * NOTE(review): listing is truncated — the mbuf parameter line, the `ret`
 * declaration, the `in_sa` assignment and the ESN-enable branch are not
 * visible here; code lines below are byte-identical to the source shown.
 */
26 static __rte_always_inline int
27 ipsec_antireplay_check(struct cn9k_ipsec_sa *sa, uint32_t win_sz,
30 uint32_t esn_low = 0, esn_hi = 0, seql = 0, seqh = 0;
31 struct roc_ie_on_common_sa *common_sa;
32 struct roc_ie_on_inb_sa *in_sa;
33 struct roc_ie_on_sa_ctl *ctl;
34 uint64_t seq_in_sa, seq = 0;
35 struct rte_esp_hdr *esp;
/* Current ESN high/low words tracked in the SA (stored big-endian). */
40 common_sa = &in_sa->common_sa;
41 ctl = &common_sa->ctl;
44 esn_low = rte_be_to_cpu_32(common_sa->esn_low);
45 esn_hi = rte_be_to_cpu_32(common_sa->esn_hi);
/*
 * ESP header is located right after the outer IP header.
 * NOTE(review): assumes an IPv4 outer header without options — confirm
 * whether IPv6/options are handled elsewhere on this path.
 */
47 esp = rte_pktmbuf_mtod_offset(m, void *, sizeof(struct rte_ipv4_hdr));
48 seql = rte_be_to_cpu_32(esp->seq);
/* Infer the ESN high word from the window and SA state (RFC 4303 ESN). */
53 seqh = cnxk_on_anti_replay_get_seqh(win_sz, seql, esn_hi,
55 seq = ((uint64_t)seqh << 32) | seql;
/* Sequence number 0 is never valid for ESP. */
58 if (unlikely(seq == 0))
59 return IPSEC_ANTI_REPLAY_FAILED;
/* Shared cnxk sliding-window replay check; updates sa->ar on accept. */
61 ret = cnxk_on_anti_replay_check(seq, &sa->ar, win_sz);
/* Move the SA's stored ESN forward when the packet is newer. */
63 seq_in_sa = ((uint64_t)esn_hi << 32) | esn_low;
64 if (seq > seq_in_sa) {
65 common_sa->esn_low = rte_cpu_to_be_32(seql);
66 common_sa->esn_hi = rte_cpu_to_be_32(seqh);
/*
 * Prepare an outbound (encrypt) CPT instruction for @cop on @sa.
 *
 * Grows the mbuf tail to fit the post-encryption length computed by
 * ipsec_po_out_rlen_get(), prepends the on-chip outbound header carrying
 * the per-packet IV/sequence/IP-ID, and fills the CPT instruction words
 * from the SA's precomputed templates.
 *
 * NOTE(review): listing is truncated — declarations of dlen/rlen/
 * extend_tail, the error `return`s after the room checks, the IV copy
 * destination and the final return are not visible; code lines below are
 * byte-identical to the source shown.
 */
73 static __rte_always_inline int
74 process_outb_sa(struct rte_crypto_op *cop, struct cn9k_ipsec_sa *sa,
75 struct cpt_inst_s *inst)
77 const unsigned int hdr_len = sizeof(struct roc_ie_on_outb_hdr);
78 struct rte_crypto_sym_op *sym_op = cop->sym;
79 struct rte_mbuf *m_src = sym_op->m_src;
80 struct roc_ie_on_outb_sa *out_sa;
81 struct roc_ie_on_outb_hdr *hdr;
/* Input length includes the prepended header; rlen is for payload only. */
87 dlen = rte_pktmbuf_pkt_len(m_src) + hdr_len;
88 rlen = ipsec_po_out_rlen_get(sa, dlen - hdr_len);
/* Encryption output is larger than input; make sure the tail fits. */
90 extend_tail = rlen - dlen;
91 if (unlikely(extend_tail > rte_pktmbuf_tailroom(m_src))) {
92 plt_dp_err("Not enough tail room (required: %d, available: %d",
93 extend_tail, rte_pktmbuf_tailroom(m_src));
97 m_src->data_len += extend_tail;
98 m_src->pkt_len += extend_tail;
/* Prepend the outbound header consumed by the CPT microcode. */
100 hdr = (struct roc_ie_on_outb_hdr *)rte_pktmbuf_prepend(m_src, hdr_len);
101 if (unlikely(hdr == NULL)) {
102 plt_dp_err("Not enough head room");
/* Per-packet IV comes from the crypto op at the SA's configured offset. */
107 rte_crypto_op_ctod_offset(cop, uint8_t *, sa->cipher_iv_off),
109 hdr->seq = rte_cpu_to_be_32(sa->seq_lo);
110 hdr->ip_id = rte_cpu_to_be_32(sa->ip_id);
/* Keep the hardware SA's ESN high word in sync with the soft SA. */
112 out_sa->common_sa.esn_hi = sa->seq_hi;
117 /* Prepare CPT instruction */
118 inst->w4.u64 = sa->inst.w4 | dlen;
119 inst->dptr = rte_pktmbuf_iova(m_src);
120 inst->rptr = inst->dptr;
121 inst->w7.u64 = sa->inst.w7;
/*
 * Prepare an inbound (decrypt) CPT instruction for @cop on @sa.
 *
 * Runs the software anti-replay check first when the SA has a replay
 * window configured, then fills the CPT instruction words from the SA's
 * precomputed templates with the packet length OR'ed into w4.
 *
 * NOTE(review): listing is truncated — the `ret` declaration, the
 * error-return branch after the replay check and the final return are not
 * visible; code lines below are byte-identical to the source shown.
 */
126 static __rte_always_inline int
127 process_inb_sa(struct rte_crypto_op *cop, struct cn9k_ipsec_sa *sa,
128 struct cpt_inst_s *inst)
130 struct rte_crypto_sym_op *sym_op = cop->sym;
131 struct rte_mbuf *m_src = sym_op->m_src;
/* Software anti-replay: only when a window size was configured. */
134 if (sa->replay_win_sz) {
135 ret = ipsec_antireplay_check(sa, sa->replay_win_sz, m_src);
137 plt_dp_err("Anti replay check failed");
142 /* Prepare CPT instruction */
143 inst->w4.u64 = sa->inst.w4 | rte_pktmbuf_pkt_len(m_src);
144 inst->dptr = rte_pktmbuf_iova(m_src);
145 inst->rptr = inst->dptr;
146 inst->w7.u64 = sa->inst.w7;
150 #endif /* __CN9K_IPSEC_LA_OPS_H__ */