1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2021 Marvell.
5 #ifndef __CN9K_IPSEC_LA_OPS_H__
6 #define __CN9K_IPSEC_LA_OPS_H__
8 #include <rte_crypto_sym.h>
10 #include <rte_security.h>
12 #include "cn9k_ipsec.h"
13 #include "cnxk_security_ar.h"
/*
 * Compute the expected output (post-encryption) length for an outbound
 * packet whose plaintext payload is @plen bytes, on session @sa.
 *
 * The payload grows by rlens.roundup_len and is rounded up to the SA's
 * padding granularity (rlens.roundup_byte); the fixed per-packet
 * overhead precomputed in rlens.partial_len (headers/trailer/ICV,
 * composed elsewhere at SA setup) is then added on top.
 */
15 static __rte_always_inline int32_t
16 ipsec_po_out_rlen_get(struct cn9k_ipsec_sa *sa, uint32_t plen)
18 uint32_t enc_payload_len;
/* Pad the payload up to the cipher/ESP padding granularity. */
20 enc_payload_len = RTE_ALIGN_CEIL(plen + sa->rlens.roundup_len,
21 sa->rlens.roundup_byte);
23 return sa->rlens.partial_len + enc_payload_len;
/*
 * Software anti-replay check for an inbound ESP packet.
 *
 * @sa:     inbound SA carrying the replay window state (sa->ar) and the
 *          last-accepted (E)SN inside its ROC in-SA area.
 * @win_sz: anti-replay window size in packets.
 * (Remaining parameter(s), including the mbuf @m the ESP header is read
 * from, are elided from this view of the file.)
 *
 * Returns IPSEC_ANTI_REPLAY_FAILED for an invalid sequence number; the
 * result of cnxk_on_anti_replay_check() is captured in @ret (its
 * handling/return is in lines elided from this view).
 */
15 static __rte_always_inline int
16 ipsec_antireplay_check(struct cn9k_ipsec_sa *sa, uint32_t win_sz,
30 uint32_t esn_low = 0, esn_hi = 0, seql = 0, seqh = 0;
31 struct roc_ie_on_common_sa *common_sa;
32 struct roc_ie_on_inb_sa *in_sa;
33 struct roc_ie_on_sa_ctl *ctl;
34 uint64_t seq_in_sa, seq = 0;
35 struct rte_esp_hdr *esp;
/* NOTE(review): in_sa is assigned from @sa in lines elided here. */
40 common_sa = &in_sa->common_sa;
41 ctl = &common_sa->ctl;
/* Last (E)SN recorded in the SA; hardware keeps it big-endian. */
44 esn_low = rte_be_to_cpu_32(common_sa->esn_low);
45 esn_hi = rte_be_to_cpu_32(common_sa->esn_hi);
/*
 * ESP header is located right after a fixed-size IPv4 header;
 * NOTE(review): assumes IPv4 without options — confirm non-IPv4 paths
 * are handled in the elided lines.
 */
47 esp = rte_pktmbuf_mtod_offset(m, void *, sizeof(struct rte_ipv4_hdr));
48 seql = rte_be_to_cpu_32(esp->seq);
/* Reconstruct the ESN high 32 bits from the packet's low 32 bits. */
53 seqh = cnxk_on_anti_replay_get_seqh(win_sz, seql, esn_hi,
55 seq = ((uint64_t)seqh << 32) | seql;
/* Sequence number 0 is never valid for ESP traffic. */
58 if (unlikely(seq == 0))
59 return IPSEC_ANTI_REPLAY_FAILED;
61 ret = cnxk_on_anti_replay_check(seq, &sa->ar, win_sz);
/* Move the ESN stored in the SA forward when the packet outruns it. */
63 seq_in_sa = ((uint64_t)esn_hi << 32) | esn_low;
64 if (seq > seq_in_sa) {
65 common_sa->esn_low = rte_cpu_to_be_32(seql);
66 common_sa->esn_hi = rte_cpu_to_be_32(seqh);
/*
 * Prepare a CPT instruction for outbound (encrypt) IPsec processing of
 * the packet attached to crypto op @cop on session @sa, filling @inst.
 *
 * Extends the mbuf tail to make room for the ESP padding/trailer/ICV
 * and prepends the CPT custom header of sa->custom_hdr_len bytes.
 * Error paths (insufficient tail/head room) log and return — the
 * return statements themselves are in lines elided from this view.
 */
38 static __rte_always_inline int
74 process_outb_sa(struct rte_crypto_op *cop, struct cn9k_ipsec_sa *sa,
75 struct cpt_inst_s *inst)
77 const unsigned int hdr_len = sa->custom_hdr_len;
78 struct rte_crypto_sym_op *sym_op = cop->sym;
79 struct rte_mbuf *m_src = sym_op->m_src;
80 struct roc_ie_on_outb_sa *out_sa;
81 struct roc_ie_on_outb_hdr *hdr;
/* Input length seen by CPT = packet + custom header; output length
 * is computed from the packet alone. */
87 dlen = rte_pktmbuf_pkt_len(m_src) + hdr_len;
88 rlen = ipsec_po_out_rlen_get(sa, dlen - hdr_len);
/* Grow the tail by the encryption expansion; fail if it won't fit. */
90 extend_tail = rlen - dlen;
91 if (unlikely(extend_tail > rte_pktmbuf_tailroom(m_src))) {
92 plt_dp_err("Not enough tail room (required: %d, available: %d",
93 extend_tail, rte_pktmbuf_tailroom(m_src));
97 m_src->data_len += extend_tail;
98 m_src->pkt_len += extend_tail;
/* Prepend the per-packet custom header consumed by CPT microcode. */
100 hdr = (struct roc_ie_on_outb_hdr *)rte_pktmbuf_prepend(m_src, hdr_len);
101 if (unlikely(hdr == NULL)) {
102 plt_dp_err("Not enough head room");
/* Debug builds may supply the IV per packet from the crypto op. */
106 #ifdef LA_IPSEC_DEBUG
107 if (sa->inst.w4 & ROC_IE_ON_PER_PKT_IV) {
109 rte_crypto_op_ctod_offset(cop, uint8_t *,
/* Per-packet sequence/IP-ID carried in the custom header. */
115 hdr->seq = rte_cpu_to_be_32(sa->seq_lo);
116 hdr->ip_id = rte_cpu_to_be_32(sa->ip_id);
/* NOTE(review): out_sa assignment is elided; esn_hi is written without
 * a byte-swap here, unlike the header fields above — confirm intended. */
118 out_sa->common_sa.esn_hi = sa->seq_hi;
123 /* Prepare CPT instruction */
124 inst->w4.u64 = sa->inst.w4 | dlen;
125 inst->dptr = rte_pktmbuf_iova(m_src);
126 inst->rptr = inst->dptr;
127 inst->w7.u64 = sa->inst.w7;
/*
 * Prepare a CPT instruction for inbound (decrypt) IPsec processing of
 * the packet attached to crypto op @cop on session @sa, filling @inst.
 *
 * When the SA has a replay window configured, the packet is first
 * checked in software; a failed packet is not dropped here but turned
 * into a MISC/PASSTHROUGH CPT op that only reports the anti-replay
 * software completion code back to the application.
 */
68 static __rte_always_inline int
133 process_inb_sa(struct rte_crypto_op *cop, struct cn9k_ipsec_sa *sa,
134 struct cpt_inst_s *inst)
136 struct rte_crypto_sym_op *sym_op = cop->sym;
137 struct rte_mbuf *m_src = sym_op->m_src;
/* Software anti-replay gate (window size 0 disables the check). */
140 if (sa->replay_win_sz) {
141 ret = ipsec_antireplay_check(sa, sa->replay_win_sz, m_src);
143 /* Use PASSTHROUGH op for failed antireplay packet */
145 inst->w4.s.opcode_major = ROC_SE_MAJOR_OP_MISC;
146 inst->w4.s.opcode_minor =
147 ROC_SE_MISC_MINOR_OP_PASSTHROUGH;
148 inst->w4.s.param1 = 1;
149 /* Send out completion code only */
151 (ROC_IE_ON_SWCC_ANTI_REPLAY << 8) | 0x1;
153 inst->dptr = rte_pktmbuf_iova(m_src);
154 inst->rptr = inst->dptr;
155 inst->w7.u64 = sa->inst.w7;
/* Normal path: hand the full packet to CPT with the SA's opcode. */
160 /* Prepare CPT instruction */
161 inst->w4.u64 = sa->inst.w4 | rte_pktmbuf_pkt_len(m_src);
162 inst->dptr = rte_pktmbuf_iova(m_src);
163 inst->rptr = inst->dptr;
164 inst->w7.u64 = sa->inst.w7;
168 #endif /* __CN9K_IPSEC_LA_OPS_H__ */