drivers/crypto/cnxk/cn9k_ipsec_la_ops.h
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#ifndef __CN9K_IPSEC_LA_OPS_H__
#define __CN9K_IPSEC_LA_OPS_H__

#include <rte_crypto_sym.h>
#include <rte_esp.h>
#include <rte_security.h>

#include "cn9k_ipsec.h"
#include "cnxk_security_ar.h"

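/*
 * Expected output length after ESP encapsulation of a plen byte payload:
 * the payload is rounded up to the cipher block boundary and the fixed
 * per-SA overhead tracked in partial_len is added.
 */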
static __rte_always_inline int32_t
ipsec_po_out_rlen_get(struct cn9k_ipsec_sa *sa, uint32_t plen)
{
        uint32_t enc_payload_len;

        enc_payload_len = RTE_ALIGN_CEIL(plen + sa->rlens.roundup_len,
                                         sa->rlens.roundup_byte);

        return sa->rlens.partial_len + enc_payload_len;
}

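/*
 * Software anti-replay check for inbound packets. The low 32 bits of the
 * sequence number are read from the ESP header; when ESN is enabled, the
 * high 32 bits are inferred from the SA's current ESN state and the replay
 * window size. On success with ESN, the SA's ESN words are advanced if the
 * packet moved the window forward.
 */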
static __rte_always_inline int
ipsec_antireplay_check(struct cn9k_ipsec_sa *sa, uint32_t win_sz,
                       struct rte_mbuf *m)
{
        uint32_t esn_low = 0, esn_hi = 0, seql = 0, seqh = 0;
        struct roc_ie_on_common_sa *common_sa;
        struct roc_ie_on_inb_sa *in_sa;
        struct roc_ie_on_sa_ctl *ctl;
        uint64_t seq_in_sa, seq = 0;
        struct rte_esp_hdr *esp;
        uint8_t esn;
        int ret;

        in_sa = &sa->in_sa;
        common_sa = &in_sa->common_sa;
        ctl = &common_sa->ctl;

        esn = ctl->esn_en;
        esn_low = rte_be_to_cpu_32(common_sa->esn_low);
        esn_hi = rte_be_to_cpu_32(common_sa->esn_hi);

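        /* ESP header follows a fixed-size outer IPv4 header (no options) */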
        esp = rte_pktmbuf_mtod_offset(m, void *, sizeof(struct rte_ipv4_hdr));
        seql = rte_be_to_cpu_32(esp->seq);

        if (!esn) {
                seq = (uint64_t)seql;
        } else {
                seqh = cnxk_on_anti_replay_get_seqh(win_sz, seql, esn_hi,
                                                    esn_low);
                seq = ((uint64_t)seqh << 32) | seql;
        }

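        /* A sequence number of zero is never valid on the wire (RFC 4303) */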
        if (unlikely(seq == 0))
                return IPSEC_ANTI_REPLAY_FAILED;

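        /* Check the reconstructed 64-bit sequence number against the window */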
        ret = cnxk_on_anti_replay_check(seq, &sa->ar, win_sz);
        if (esn && !ret) {
                seq_in_sa = ((uint64_t)esn_hi << 32) | esn_low;
                if (seq > seq_in_sa) {
                        common_sa->esn_low = rte_cpu_to_be_32(seql);
                        common_sa->esn_hi = rte_cpu_to_be_32(seqh);
                }
        }

        return ret;
}

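/*
 * Prepare a CPT instruction for outbound (encrypt) processing: grow the
 * mbuf tail to hold the ESP encapsulation growth, prepend the custom
 * header consumed by the engine, and fill the instruction words from the SA.
 */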
static __rte_always_inline int
process_outb_sa(struct rte_crypto_op *cop, struct cn9k_ipsec_sa *sa,
                struct cpt_inst_s *inst)
{
        const unsigned int hdr_len = sa->custom_hdr_len;
        struct rte_crypto_sym_op *sym_op = cop->sym;
        struct rte_mbuf *m_src = sym_op->m_src;
        struct roc_ie_on_outb_sa *out_sa;
        struct roc_ie_on_outb_hdr *hdr;
        uint32_t dlen, rlen;
        int32_t extend_tail;

        out_sa = &sa->out_sa;

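        /* dlen is the input length including the custom header; rlen is the
         * expected length once the packet has been ESP encapsulated.
         */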
        dlen = rte_pktmbuf_pkt_len(m_src) + hdr_len;
        rlen = ipsec_po_out_rlen_get(sa, dlen - hdr_len);

        extend_tail = rlen - dlen;
        if (unlikely(extend_tail > rte_pktmbuf_tailroom(m_src))) {
                plt_dp_err("Not enough tail room (required: %d, available: %d)",
                           extend_tail, rte_pktmbuf_tailroom(m_src));
                return -ENOMEM;
        }

        m_src->data_len += extend_tail;
        m_src->pkt_len += extend_tail;

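        /* Prepend the custom header carrying per-packet IV, sequence number
         * and IP ID for the engine.
         */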
        hdr = (struct roc_ie_on_outb_hdr *)rte_pktmbuf_prepend(m_src, hdr_len);
        if (unlikely(hdr == NULL)) {
                plt_dp_err("Not enough head room");
                return -ENOMEM;
        }

#ifdef LA_IPSEC_DEBUG
        if (sa->inst.w4 & ROC_IE_ON_PER_PKT_IV) {
                memcpy(&hdr->iv[0],
                       rte_crypto_op_ctod_offset(cop, uint8_t *,
                                                 sa->cipher_iv_off),
                       sa->cipher_iv_len);
        }
#endif

        hdr->seq = rte_cpu_to_be_32(sa->seq_lo);
        hdr->ip_id = rte_cpu_to_be_32(sa->ip_id);

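        /* Propagate the software-maintained ESN high word into the outbound SA */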
        out_sa->common_sa.esn_hi = sa->seq_hi;

        sa->ip_id++;
        sa->esn++;

        /* Prepare CPT instruction */
        inst->w4.u64 = sa->inst.w4 | dlen;
        inst->dptr = rte_pktmbuf_iova(m_src);
        inst->rptr = inst->dptr;
        inst->w7.u64 = sa->inst.w7;

        return 0;
}

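/*
 * Prepare a CPT instruction for inbound (decrypt) processing. When a replay
 * window is configured, the anti-replay check is done in software first;
 * packets that fail it are turned into a passthrough operation that only
 * reports a completion code.
 */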
static __rte_always_inline int
process_inb_sa(struct rte_crypto_op *cop, struct cn9k_ipsec_sa *sa,
               struct cpt_inst_s *inst)
{
        struct rte_crypto_sym_op *sym_op = cop->sym;
        struct rte_mbuf *m_src = sym_op->m_src;
        int ret;

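        /* Anti-replay is only enforced when a replay window is configured */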
        if (sa->replay_win_sz) {
                ret = ipsec_antireplay_check(sa, sa->replay_win_sz, m_src);
                if (unlikely(ret)) {
                        /* Use PASSTHROUGH op for failed antireplay packet */
                        inst->w4.u64 = 0;
                        inst->w4.s.opcode_major = ROC_SE_MAJOR_OP_MISC;
                        inst->w4.s.opcode_minor =
                                ROC_SE_MISC_MINOR_OP_PASSTHROUGH;
                        inst->w4.s.param1 = 1;
                        /* Send out completion code only */
                        inst->w4.s.param2 =
                                (ROC_IE_ON_SWCC_ANTI_REPLAY << 8) | 0x1;
                        inst->w4.s.dlen = 1;
                        inst->dptr = rte_pktmbuf_iova(m_src);
                        inst->rptr = inst->dptr;
                        inst->w7.u64 = sa->inst.w7;
                        return 0;
                }
        }

        /* Prepare CPT instruction */
        inst->w4.u64 = sa->inst.w4 | rte_pktmbuf_pkt_len(m_src);
        inst->dptr = rte_pktmbuf_iova(m_src);
        inst->rptr = inst->dptr;
        inst->w7.u64 = sa->inst.w7;

        return 0;
}
#endif /* __CN9K_IPSEC_LA_OPS_H__ */