/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2016 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>

#include <rte_common.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>
#include <rte_random.h>

#include "ipsec.h"
#include "esp.h"
#include "ipip.h"
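
/*
 * Fill a buffer with a pseudo-random IV, eight bytes at a time, topping
 * up any four-byte remainder with a single 32-bit word. Note that
 * rte_rand()/lrand48() are not cryptographically strong sources; this
 * matches the sample application's scope rather than production-grade
 * IV generation.
 */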
static inline void
random_iv_u64(uint64_t *buf, uint16_t n)
{
	uint32_t left = n & 0x7;
	uint32_t i;

	RTE_ASSERT((n & 0x3) == 0);

	for (i = 0; i < (n >> 3); i++)
		buf[i] = rte_rand();

	if (left)
		*((uint32_t *)&buf[i]) = (uint32_t)lrand48();
}
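
/*
 * Prepare the symmetric crypto op for an inbound ESP packet: validate the
 * outer IP version and the padded payload length, then point the op's
 * cipher region at the encrypted payload, the IV at the bytes following
 * the ESP header, and the auth region at ESP header + IV + payload, with
 * the digest taken from the last digest_len bytes of the packet.
 */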
int
esp_inbound(struct rte_mbuf *m, struct ipsec_sa *sa,
		struct rte_crypto_op *cop)
{
	struct ip *ip4;
	struct rte_crypto_sym_op *sym_cop;
	int32_t payload_len, ip_hdr_len;

	RTE_ASSERT(m != NULL);
	RTE_ASSERT(sa != NULL);
	RTE_ASSERT(cop != NULL);

	ip4 = rte_pktmbuf_mtod(m, struct ip *);
	if (likely(ip4->ip_v == IPVERSION))
		ip_hdr_len = ip4->ip_hl * 4;
	else if (ip4->ip_v == IP6_VERSION)
		/* XXX No option headers supported */
		ip_hdr_len = sizeof(struct ip6_hdr);
	else {
		RTE_LOG(ERR, IPSEC_ESP, "invalid IP packet type %d\n",
				ip4->ip_v);
		return -EINVAL;
	}

	payload_len = rte_pktmbuf_pkt_len(m) - ip_hdr_len -
		sizeof(struct esp_hdr) - sa->iv_len - sa->digest_len;

	if ((payload_len & (sa->block_size - 1)) || (payload_len <= 0)) {
		RTE_LOG(DEBUG, IPSEC_ESP, "payload %d not multiple of %u\n",
				payload_len, sa->block_size);
		return -EINVAL;
	}

	sym_cop = (struct rte_crypto_sym_op *)(cop + 1);

	sym_cop->m_src = m;
	sym_cop->cipher.data.offset = ip_hdr_len + sizeof(struct esp_hdr) +
		sa->iv_len;
	sym_cop->cipher.data.length = payload_len;

	sym_cop->cipher.iv.data = rte_pktmbuf_mtod_offset(m, void*,
			ip_hdr_len + sizeof(struct esp_hdr));
	sym_cop->cipher.iv.phys_addr = rte_pktmbuf_mtophys_offset(m,
			ip_hdr_len + sizeof(struct esp_hdr));
	sym_cop->cipher.iv.length = sa->iv_len;

	sym_cop->auth.data.offset = ip_hdr_len;
	sym_cop->auth.data.length = sizeof(struct esp_hdr) +
		sa->iv_len + payload_len;

	sym_cop->auth.digest.data = rte_pktmbuf_mtod_offset(m, void*,
			rte_pktmbuf_pkt_len(m) - sa->digest_len);
	sym_cop->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
			rte_pktmbuf_pkt_len(m) - sa->digest_len);
	sym_cop->auth.digest.length = sa->digest_len;

	return 0;
}
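
/*
 * Complete an inbound packet once the crypto op has finished. The ESP
 * trailer at the end of the decrypted payload looks like:
 *
 *	... | padding (pad_len bytes) | pad_len | next header | ICV |
 *
 * Verify the sequential padding bytes, trim the trailer and ICV, then
 * either restore the transport-mode IP header in place or strip the
 * outer tunnel header via ipip_inbound().
 */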
int
esp_inbound_post(struct rte_mbuf *m, struct ipsec_sa *sa,
		struct rte_crypto_op *cop)
{
	struct ip *ip4, *ip;
	struct ip6_hdr *ip6;
	uint8_t *nexthdr, *pad_len;
	uint8_t *padding;
	uint16_t i;

	RTE_ASSERT(m != NULL);
	RTE_ASSERT(sa != NULL);
	RTE_ASSERT(cop != NULL);

	if (cop->status != RTE_CRYPTO_OP_STATUS_SUCCESS) {
		RTE_LOG(ERR, IPSEC_ESP, "failed crypto op\n");
		return -1;
	}

	nexthdr = rte_pktmbuf_mtod_offset(m, uint8_t*,
			rte_pktmbuf_pkt_len(m) - sa->digest_len - 1);
	pad_len = nexthdr - 1;

	padding = pad_len - *pad_len;
	for (i = 0; i < *pad_len; i++) {
		if (padding[i] != i + 1) {
			RTE_LOG(ERR, IPSEC_ESP, "invalid padding\n");
			return -EINVAL;
		}
	}

	if (rte_pktmbuf_trim(m, *pad_len + 2 + sa->digest_len)) {
		RTE_LOG(ERR, IPSEC_ESP,
				"failed to remove pad_len + digest\n");
		return -EINVAL;
	}

	if (unlikely(sa->flags == TRANSPORT)) {
		ip = rte_pktmbuf_mtod(m, struct ip *);
		ip4 = (struct ip *)rte_pktmbuf_adj(m,
				sizeof(struct esp_hdr) + sa->iv_len);
		if (likely(ip->ip_v == IPVERSION)) {
			memmove(ip4, ip, ip->ip_hl * 4);
			ip4->ip_p = *nexthdr;
			ip4->ip_len = htons(rte_pktmbuf_data_len(m));
		} else {
			ip6 = (struct ip6_hdr *)ip4;
			/* XXX No option headers supported */
			memmove(ip6, ip, sizeof(struct ip6_hdr));
			ip6->ip6_nxt = *nexthdr;
			/* plen excludes the fixed IPv6 header */
			ip6->ip6_plen = htons(rte_pktmbuf_data_len(m) -
					sizeof(struct ip6_hdr));
		}
	} else
		ipip_inbound(m, sizeof(struct esp_hdr) + sa->iv_len);

	return 0;
}
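
/*
 * Build the ESP envelope for an outbound packet and prepare its crypto
 * op: size the RFC 4303 sequential padding so payload plus the two
 * trailer bytes is block-aligned, append padding and digest space,
 * prepend or insert the ESP header (tunnel vs transport), advance the
 * sequence number, generate a fresh IV for AES-CBC, and set the cipher
 * and auth regions for encrypt-then-MAC processing.
 */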
int
esp_outbound(struct rte_mbuf *m, struct ipsec_sa *sa,
		struct rte_crypto_op *cop)
{
	struct ip *ip4;
	struct ip6_hdr *ip6;
	struct esp_hdr *esp = NULL;
	uint8_t *padding, *new_ip, nlp;
	struct rte_crypto_sym_op *sym_cop;
	int32_t i;
	uint16_t pad_payload_len, pad_len, ip_hdr_len;

	RTE_ASSERT(m != NULL);
	RTE_ASSERT(sa != NULL);
	RTE_ASSERT(cop != NULL);

	ip_hdr_len = 0;

	ip4 = rte_pktmbuf_mtod(m, struct ip *);
	if (likely(ip4->ip_v == IPVERSION)) {
		if (unlikely(sa->flags == TRANSPORT)) {
			ip_hdr_len = ip4->ip_hl * 4;
			nlp = ip4->ip_p;
		} else
			nlp = IPPROTO_IPIP;
	} else if (ip4->ip_v == IP6_VERSION) {
		if (unlikely(sa->flags == TRANSPORT)) {
			/* XXX No option headers supported */
			ip_hdr_len = sizeof(struct ip6_hdr);
			ip6 = (struct ip6_hdr *)ip4;
			nlp = ip6->ip6_nxt;
		} else
			nlp = IPPROTO_IPV6;
	} else {
		RTE_LOG(ERR, IPSEC_ESP, "invalid IP packet type %d\n",
				ip4->ip_v);
		return -EINVAL;
	}

	/* Padded payload length */
	pad_payload_len = RTE_ALIGN_CEIL(rte_pktmbuf_pkt_len(m) -
			ip_hdr_len + 2, sa->block_size);
	pad_len = pad_payload_len + ip_hdr_len - rte_pktmbuf_pkt_len(m);

	RTE_ASSERT(sa->flags == IP4_TUNNEL || sa->flags == IP6_TUNNEL ||
			sa->flags == TRANSPORT);

	if (likely(sa->flags == IP4_TUNNEL))
		ip_hdr_len = sizeof(struct ip);
	else if (sa->flags == IP6_TUNNEL)
		ip_hdr_len = sizeof(struct ip6_hdr);
	else if (sa->flags != TRANSPORT) {
		RTE_LOG(ERR, IPSEC_ESP, "Unsupported SA flags: 0x%x\n",
				sa->flags);
		return -EINVAL;
	}

	/* Check maximum packet size */
	if (unlikely(ip_hdr_len + sizeof(struct esp_hdr) + sa->iv_len +
			pad_payload_len + sa->digest_len > IP_MAXPACKET)) {
		RTE_LOG(ERR, IPSEC_ESP, "ipsec packet is too big\n");
		return -EINVAL;
	}

	padding = (uint8_t *)rte_pktmbuf_append(m, pad_len + sa->digest_len);
	if (unlikely(padding == NULL)) {
		RTE_LOG(ERR, IPSEC_ESP, "not enough mbuf trailing space\n");
		return -ENOSPC;
	}
	rte_prefetch0(padding);

	switch (sa->flags) {
	case IP4_TUNNEL:
		ip4 = ip4ip_outbound(m, sizeof(struct esp_hdr) + sa->iv_len,
				&sa->src, &sa->dst);
		esp = (struct esp_hdr *)(ip4 + 1);
		break;
	case IP6_TUNNEL:
		ip6 = ip6ip_outbound(m, sizeof(struct esp_hdr) + sa->iv_len,
				&sa->src, &sa->dst);
		esp = (struct esp_hdr *)(ip6 + 1);
		break;
	case TRANSPORT:
		new_ip = (uint8_t *)rte_pktmbuf_prepend(m,
				sizeof(struct esp_hdr) + sa->iv_len);
		memmove(new_ip, ip4, ip_hdr_len);
		esp = (struct esp_hdr *)(new_ip + ip_hdr_len);
		if (likely(ip4->ip_v == IPVERSION)) {
			ip4 = (struct ip *)new_ip;
			ip4->ip_p = IPPROTO_ESP;
			ip4->ip_len = htons(rte_pktmbuf_data_len(m));
		} else {
			ip6 = (struct ip6_hdr *)new_ip;
			ip6->ip6_nxt = IPPROTO_ESP;
			/* plen excludes the fixed IPv6 header */
			ip6->ip6_plen = htons(rte_pktmbuf_data_len(m) -
					sizeof(struct ip6_hdr));
		}
	}

	sa->seq++;
	esp->spi = rte_cpu_to_be_32(sa->spi);
	esp->seq = rte_cpu_to_be_32(sa->seq);

	if (sa->cipher_algo == RTE_CRYPTO_CIPHER_AES_CBC)
		random_iv_u64((uint64_t *)(esp + 1), sa->iv_len);

	/* Fill pad_len using default sequential scheme */
	for (i = 0; i < pad_len - 2; i++)
		padding[i] = i + 1;
	padding[pad_len - 2] = pad_len - 2;
	padding[pad_len - 1] = nlp;

	sym_cop = (struct rte_crypto_sym_op *)(cop + 1);

	sym_cop->m_src = m;
	sym_cop->cipher.data.offset = ip_hdr_len + sizeof(struct esp_hdr) +
			sa->iv_len;
	sym_cop->cipher.data.length = pad_payload_len;

	sym_cop->cipher.iv.data = rte_pktmbuf_mtod_offset(m, uint8_t *,
			ip_hdr_len + sizeof(struct esp_hdr));
	sym_cop->cipher.iv.phys_addr = rte_pktmbuf_mtophys_offset(m,
			ip_hdr_len + sizeof(struct esp_hdr));
	sym_cop->cipher.iv.length = sa->iv_len;

	sym_cop->auth.data.offset = ip_hdr_len;
	sym_cop->auth.data.length = sizeof(struct esp_hdr) + sa->iv_len +
			pad_payload_len;

	sym_cop->auth.digest.data = rte_pktmbuf_mtod_offset(m, uint8_t *,
			rte_pktmbuf_pkt_len(m) - sa->digest_len);
	sym_cop->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
			rte_pktmbuf_pkt_len(m) - sa->digest_len);
	sym_cop->auth.digest.length = sa->digest_len;

	return 0;
}
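
/*
 * Outbound completion handler: the packet is already fully formed, so
 * only the crypto op status needs checking.
 */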
int
esp_outbound_post(struct rte_mbuf *m __rte_unused,
		struct ipsec_sa *sa __rte_unused,
		struct rte_crypto_op *cop)
{
	RTE_ASSERT(m != NULL);
	RTE_ASSERT(sa != NULL);
	RTE_ASSERT(cop != NULL);

	if (cop->status != RTE_CRYPTO_OP_STATUS_SUCCESS) {
		RTE_LOG(ERR, IPSEC_ESP, "Failed crypto op\n");
		return -1;
	}

	return 0;
}
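
/*
 * Usage sketch (an assumption about the surrounding driver flow, not part
 * of this file): the ipsec-secgw framework is expected to call these
 * handlers around the cryptodev queue pair, roughly:
 *
 *	esp_outbound(m, sa, cop);                      // build hdr/trailer
 *	rte_cryptodev_enqueue_burst(dev_id, qp, &cop, 1);
 *	...
 *	rte_cryptodev_dequeue_burst(dev_id, qp, &cop, 1);
 *	esp_outbound_post(m, sa, cop);                 // check op status
 */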