/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
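
/*
 * ESP encap/decap helpers of the ipsec-secgw example: esp_inbound() and
 * esp_outbound() build the symmetric crypto operation for a packet, and
 * the *_post() variants finalize the packet once the crypto device has
 * processed it.
 */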

#include <stdint.h>
#include <sys/types.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>

#include <rte_common.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>
#include <rte_random.h>

#include "ipsec.h"
#include "esp.h"
#include "ipip.h"

#define IV_OFFSET		(sizeof(struct rte_crypto_op) + \
				sizeof(struct rte_crypto_sym_op))
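
/*
 * Per-op IV/counter-block storage: IV_OFFSET addresses the private area
 * directly behind the rte_crypto_op and rte_crypto_sym_op structures.
 * This assumes the crypto op mempool was created with enough private
 * data space to hold the largest IV in use.
 */

/*
 * Prepare an inbound ESP packet for the crypto device: locate the ESP
 * payload and describe the cipher and authentication regions, the IV and
 * the digest in the symmetric crypto op.
 */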
int
esp_inbound(struct rte_mbuf *m, struct ipsec_sa *sa,
		struct rte_crypto_op *cop)
{
	struct ip *ip4;
	struct rte_crypto_sym_op *sym_cop;
	int32_t payload_len, ip_hdr_len;

	RTE_ASSERT(m != NULL);
	RTE_ASSERT(sa != NULL);
	RTE_ASSERT(cop != NULL);

	ip4 = rte_pktmbuf_mtod(m, struct ip *);
	if (likely(ip4->ip_v == IPVERSION))
		ip_hdr_len = ip4->ip_hl * 4;
	else if (ip4->ip_v == IP6_VERSION)
		/* XXX No option headers supported */
		ip_hdr_len = sizeof(struct ip6_hdr);
	else {
		RTE_LOG(ERR, IPSEC_ESP, "invalid IP packet type %d\n",
				ip4->ip_v);
		return -EINVAL;
	}
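
	/*
	 * The ESP payload is whatever remains after the IP header, ESP
	 * header, IV and trailing ICV; block ciphers require it to be a
	 * positive multiple of the (power-of-two) block size.
	 */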
	payload_len = rte_pktmbuf_pkt_len(m) - ip_hdr_len -
		sizeof(struct esp_hdr) - sa->iv_len - sa->digest_len;

	if ((payload_len & (sa->block_size - 1)) || (payload_len <= 0)) {
		RTE_LOG_DP(DEBUG, IPSEC_ESP, "payload %d not multiple of %u\n",
				payload_len, sa->block_size);
		return -EINVAL;
	}

	sym_cop = get_sym_cop(cop);
	sym_cop->m_src = m;

	sym_cop->cipher.data.offset = ip_hdr_len + sizeof(struct esp_hdr) +
		sa->iv_len;
	sym_cop->cipher.data.length = payload_len;

	struct cnt_blk *icb;
	uint8_t *aad;
	uint8_t *iv = RTE_PTR_ADD(ip4, ip_hdr_len + sizeof(struct esp_hdr));
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(cop,
			uint8_t *, IV_OFFSET);
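
	/*
	 * Two IV conventions: NULL/CBC ship the IV on the wire and it is
	 * copied verbatim into the crypto op, while the counter modes (CTR,
	 * GCM) rebuild a 16-byte counter block from the SA salt, the 8-byte
	 * wire IV and an initial block counter of 1.
	 */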
	switch (sa->cipher_algo) {
	case RTE_CRYPTO_CIPHER_NULL:
	case RTE_CRYPTO_CIPHER_AES_CBC:
		/* Copy IV at the end of crypto operation */
		rte_memcpy(iv_ptr, iv, sa->iv_len);
		sym_cop->cipher.iv.offset = IV_OFFSET;
		sym_cop->cipher.iv.length = sa->iv_len;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
	case RTE_CRYPTO_CIPHER_AES_GCM:
		icb = get_cnt_blk(m);
		icb->salt = sa->salt;
		memcpy(&icb->iv, iv, 8);
		icb->cnt = rte_cpu_to_be_32(1);
		sym_cop->cipher.iv.offset = IV_OFFSET;
		sym_cop->cipher.iv.length = 16;
		break;
	default:
		RTE_LOG(ERR, IPSEC_ESP, "unsupported cipher algorithm %u\n",
				sa->cipher_algo);
		return -EINVAL;
	}
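
	/*
	 * For the HMAC algorithms the authenticated region spans the ESP
	 * header, IV and payload; AES-GCM authenticates the ciphertext
	 * implicitly and only takes the 8 bytes of SPI + sequence number
	 * as additional authenticated data (AAD).
	 */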
	switch (sa->auth_algo) {
	case RTE_CRYPTO_AUTH_NULL:
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		sym_cop->auth.data.offset = ip_hdr_len;
		sym_cop->auth.data.length = sizeof(struct esp_hdr) +
			sa->iv_len + payload_len;
		break;
	case RTE_CRYPTO_AUTH_AES_GCM:
		aad = get_aad(m);
		memcpy(aad, iv - sizeof(struct esp_hdr), 8);
		sym_cop->auth.aad.data = aad;
		sym_cop->auth.aad.phys_addr = rte_pktmbuf_mtophys_offset(m,
				aad - rte_pktmbuf_mtod(m, uint8_t *));
		sym_cop->auth.aad.length = 8;
		break;
	default:
		RTE_LOG(ERR, IPSEC_ESP, "unsupported auth algorithm %u\n",
				sa->auth_algo);
		return -EINVAL;
	}

	sym_cop->auth.digest.data = rte_pktmbuf_mtod_offset(m, void*,
			rte_pktmbuf_pkt_len(m) - sa->digest_len);
	sym_cop->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
			rte_pktmbuf_pkt_len(m) - sa->digest_len);
	sym_cop->auth.digest.length = sa->digest_len;

	return 0;
}
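
/*
 * Inbound post-processing: runs after the crypto device completes. Checks
 * the op status, validates and strips the ESP trailer (padding, pad length,
 * next header) plus the ICV, and restores the inner headers.
 */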
int
esp_inbound_post(struct rte_mbuf *m, struct ipsec_sa *sa,
		struct rte_crypto_op *cop)
{
	struct ip *ip4, *ip;
	struct ip6_hdr *ip6;
	uint8_t *nexthdr, *pad_len;
	uint8_t *padding;
	uint16_t i;

	RTE_ASSERT(m != NULL);
	RTE_ASSERT(sa != NULL);
	RTE_ASSERT(cop != NULL);

	if (cop->status != RTE_CRYPTO_OP_STATUS_SUCCESS) {
		RTE_LOG(ERR, IPSEC_ESP, "failed crypto op\n");
		return -1;
	}

	nexthdr = rte_pktmbuf_mtod_offset(m, uint8_t*,
			rte_pktmbuf_pkt_len(m) - sa->digest_len - 1);
	pad_len = nexthdr - 1;

	padding = pad_len - *pad_len;
	for (i = 0; i < *pad_len; i++) {
		if (padding[i] != i + 1) {
			RTE_LOG(ERR, IPSEC_ESP, "invalid padding\n");
			return -EINVAL;
		}
	}

	if (rte_pktmbuf_trim(m, *pad_len + 2 + sa->digest_len)) {
		RTE_LOG(ERR, IPSEC_ESP,
				"failed to remove pad_len + digest\n");
		return -EINVAL;
	}
	if (unlikely(sa->flags == TRANSPORT)) {
		ip = rte_pktmbuf_mtod(m, struct ip *);
		ip4 = (struct ip *)rte_pktmbuf_adj(m,
				sizeof(struct esp_hdr) + sa->iv_len);
		if (likely(ip->ip_v == IPVERSION)) {
			memmove(ip4, ip, ip->ip_hl * 4);
			ip4->ip_p = *nexthdr;
			ip4->ip_len = htons(rte_pktmbuf_data_len(m));
		} else {
			ip6 = (struct ip6_hdr *)ip4;
			/* XXX No option headers supported */
			memmove(ip6, ip, sizeof(struct ip6_hdr));
			ip6->ip6_nxt = *nexthdr;
			/* ip6_plen excludes the IPv6 header itself */
			ip6->ip6_plen = htons(rte_pktmbuf_data_len(m) -
					sizeof(struct ip6_hdr));
		}
	} else
		ipip_inbound(m, sizeof(struct esp_hdr) + sa->iv_len);

	return 0;
}
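
/*
 * Outbound processing: computes the ESP trailer padding, builds the outer
 * header (tunnel mode) or shifts the existing one (transport mode), fills
 * the ESP header and trailer and describes the cipher/auth regions for the
 * crypto device.
 */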
int
esp_outbound(struct rte_mbuf *m, struct ipsec_sa *sa,
		struct rte_crypto_op *cop)
{
	struct ip *ip4;
	struct ip6_hdr *ip6;
	struct esp_hdr *esp = NULL;
	uint8_t *padding, *new_ip, nlp;
	struct rte_crypto_sym_op *sym_cop;
	int32_t i;
	uint16_t pad_payload_len, pad_len, ip_hdr_len;

	RTE_ASSERT(m != NULL);
	RTE_ASSERT(sa != NULL);
	RTE_ASSERT(cop != NULL);

	ip_hdr_len = 0;

	ip4 = rte_pktmbuf_mtod(m, struct ip *);
	if (likely(ip4->ip_v == IPVERSION)) {
		if (unlikely(sa->flags == TRANSPORT)) {
			ip_hdr_len = ip4->ip_hl * 4;
			nlp = ip4->ip_p;
		} else
			nlp = IPPROTO_IPIP;
	} else if (ip4->ip_v == IP6_VERSION) {
		if (unlikely(sa->flags == TRANSPORT)) {
			/* XXX No option headers supported */
			ip_hdr_len = sizeof(struct ip6_hdr);
			ip6 = (struct ip6_hdr *)ip4;
			nlp = ip6->ip6_nxt;
		} else
			nlp = IPPROTO_IPV6;
	} else {
		RTE_LOG(ERR, IPSEC_ESP, "invalid IP packet type %d\n",
				ip4->ip_v);
		return -EINVAL;
	}

	/* Padded payload length */
	pad_payload_len = RTE_ALIGN_CEIL(rte_pktmbuf_pkt_len(m) -
			ip_hdr_len + 2, sa->block_size);
	pad_len = pad_payload_len + ip_hdr_len - rte_pktmbuf_pkt_len(m);

	RTE_ASSERT(sa->flags == IP4_TUNNEL || sa->flags == IP6_TUNNEL ||
			sa->flags == TRANSPORT);

	if (likely(sa->flags == IP4_TUNNEL))
		ip_hdr_len = sizeof(struct ip);
	else if (sa->flags == IP6_TUNNEL)
		ip_hdr_len = sizeof(struct ip6_hdr);
	else if (sa->flags != TRANSPORT) {
		RTE_LOG(ERR, IPSEC_ESP, "Unsupported SA flags: 0x%x\n",
				sa->flags);
		return -EINVAL;
	}

	/* Check maximum packet size */
	if (unlikely(ip_hdr_len + sizeof(struct esp_hdr) + sa->iv_len +
			pad_payload_len + sa->digest_len > IP_MAXPACKET)) {
		RTE_LOG(ERR, IPSEC_ESP, "ipsec packet is too big\n");
		return -EINVAL;
	}

	padding = (uint8_t *)rte_pktmbuf_append(m, pad_len + sa->digest_len);
	if (unlikely(padding == NULL)) {
		RTE_LOG(ERR, IPSEC_ESP, "not enough mbuf trailing space\n");
		return -ENOSPC;
	}
	rte_prefetch0(padding);
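
	/*
	 * The tailroom reserved above holds the ESP trailer and the ICV; the
	 * crypto device writes the digest into the final digest_len bytes.
	 */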
	switch (sa->flags) {
	case IP4_TUNNEL:
		ip4 = ip4ip_outbound(m, sizeof(struct esp_hdr) + sa->iv_len,
				&sa->src, &sa->dst);
		esp = (struct esp_hdr *)(ip4 + 1);
		break;
	case IP6_TUNNEL:
		ip6 = ip6ip_outbound(m, sizeof(struct esp_hdr) + sa->iv_len,
				&sa->src, &sa->dst);
		esp = (struct esp_hdr *)(ip6 + 1);
		break;
	case TRANSPORT:
		new_ip = (uint8_t *)rte_pktmbuf_prepend(m,
				sizeof(struct esp_hdr) + sa->iv_len);
		memmove(new_ip, ip4, ip_hdr_len);
		esp = (struct esp_hdr *)(new_ip + ip_hdr_len);
		if (likely(ip4->ip_v == IPVERSION)) {
			ip4 = (struct ip *)new_ip;
			ip4->ip_p = IPPROTO_ESP;
			ip4->ip_len = htons(rte_pktmbuf_data_len(m));
		} else {
			ip6 = (struct ip6_hdr *)new_ip;
			ip6->ip6_nxt = IPPROTO_ESP;
			/* ip6_plen excludes the IPv6 header itself */
			ip6->ip6_plen = htons(rte_pktmbuf_data_len(m) -
					sizeof(struct ip6_hdr));
		}
	}
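
	/*
	 * Fill the ESP header. The SA keeps a 64-bit sequence counter but
	 * only the low 32 bits go on the wire; extended sequence numbers
	 * (ESN) are not supported here.
	 */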
	sa->seq++;
	esp->spi = rte_cpu_to_be_32(sa->spi);
	esp->seq = rte_cpu_to_be_32((uint32_t)sa->seq);

	uint64_t *iv = (uint64_t *)(esp + 1);

	sym_cop = get_sym_cop(cop);
	sym_cop->m_src = m;

	switch (sa->cipher_algo) {
	case RTE_CRYPTO_CIPHER_NULL:
	case RTE_CRYPTO_CIPHER_AES_CBC:
		/* Zeroed IV field is encrypted together with the payload */
		memset(iv, 0, sa->iv_len);
		sym_cop->cipher.data.offset = ip_hdr_len +
			sizeof(struct esp_hdr);
		sym_cop->cipher.data.length = pad_payload_len + sa->iv_len;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
	case RTE_CRYPTO_CIPHER_AES_GCM:
		/* Use the sequence number as the per-packet IV */
		*iv = sa->seq;
		sym_cop->cipher.data.offset = ip_hdr_len +
			sizeof(struct esp_hdr) + sa->iv_len;
		sym_cop->cipher.data.length = pad_payload_len;
		break;
	default:
		RTE_LOG(ERR, IPSEC_ESP, "unsupported cipher algorithm %u\n",
				sa->cipher_algo);
		return -EINVAL;
	}

	/* Fill the padding with the default sequential scheme (1, 2, 3, ...) */
	for (i = 0; i < pad_len - 2; i++)
		padding[i] = i + 1;
	padding[pad_len - 2] = pad_len - 2;
	padding[pad_len - 1] = nlp;
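
	/*
	 * Counter modes consume a 16-byte counter block: the 4-byte salt
	 * from the SA, the 8-byte per-packet IV and a 4-byte block counter
	 * starting at 1 (the RFC 3686 AES-CTR layout; AES-GCM derives its
	 * initial counter the same way for 8-byte IVs).
	 */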
	struct cnt_blk *icb = get_cnt_blk(m);
	icb->salt = sa->salt;
	icb->iv = sa->seq;
	icb->cnt = rte_cpu_to_be_32(1);
	sym_cop->cipher.iv.offset = IV_OFFSET;
	sym_cop->cipher.iv.length = 16;

	uint8_t *aad;

	switch (sa->auth_algo) {
	case RTE_CRYPTO_AUTH_NULL:
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		sym_cop->auth.data.offset = ip_hdr_len;
		sym_cop->auth.data.length = sizeof(struct esp_hdr) +
			sa->iv_len + pad_payload_len;
		break;
	case RTE_CRYPTO_AUTH_AES_GCM:
		aad = get_aad(m);
		memcpy(aad, esp, 8);
		sym_cop->auth.aad.data = aad;
		sym_cop->auth.aad.phys_addr = rte_pktmbuf_mtophys_offset(m,
				aad - rte_pktmbuf_mtod(m, uint8_t *));
		sym_cop->auth.aad.length = 8;
		break;
	default:
		RTE_LOG(ERR, IPSEC_ESP, "unsupported auth algorithm %u\n",
				sa->auth_algo);
		return -EINVAL;
	}

	sym_cop->auth.digest.data = rte_pktmbuf_mtod_offset(m, uint8_t *,
			rte_pktmbuf_pkt_len(m) - sa->digest_len);
	sym_cop->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
			rte_pktmbuf_pkt_len(m) - sa->digest_len);
	sym_cop->auth.digest.length = sa->digest_len;

	return 0;
}
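
/*
 * Outbound post-processing: nothing to rewrite in the packet; only confirm
 * that the crypto operation completed successfully.
 */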
int
esp_outbound_post(struct rte_mbuf *m __rte_unused,
		struct ipsec_sa *sa __rte_unused,
		struct rte_crypto_op *cop)
{
	RTE_ASSERT(m != NULL);
	RTE_ASSERT(sa != NULL);
	RTE_ASSERT(cop != NULL);

	if (cop->status != RTE_CRYPTO_OP_STATUS_SUCCESS) {
		RTE_LOG(ERR, IPSEC_ESP, "Failed crypto op\n");
		return -1;
	}

	return 0;
}