/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdint.h>
#include <errno.h>
#include <sys/types.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>

#include <rte_common.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>
#include <rte_random.h>

/*
 * Local headers from the ipsec-secgw example: they provide struct ipsec_sa,
 * struct esp_hdr, the IPSEC_ESP log type, IV_OFFSET, and the get_sym_cop(),
 * get_cnt_blk(), get_aad() and ipip_*() helpers used below.
 */
#include "ipsec.h"
#include "esp.h"
#include "ipip.h"
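
/*
 * Prepare the crypto operation for an inbound ESP packet: locate the
 * payload behind the IP and ESP headers and program the cipher/auth
 * (or AEAD) offsets, IV, AAD and digest locations into the symmetric op.
 */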
int
esp_inbound(struct rte_mbuf *m, struct ipsec_sa *sa,
		struct rte_crypto_op *cop)
{
	struct ip *ip4;
	struct rte_crypto_sym_op *sym_cop;
	int32_t payload_len, ip_hdr_len;

	RTE_ASSERT(sa != NULL);
	/* Inline crypto SAs are processed by the NIC; nothing to set up */
	if (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO)
		return 0;

	RTE_ASSERT(m != NULL);
	RTE_ASSERT(cop != NULL);

	ip4 = rte_pktmbuf_mtod(m, struct ip *);
	if (likely(ip4->ip_v == IPVERSION))
		ip_hdr_len = ip4->ip_hl * 4;
	else if (ip4->ip_v == IP6_VERSION)
		/* XXX No option headers supported */
		ip_hdr_len = sizeof(struct ip6_hdr);
	else {
		RTE_LOG(ERR, IPSEC_ESP, "invalid IP packet type %d\n",
				ip4->ip_v);
		return -EINVAL;
	}

	/* Payload is everything between the ESP IV and the trailing digest */
	payload_len = rte_pktmbuf_pkt_len(m) - ip_hdr_len -
			sizeof(struct esp_hdr) - sa->iv_len - sa->digest_len;

	if ((payload_len & (sa->block_size - 1)) || (payload_len <= 0)) {
		RTE_LOG_DP(DEBUG, IPSEC_ESP, "payload %d not multiple of %u\n",
				payload_len, sa->block_size);
		return -EINVAL;
	}

	sym_cop = get_sym_cop(cop);
	sym_cop->m_src = m;
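
	/*
	 * AES-GCM is an AEAD transform: a single operation covers both
	 * encryption and integrity, so only the aead fields are filled.
	 * Other algorithms use separate cipher and auth transforms.
	 */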
	if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_GCM) {
		sym_cop->aead.data.offset = ip_hdr_len +
				sizeof(struct esp_hdr) + sa->iv_len;
		sym_cop->aead.data.length = payload_len;

		struct cnt_blk *icb;
		uint8_t *aad;
		uint8_t *iv = RTE_PTR_ADD(ip4,
				ip_hdr_len + sizeof(struct esp_hdr));

		/* Build the initial counter block: salt, packet IV, cnt = 1 */
		icb = get_cnt_blk(m);
		icb->salt = sa->salt;
		memcpy(&icb->iv, iv, 8);
		icb->cnt = rte_cpu_to_be_32(1);
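
		/*
		 * The ESP AAD is the SPI and sequence number, i.e. the
		 * 8 bytes immediately preceding the IV in the packet.
		 */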
		aad = get_aad(m);
		memcpy(aad, iv - sizeof(struct esp_hdr), 8);
		sym_cop->aead.aad.data = aad;
		sym_cop->aead.aad.phys_addr = rte_pktmbuf_iova_offset(m,
				aad - rte_pktmbuf_mtod(m, uint8_t *));

		sym_cop->aead.digest.data = rte_pktmbuf_mtod_offset(m, void *,
				rte_pktmbuf_pkt_len(m) - sa->digest_len);
		sym_cop->aead.digest.phys_addr = rte_pktmbuf_iova_offset(m,
				rte_pktmbuf_pkt_len(m) - sa->digest_len);
	} else {
		sym_cop->cipher.data.offset = ip_hdr_len +
				sizeof(struct esp_hdr) + sa->iv_len;
		sym_cop->cipher.data.length = payload_len;

		struct cnt_blk *icb;
		uint8_t *iv = RTE_PTR_ADD(ip4,
				ip_hdr_len + sizeof(struct esp_hdr));
		uint8_t *iv_ptr = rte_crypto_op_ctod_offset(cop,
				uint8_t *, IV_OFFSET);

		switch (sa->cipher_algo) {
		case RTE_CRYPTO_CIPHER_NULL:
		case RTE_CRYPTO_CIPHER_AES_CBC:
			/* Copy IV at the end of crypto operation */
			rte_memcpy(iv_ptr, iv, sa->iv_len);
			break;
		case RTE_CRYPTO_CIPHER_AES_CTR:
			/* CTR mode takes a counter block instead of a raw IV */
			icb = get_cnt_blk(m);
			icb->salt = sa->salt;
			memcpy(&icb->iv, iv, 8);
			icb->cnt = rte_cpu_to_be_32(1);
			break;
		default:
			RTE_LOG(ERR, IPSEC_ESP, "unsupported cipher algorithm %u\n",
					sa->cipher_algo);
			return -EINVAL;
		}
		switch (sa->auth_algo) {
		case RTE_CRYPTO_AUTH_NULL:
		case RTE_CRYPTO_AUTH_SHA1_HMAC:
		case RTE_CRYPTO_AUTH_SHA256_HMAC:
			/* Authenticate the ESP header, IV and payload */
			sym_cop->auth.data.offset = ip_hdr_len;
			sym_cop->auth.data.length = sizeof(struct esp_hdr) +
					sa->iv_len + payload_len;
			break;
		default:
			RTE_LOG(ERR, IPSEC_ESP, "unsupported auth algorithm %u\n",
					sa->auth_algo);
			return -EINVAL;
		}

		sym_cop->auth.digest.data = rte_pktmbuf_mtod_offset(m, void *,
				rte_pktmbuf_pkt_len(m) - sa->digest_len);
		sym_cop->auth.digest.phys_addr = rte_pktmbuf_iova_offset(m,
				rte_pktmbuf_pkt_len(m) - sa->digest_len);
	}

	return 0;
}
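
/*
 * Post-decryption processing for an inbound packet: validate the crypto
 * status, check and strip the ESP trailer (padding, pad length, next
 * header, digest) and restore the inner IP header.
 */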
int
esp_inbound_post(struct rte_mbuf *m, struct ipsec_sa *sa,
		struct rte_crypto_op *cop)
{
	struct ip *ip4, *ip;
	struct ip6_hdr *ip6;
	uint8_t *nexthdr, *pad_len;
	uint8_t *padding;
	uint16_t i;

	RTE_ASSERT(m != NULL);
	RTE_ASSERT(sa != NULL);
	RTE_ASSERT(cop != NULL);

	/* For inline crypto, derive the op status from the RX offload flags */
	if (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
		if (m->ol_flags & PKT_RX_SEC_OFFLOAD) {
			if (m->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED)
				cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
			else
				cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
		} else
			cop->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
	}
	if (cop->status != RTE_CRYPTO_OP_STATUS_SUCCESS) {
		RTE_LOG(ERR, IPSEC_ESP, "failed crypto op\n");
		return -1;
	}
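
	/*
	 * With HW trailer offload the NIC has already removed the ESP
	 * trailer and recorded the next protocol; otherwise locate and
	 * validate the trailer in the mbuf and trim it by hand.
	 */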
	if (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO &&
			sa->ol_flags & RTE_SECURITY_RX_HW_TRAILER_OFFLOAD) {
		nexthdr = &m->inner_esp_next_proto;
	} else {
		nexthdr = rte_pktmbuf_mtod_offset(m, uint8_t *,
				rte_pktmbuf_pkt_len(m) - sa->digest_len - 1);
		pad_len = nexthdr - 1;
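
		/*
		 * RFC 4303 default padding is the monotonic sequence
		 * 1, 2, 3, ..., so each pad byte must equal its 1-based
		 * position.
		 */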
		padding = pad_len - *pad_len;
		for (i = 0; i < *pad_len; i++) {
			if (padding[i] != i + 1) {
				RTE_LOG(ERR, IPSEC_ESP, "invalid padding\n");
				return -EINVAL;
			}
		}

		if (rte_pktmbuf_trim(m, *pad_len + 2 + sa->digest_len)) {
			RTE_LOG(ERR, IPSEC_ESP,
					"failed to remove pad_len + digest\n");
			return -EINVAL;
		}
	}
	if (unlikely(sa->flags == TRANSPORT)) {
		/* Strip the ESP header and IV, then move the IP header up */
		ip = rte_pktmbuf_mtod(m, struct ip *);
		ip4 = (struct ip *)rte_pktmbuf_adj(m,
				sizeof(struct esp_hdr) + sa->iv_len);
		if (likely(ip->ip_v == IPVERSION)) {
			memmove(ip4, ip, ip->ip_hl * 4);
			ip4->ip_p = *nexthdr;
			ip4->ip_len = htons(rte_pktmbuf_data_len(m));
		} else {
			ip6 = (struct ip6_hdr *)ip4;
			/* XXX No option headers supported */
			memmove(ip6, ip, sizeof(struct ip6_hdr));
			ip6->ip6_nxt = *nexthdr;
			ip6->ip6_plen = htons(rte_pktmbuf_data_len(m) -
					sizeof(struct ip6_hdr));
		}
	} else
		ipip_inbound(m, sizeof(struct esp_hdr) + sa->iv_len);

	return 0;
}
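
/*
 * Prepare an outbound packet for ESP encapsulation: append trailer
 * padding, prepend the tunnel or transport headers plus the ESP header
 * and IV, and (unless the SA uses inline crypto) program the crypto op.
 */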
int
esp_outbound(struct rte_mbuf *m, struct ipsec_sa *sa,
		struct rte_crypto_op *cop)
{
	struct ip *ip4;
	struct ip6_hdr *ip6;
	struct esp_hdr *esp = NULL;
	uint8_t *padding = NULL, *new_ip, nlp;
	struct rte_crypto_sym_op *sym_cop;
	int32_t i;
	uint16_t pad_payload_len, pad_len, ip_hdr_len;

	RTE_ASSERT(m != NULL);
	RTE_ASSERT(sa != NULL);

	ip_hdr_len = 0;

	/* Record the next-layer protocol (nlp) for the ESP trailer */
	ip4 = rte_pktmbuf_mtod(m, struct ip *);
	if (likely(ip4->ip_v == IPVERSION)) {
		if (unlikely(sa->flags == TRANSPORT)) {
			ip_hdr_len = ip4->ip_hl * 4;
			nlp = ip4->ip_p;
		} else
			nlp = IPPROTO_IPIP;
	} else if (ip4->ip_v == IP6_VERSION) {
		if (unlikely(sa->flags == TRANSPORT)) {
			/* XXX No option headers supported */
			ip_hdr_len = sizeof(struct ip6_hdr);
			ip6 = (struct ip6_hdr *)ip4;
			nlp = ip6->ip6_nxt;
		} else
			nlp = IPPROTO_IPV6;
	} else {
		RTE_LOG(ERR, IPSEC_ESP, "invalid IP packet type %d\n",
				ip4->ip_v);
		return -EINVAL;
	}
	/*
	 * Padded payload length: payload plus the 2-byte trailer (pad
	 * length and next header), rounded up to the cipher block size.
	 * pad_len is the number of bytes that must be appended.
	 */
	pad_payload_len = RTE_ALIGN_CEIL(rte_pktmbuf_pkt_len(m) -
			ip_hdr_len + 2, sa->block_size);
	pad_len = pad_payload_len + ip_hdr_len - rte_pktmbuf_pkt_len(m);
	RTE_ASSERT(sa->flags == IP4_TUNNEL || sa->flags == IP6_TUNNEL ||
			sa->flags == TRANSPORT);

	/* From here on, ip_hdr_len is the length of the outer header */
	if (likely(sa->flags == IP4_TUNNEL))
		ip_hdr_len = sizeof(struct ip);
	else if (sa->flags == IP6_TUNNEL)
		ip_hdr_len = sizeof(struct ip6_hdr);
	else if (sa->flags != TRANSPORT) {
		RTE_LOG(ERR, IPSEC_ESP, "Unsupported SA flags: 0x%x\n",
				sa->flags);
		return -EINVAL;
	}
	/* Check maximum packet size */
	if (unlikely(ip_hdr_len + sizeof(struct esp_hdr) + sa->iv_len +
			pad_payload_len + sa->digest_len > IP_MAXPACKET)) {
		RTE_LOG(ERR, IPSEC_ESP, "ipsec packet is too big\n");
		return -EINVAL;
	}
	/* Add trailer padding if it is not constructed by HW */
	if (sa->type != RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
			(sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO &&
			 !(sa->ol_flags & RTE_SECURITY_TX_HW_TRAILER_OFFLOAD))) {
		padding = (uint8_t *)rte_pktmbuf_append(m, pad_len +
				sa->digest_len);
		if (unlikely(padding == NULL)) {
			RTE_LOG(ERR, IPSEC_ESP,
					"not enough mbuf trailing space\n");
			return -ENOSPC;
		}
		rte_prefetch0(padding);
	}
	switch (sa->flags) {
	case IP4_TUNNEL:
		ip4 = ip4ip_outbound(m, sizeof(struct esp_hdr) + sa->iv_len,
				&sa->src, &sa->dst);
		esp = (struct esp_hdr *)(ip4 + 1);
		break;
	case IP6_TUNNEL:
		ip6 = ip6ip_outbound(m, sizeof(struct esp_hdr) + sa->iv_len,
				&sa->src, &sa->dst);
		esp = (struct esp_hdr *)(ip6 + 1);
		break;
	case TRANSPORT:
		/* Shift the IP header forward to make room for ESP + IV */
		new_ip = (uint8_t *)rte_pktmbuf_prepend(m,
				sizeof(struct esp_hdr) + sa->iv_len);
		memmove(new_ip, ip4, ip_hdr_len);
		esp = (struct esp_hdr *)(new_ip + ip_hdr_len);
		ip4 = (struct ip *)new_ip;
		if (likely(ip4->ip_v == IPVERSION)) {
			ip4->ip_p = IPPROTO_ESP;
			ip4->ip_len = htons(rte_pktmbuf_data_len(m));
		} else {
			ip6 = (struct ip6_hdr *)new_ip;
			ip6->ip6_nxt = IPPROTO_ESP;
			ip6->ip6_plen = htons(rte_pktmbuf_data_len(m) -
					sizeof(struct ip6_hdr));
		}
	}
	sa->seq++;
	esp->spi = rte_cpu_to_be_32(sa->spi);
	esp->seq = rte_cpu_to_be_32((uint32_t)sa->seq);
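
	/*
	 * Set the per-packet IV. GCM and CTR use the (unique) sequence
	 * number; for NULL/CBC the on-wire IV bytes are zeroed here and
	 * included in the cipher range below, so the first encrypted
	 * block effectively serves as the IV for the payload.
	 */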
	uint64_t *iv = (uint64_t *)(esp + 1);
	if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_GCM) {
		*iv = rte_cpu_to_be_64(sa->seq);
	} else {
		switch (sa->cipher_algo) {
		case RTE_CRYPTO_CIPHER_NULL:
		case RTE_CRYPTO_CIPHER_AES_CBC:
			memset(iv, 0, sa->iv_len);
			break;
		case RTE_CRYPTO_CIPHER_AES_CTR:
			*iv = rte_cpu_to_be_64(sa->seq);
			break;
		default:
			RTE_LOG(ERR, IPSEC_ESP,
					"unsupported cipher algorithm %u\n",
					sa->cipher_algo);
			return -EINVAL;
		}
	}
	if (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
		if (sa->ol_flags & RTE_SECURITY_TX_HW_TRAILER_OFFLOAD) {
			/* Set the inner esp next protocol for HW trailer */
			m->inner_esp_next_proto = nlp;
			m->packet_type |= RTE_PTYPE_TUNNEL_ESP;
		} else {
			padding[pad_len - 2] = pad_len - 2;
			padding[pad_len - 1] = nlp;
		}
		/* HW performs the crypto; no crypto op to build */
		goto done;
	}

	RTE_ASSERT(cop != NULL);
	sym_cop = get_sym_cop(cop);
	sym_cop->m_src = m;
	if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_GCM) {
		uint8_t *aad;

		sym_cop->aead.data.offset = ip_hdr_len +
				sizeof(struct esp_hdr) + sa->iv_len;
		sym_cop->aead.data.length = pad_payload_len;

		/* Fill pad_len using default sequential scheme */
		for (i = 0; i < pad_len - 2; i++)
			padding[i] = i + 1;
		padding[pad_len - 2] = pad_len - 2;
		padding[pad_len - 1] = nlp;
		struct cnt_blk *icb = get_cnt_blk(m);
		icb->salt = sa->salt;
		icb->iv = rte_cpu_to_be_64(sa->seq);
		icb->cnt = rte_cpu_to_be_32(1);

		/* The AAD is the 8-byte ESP header (SPI + sequence number) */
		aad = get_aad(m);
		memcpy(aad, esp, 8);
		sym_cop->aead.aad.data = aad;
		sym_cop->aead.aad.phys_addr = rte_pktmbuf_iova_offset(m,
				aad - rte_pktmbuf_mtod(m, uint8_t *));

		sym_cop->aead.digest.data = rte_pktmbuf_mtod_offset(m, uint8_t *,
				rte_pktmbuf_pkt_len(m) - sa->digest_len);
		sym_cop->aead.digest.phys_addr = rte_pktmbuf_iova_offset(m,
				rte_pktmbuf_pkt_len(m) - sa->digest_len);
	} else {
		switch (sa->cipher_algo) {
		case RTE_CRYPTO_CIPHER_NULL:
		case RTE_CRYPTO_CIPHER_AES_CBC:
			/* The cipher range covers the (zeroed) IV as well */
			sym_cop->cipher.data.offset = ip_hdr_len +
					sizeof(struct esp_hdr);
			sym_cop->cipher.data.length = pad_payload_len + sa->iv_len;
			break;
		case RTE_CRYPTO_CIPHER_AES_CTR:
			sym_cop->cipher.data.offset = ip_hdr_len +
					sizeof(struct esp_hdr) + sa->iv_len;
			sym_cop->cipher.data.length = pad_payload_len;
			break;
		default:
			RTE_LOG(ERR, IPSEC_ESP, "unsupported cipher algorithm %u\n",
					sa->cipher_algo);
			return -EINVAL;
		}
		/* Fill pad_len using default sequential scheme */
		for (i = 0; i < pad_len - 2; i++)
			padding[i] = i + 1;
		padding[pad_len - 2] = pad_len - 2;
		padding[pad_len - 1] = nlp;

		struct cnt_blk *icb = get_cnt_blk(m);
		icb->salt = sa->salt;
		icb->iv = rte_cpu_to_be_64(sa->seq);
		icb->cnt = rte_cpu_to_be_32(1);
		switch (sa->auth_algo) {
		case RTE_CRYPTO_AUTH_NULL:
		case RTE_CRYPTO_AUTH_SHA1_HMAC:
		case RTE_CRYPTO_AUTH_SHA256_HMAC:
			sym_cop->auth.data.offset = ip_hdr_len;
			sym_cop->auth.data.length = sizeof(struct esp_hdr) +
					sa->iv_len + pad_payload_len;
			break;
		default:
			RTE_LOG(ERR, IPSEC_ESP, "unsupported auth algorithm %u\n",
					sa->auth_algo);
			return -EINVAL;
		}

		sym_cop->auth.digest.data = rte_pktmbuf_mtod_offset(m, uint8_t *,
				rte_pktmbuf_pkt_len(m) - sa->digest_len);
		sym_cop->auth.digest.phys_addr = rte_pktmbuf_iova_offset(m,
				rte_pktmbuf_pkt_len(m) - sa->digest_len);
	}

done:
	return 0;
}
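
/*
 * Post-processing for an outbound packet: flag inline-crypto packets
 * for TX security offload, or verify that the lookaside crypto
 * operation completed successfully.
 */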
int
esp_outbound_post(struct rte_mbuf *m,
		struct ipsec_sa *sa,
		struct rte_crypto_op *cop)
{
	RTE_ASSERT(m != NULL);
	RTE_ASSERT(sa != NULL);

	if (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
		m->ol_flags |= PKT_TX_SEC_OFFLOAD;
	} else {
		RTE_ASSERT(cop != NULL);
		if (cop->status != RTE_CRYPTO_OP_STATUS_SUCCESS) {
			RTE_LOG(ERR, IPSEC_ESP, "Failed crypto op\n");
			return -1;
		}
	}

	return 0;
}