net: add rte prefix to ESP structure
[dpdk.git] examples/ipsec-secgw/esp.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2017 Intel Corporation
 */

#include <stdint.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <fcntl.h>
#include <unistd.h>

#include <rte_common.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>
#include <rte_random.h>

#include "ipsec.h"
#include "esp.h"
#include "ipip.h"

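/*
 * For reference, the on-wire ESP layout this file assumes (RFC 4303):
 *
 *   outer IP hdr | ESP hdr (SPI, seq) | IV | payload | padding |
 *   pad length | next header | ICV (digest)
 *
 * The ESP header is 8 bytes (4-byte SPI + 4-byte sequence number);
 * IV length, block size and ICV (digest) length come from the SA, and
 * the trailer is the padding plus one pad-length byte and one
 * next-header byte.
 */
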
/*
 * Prepare the crypto operation for an inbound ESP packet: locate the
 * ESP payload behind the IP header, program the cipher/auth (or AEAD)
 * offsets and lengths, and stage the IV, counter block and AAD.
 */
int
esp_inbound(struct rte_mbuf *m, struct ipsec_sa *sa,
		struct rte_crypto_op *cop)
{
	struct ip *ip4;
	struct rte_crypto_sym_op *sym_cop;
	int32_t payload_len, ip_hdr_len;

	RTE_ASSERT(sa != NULL);
	if (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO)
		return 0;

	RTE_ASSERT(m != NULL);
	RTE_ASSERT(cop != NULL);

	ip4 = rte_pktmbuf_mtod(m, struct ip *);
	if (likely(ip4->ip_v == IPVERSION))
		ip_hdr_len = ip4->ip_hl * 4;
	else if (ip4->ip_v == IP6_VERSION)
		/* XXX No option headers supported */
		ip_hdr_len = sizeof(struct ip6_hdr);
	else {
		RTE_LOG(ERR, IPSEC_ESP, "invalid IP packet type %d\n",
				ip4->ip_v);
		return -EINVAL;
	}

	payload_len = rte_pktmbuf_pkt_len(m) - ip_hdr_len -
		sizeof(struct rte_esp_hdr) - sa->iv_len - sa->digest_len;

	/* The bitmask test assumes block_size is a power of two */
	if ((payload_len & (sa->block_size - 1)) || (payload_len <= 0)) {
		RTE_LOG_DP(DEBUG, IPSEC_ESP, "payload %d not multiple of %u\n",
				payload_len, sa->block_size);
		return -EINVAL;
	}

	sym_cop = get_sym_cop(cop);
	sym_cop->m_src = m;

	if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_GCM) {
		sym_cop->aead.data.offset =
			ip_hdr_len + sizeof(struct rte_esp_hdr) + sa->iv_len;
		sym_cop->aead.data.length = payload_len;

		struct cnt_blk *icb;
		uint8_t *aad;
		uint8_t *iv = RTE_PTR_ADD(ip4, ip_hdr_len +
					sizeof(struct rte_esp_hdr));

		icb = get_cnt_blk(m);
		icb->salt = sa->salt;
		memcpy(&icb->iv, iv, 8);
		icb->cnt = rte_cpu_to_be_32(1);

		/* The AAD is the ESP header itself (SPI + sequence number) */
		aad = get_aad(m);
		memcpy(aad, iv - sizeof(struct rte_esp_hdr), 8);
		sym_cop->aead.aad.data = aad;
		sym_cop->aead.aad.phys_addr = rte_pktmbuf_iova_offset(m,
				aad - rte_pktmbuf_mtod(m, uint8_t *));

		sym_cop->aead.digest.data = rte_pktmbuf_mtod_offset(m, void*,
				rte_pktmbuf_pkt_len(m) - sa->digest_len);
		sym_cop->aead.digest.phys_addr = rte_pktmbuf_iova_offset(m,
				rte_pktmbuf_pkt_len(m) - sa->digest_len);
	} else {
		sym_cop->cipher.data.offset = ip_hdr_len +
			sizeof(struct rte_esp_hdr) +
			sa->iv_len;
		sym_cop->cipher.data.length = payload_len;

		struct cnt_blk *icb;
		uint8_t *iv = RTE_PTR_ADD(ip4, ip_hdr_len +
					sizeof(struct rte_esp_hdr));
		uint8_t *iv_ptr = rte_crypto_op_ctod_offset(cop,
					uint8_t *, IV_OFFSET);

		switch (sa->cipher_algo) {
		case RTE_CRYPTO_CIPHER_NULL:
		case RTE_CRYPTO_CIPHER_3DES_CBC:
		case RTE_CRYPTO_CIPHER_AES_CBC:
			/* Copy IV at the end of crypto operation */
			rte_memcpy(iv_ptr, iv, sa->iv_len);
			break;
		case RTE_CRYPTO_CIPHER_AES_CTR:
			icb = get_cnt_blk(m);
			icb->salt = sa->salt;
			memcpy(&icb->iv, iv, 8);
			icb->cnt = rte_cpu_to_be_32(1);
			break;
		default:
			RTE_LOG(ERR, IPSEC_ESP, "unsupported cipher algorithm %u\n",
					sa->cipher_algo);
			return -EINVAL;
		}

		switch (sa->auth_algo) {
		case RTE_CRYPTO_AUTH_NULL:
		case RTE_CRYPTO_AUTH_SHA1_HMAC:
		case RTE_CRYPTO_AUTH_SHA256_HMAC:
			sym_cop->auth.data.offset = ip_hdr_len;
			sym_cop->auth.data.length = sizeof(struct rte_esp_hdr) +
				sa->iv_len + payload_len;
			break;
		default:
			RTE_LOG(ERR, IPSEC_ESP, "unsupported auth algorithm %u\n",
					sa->auth_algo);
			return -EINVAL;
		}

		sym_cop->auth.digest.data = rte_pktmbuf_mtod_offset(m, void*,
				rte_pktmbuf_pkt_len(m) - sa->digest_len);
		sym_cop->auth.digest.phys_addr = rte_pktmbuf_iova_offset(m,
				rte_pktmbuf_pkt_len(m) - sa->digest_len);
	}

	return 0;
}
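
/*
 * Worked example (illustrative only): for an IPv4 packet with a
 * 20-byte header on an AES-GCM SA with an 8-byte IV and a 16-byte ICV,
 * esp_inbound() above programs:
 *
 *   aead.data.offset = 20 + 8 (ESP hdr) + 8 (IV) = 36
 *   aead.data.length = pkt_len - 36 - 16
 *
 * with the digest located in the last 16 bytes of the packet and the
 * 8-byte AAD copied from the ESP header.
 */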

/*
 * Finish inbound processing once the crypto operation has completed:
 * check its status, verify and strip the ESP trailer (padding, pad
 * length, next header and ICV), then restore the inner IP header
 * (transport mode) or decapsulate the tunnel header.
 */
int
esp_inbound_post(struct rte_mbuf *m, struct ipsec_sa *sa,
		struct rte_crypto_op *cop)
{
	struct ip *ip4, *ip;
	struct ip6_hdr *ip6;
	uint8_t *nexthdr, *pad_len;
	uint8_t *padding;
	uint16_t i;

	RTE_ASSERT(m != NULL);
	RTE_ASSERT(sa != NULL);
	RTE_ASSERT(cop != NULL);

	if ((sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) ||
			(sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO)) {
		if (m->ol_flags & PKT_RX_SEC_OFFLOAD) {
			if (m->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED)
				cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
			else
				cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
		} else
			cop->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
	}

	if (cop->status != RTE_CRYPTO_OP_STATUS_SUCCESS) {
		RTE_LOG(ERR, IPSEC_ESP, "%s() failed crypto op\n", __func__);
		return -1;
	}

	if (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO &&
	    sa->ol_flags & RTE_SECURITY_RX_HW_TRAILER_OFFLOAD) {
		nexthdr = &m->inner_esp_next_proto;
	} else {
		nexthdr = rte_pktmbuf_mtod_offset(m, uint8_t*,
				rte_pktmbuf_pkt_len(m) - sa->digest_len - 1);
		pad_len = nexthdr - 1;

		padding = pad_len - *pad_len;
		for (i = 0; i < *pad_len; i++) {
			if (padding[i] != i + 1) {
				RTE_LOG(ERR, IPSEC_ESP, "invalid padding\n");
				return -EINVAL;
			}
		}

		if (rte_pktmbuf_trim(m, *pad_len + 2 + sa->digest_len)) {
			RTE_LOG(ERR, IPSEC_ESP,
					"failed to remove pad_len + digest\n");
			return -EINVAL;
		}
	}

	if (unlikely(sa->flags == TRANSPORT)) {
		ip = rte_pktmbuf_mtod(m, struct ip *);
		ip4 = (struct ip *)rte_pktmbuf_adj(m,
				sizeof(struct rte_esp_hdr) + sa->iv_len);
		if (likely(ip->ip_v == IPVERSION)) {
			memmove(ip4, ip, ip->ip_hl * 4);
			ip4->ip_p = *nexthdr;
			ip4->ip_len = htons(rte_pktmbuf_data_len(m));
		} else {
			ip6 = (struct ip6_hdr *)ip4;
			/* XXX No option headers supported */
			memmove(ip6, ip, sizeof(struct ip6_hdr));
			ip6->ip6_nxt = *nexthdr;
			ip6->ip6_plen = htons(rte_pktmbuf_data_len(m) -
					      sizeof(struct ip6_hdr));
		}
	} else
		ipip_inbound(m, sizeof(struct rte_esp_hdr) + sa->iv_len);

	return 0;
}
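
/*
 * Note on the trailer check above: RFC 4303 section 2.6 defines the
 * default padding content as the monotonic byte sequence 1, 2, 3, ...,
 * which is what the loop in esp_inbound_post() verifies before
 * rte_pktmbuf_trim() drops *pad_len + 2 trailer bytes (the padding plus
 * the pad-length and next-header fields) together with the ICV.
 */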

/*
 * Encapsulate an outbound packet in ESP: compute the trailer padding,
 * build the outer header (tunnel mode) or shift the existing one
 * (transport mode), fill in the ESP header and IV, and program the
 * crypto operation unless inline crypto hardware handles it.
 */
int
esp_outbound(struct rte_mbuf *m, struct ipsec_sa *sa,
		struct rte_crypto_op *cop)
{
	struct ip *ip4;
	struct ip6_hdr *ip6;
	struct rte_esp_hdr *esp = NULL;
	uint8_t *padding = NULL, *new_ip, nlp;
	struct rte_crypto_sym_op *sym_cop;
	int32_t i;
	uint16_t pad_payload_len, pad_len, ip_hdr_len;

	RTE_ASSERT(m != NULL);
	RTE_ASSERT(sa != NULL);

	ip_hdr_len = 0;

	ip4 = rte_pktmbuf_mtod(m, struct ip *);
	if (likely(ip4->ip_v == IPVERSION)) {
		if (unlikely(sa->flags == TRANSPORT)) {
			ip_hdr_len = ip4->ip_hl * 4;
			nlp = ip4->ip_p;
		} else
			nlp = IPPROTO_IPIP;
	} else if (ip4->ip_v == IP6_VERSION) {
		if (unlikely(sa->flags == TRANSPORT)) {
			/* XXX No option headers supported */
			ip_hdr_len = sizeof(struct ip6_hdr);
			ip6 = (struct ip6_hdr *)ip4;
			nlp = ip6->ip6_nxt;
		} else
			nlp = IPPROTO_IPV6;
	} else {
		RTE_LOG(ERR, IPSEC_ESP, "invalid IP packet type %d\n",
				ip4->ip_v);
		return -EINVAL;
	}

	/* Padded payload length (payload + pad-length + next-header,
	 * rounded up to the cipher block size) */
	pad_payload_len = RTE_ALIGN_CEIL(rte_pktmbuf_pkt_len(m) -
			ip_hdr_len + 2, sa->block_size);
	pad_len = pad_payload_len + ip_hdr_len - rte_pktmbuf_pkt_len(m);

	RTE_ASSERT(sa->flags == IP4_TUNNEL || sa->flags == IP6_TUNNEL ||
			sa->flags == TRANSPORT);

	if (likely(sa->flags == IP4_TUNNEL))
		ip_hdr_len = sizeof(struct ip);
	else if (sa->flags == IP6_TUNNEL)
		ip_hdr_len = sizeof(struct ip6_hdr);
	else if (sa->flags != TRANSPORT) {
		RTE_LOG(ERR, IPSEC_ESP, "Unsupported SA flags: 0x%x\n",
				sa->flags);
		return -EINVAL;
	}

	/* Check maximum packet size */
	if (unlikely(ip_hdr_len + sizeof(struct rte_esp_hdr) + sa->iv_len +
			pad_payload_len + sa->digest_len > IP_MAXPACKET)) {
		RTE_LOG(ERR, IPSEC_ESP, "ipsec packet is too big\n");
		return -EINVAL;
	}

	/* Add trailer padding if it is not constructed by HW */
	if (sa->type != RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
	    !(sa->ol_flags & RTE_SECURITY_TX_HW_TRAILER_OFFLOAD)) {
		padding = (uint8_t *)rte_pktmbuf_append(m, pad_len +
							sa->digest_len);
		if (unlikely(padding == NULL)) {
			RTE_LOG(ERR, IPSEC_ESP,
					"not enough mbuf trailing space\n");
			return -ENOSPC;
		}
		rte_prefetch0(padding);
	}

	switch (sa->flags) {
	case IP4_TUNNEL:
		ip4 = ip4ip_outbound(m, sizeof(struct rte_esp_hdr) + sa->iv_len,
				&sa->src, &sa->dst);
		esp = (struct rte_esp_hdr *)(ip4 + 1);
		break;
	case IP6_TUNNEL:
		ip6 = ip6ip_outbound(m, sizeof(struct rte_esp_hdr) + sa->iv_len,
				&sa->src, &sa->dst);
		esp = (struct rte_esp_hdr *)(ip6 + 1);
		break;
	case TRANSPORT:
		new_ip = (uint8_t *)rte_pktmbuf_prepend(m,
				sizeof(struct rte_esp_hdr) + sa->iv_len);
		if (unlikely(new_ip == NULL)) {
			RTE_LOG(ERR, IPSEC_ESP,
					"not enough mbuf headroom\n");
			return -ENOSPC;
		}
		memmove(new_ip, ip4, ip_hdr_len);
		esp = (struct rte_esp_hdr *)(new_ip + ip_hdr_len);
		ip4 = (struct ip *)new_ip;
		if (likely(ip4->ip_v == IPVERSION)) {
			ip4->ip_p = IPPROTO_ESP;
			ip4->ip_len = htons(rte_pktmbuf_data_len(m));
		} else {
			ip6 = (struct ip6_hdr *)new_ip;
			ip6->ip6_nxt = IPPROTO_ESP;
			ip6->ip6_plen = htons(rte_pktmbuf_data_len(m) -
					      sizeof(struct ip6_hdr));
		}
	}

	sa->seq++;
	esp->spi = rte_cpu_to_be_32(sa->spi);
	esp->seq = rte_cpu_to_be_32((uint32_t)sa->seq);

	/* set iv */
	uint64_t *iv = (uint64_t *)(esp + 1);
	if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_GCM) {
		*iv = rte_cpu_to_be_64(sa->seq);
	} else {
		switch (sa->cipher_algo) {
		case RTE_CRYPTO_CIPHER_NULL:
		case RTE_CRYPTO_CIPHER_3DES_CBC:
		case RTE_CRYPTO_CIPHER_AES_CBC:
			memset(iv, 0, sa->iv_len);
			break;
		case RTE_CRYPTO_CIPHER_AES_CTR:
			*iv = rte_cpu_to_be_64(sa->seq);
			break;
		default:
			RTE_LOG(ERR, IPSEC_ESP,
				"unsupported cipher algorithm %u\n",
				sa->cipher_algo);
			return -EINVAL;
		}
	}

	if (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
		if (sa->ol_flags & RTE_SECURITY_TX_HW_TRAILER_OFFLOAD) {
			/* Set the inner esp next protocol for HW trailer */
			m->inner_esp_next_proto = nlp;
			m->packet_type |= RTE_PTYPE_TUNNEL_ESP;
		} else {
			padding[pad_len - 2] = pad_len - 2;
			padding[pad_len - 1] = nlp;
		}
		goto done;
	}

	RTE_ASSERT(cop != NULL);
	sym_cop = get_sym_cop(cop);
	sym_cop->m_src = m;

	if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_GCM) {
		uint8_t *aad;

		sym_cop->aead.data.offset = ip_hdr_len +
			sizeof(struct rte_esp_hdr) + sa->iv_len;
		sym_cop->aead.data.length = pad_payload_len;

		/* Fill pad_len using default sequential scheme */
		for (i = 0; i < pad_len - 2; i++)
			padding[i] = i + 1;
		padding[pad_len - 2] = pad_len - 2;
		padding[pad_len - 1] = nlp;

		struct cnt_blk *icb = get_cnt_blk(m);
		icb->salt = sa->salt;
		icb->iv = rte_cpu_to_be_64(sa->seq);
		icb->cnt = rte_cpu_to_be_32(1);

		/* The AAD is the ESP header (SPI + sequence number) */
		aad = get_aad(m);
		memcpy(aad, esp, 8);
		sym_cop->aead.aad.data = aad;
		sym_cop->aead.aad.phys_addr = rte_pktmbuf_iova_offset(m,
				aad - rte_pktmbuf_mtod(m, uint8_t *));

		sym_cop->aead.digest.data = rte_pktmbuf_mtod_offset(m, uint8_t *,
			rte_pktmbuf_pkt_len(m) - sa->digest_len);
		sym_cop->aead.digest.phys_addr = rte_pktmbuf_iova_offset(m,
			rte_pktmbuf_pkt_len(m) - sa->digest_len);
	} else {
		switch (sa->cipher_algo) {
		case RTE_CRYPTO_CIPHER_NULL:
		case RTE_CRYPTO_CIPHER_3DES_CBC:
		case RTE_CRYPTO_CIPHER_AES_CBC:
			/* For CBC modes the cipher range also covers the
			 * packet IV field */
			sym_cop->cipher.data.offset = ip_hdr_len +
				sizeof(struct rte_esp_hdr);
			sym_cop->cipher.data.length = pad_payload_len + sa->iv_len;
			break;
		case RTE_CRYPTO_CIPHER_AES_CTR:
			sym_cop->cipher.data.offset = ip_hdr_len +
				sizeof(struct rte_esp_hdr) + sa->iv_len;
			sym_cop->cipher.data.length = pad_payload_len;
			break;
		default:
			RTE_LOG(ERR, IPSEC_ESP, "unsupported cipher algorithm %u\n",
					sa->cipher_algo);
			return -EINVAL;
		}

		/* Fill pad_len using default sequential scheme */
		for (i = 0; i < pad_len - 2; i++)
			padding[i] = i + 1;
		padding[pad_len - 2] = pad_len - 2;
		padding[pad_len - 1] = nlp;

		struct cnt_blk *icb = get_cnt_blk(m);
		icb->salt = sa->salt;
		icb->iv = rte_cpu_to_be_64(sa->seq);
		icb->cnt = rte_cpu_to_be_32(1);

		switch (sa->auth_algo) {
		case RTE_CRYPTO_AUTH_NULL:
		case RTE_CRYPTO_AUTH_SHA1_HMAC:
		case RTE_CRYPTO_AUTH_SHA256_HMAC:
			sym_cop->auth.data.offset = ip_hdr_len;
			sym_cop->auth.data.length = sizeof(struct rte_esp_hdr) +
				sa->iv_len + pad_payload_len;
			break;
		default:
			RTE_LOG(ERR, IPSEC_ESP, "unsupported auth algorithm %u\n",
					sa->auth_algo);
			return -EINVAL;
		}

		sym_cop->auth.digest.data = rte_pktmbuf_mtod_offset(m, uint8_t *,
				rte_pktmbuf_pkt_len(m) - sa->digest_len);
		sym_cop->auth.digest.phys_addr = rte_pktmbuf_iova_offset(m,
				rte_pktmbuf_pkt_len(m) - sa->digest_len);
	}

done:
	return 0;
}
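
/*
 * The counter block staged above for AES-CTR and AES-GCM follows the
 * RFC 3686 layout: the SA's 4-byte salt, the 8-byte per-packet IV
 * (here derived from the sequence number), and a 4-byte big-endian
 * block counter initialized to 1.
 */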

/*
 * Post-crypto step for outbound packets: for inline security actions
 * just mark the mbuf for Tx security offload, otherwise verify that
 * the crypto operation succeeded.
 */
int
esp_outbound_post(struct rte_mbuf *m,
		  struct ipsec_sa *sa,
		  struct rte_crypto_op *cop)
{
	RTE_ASSERT(m != NULL);
	RTE_ASSERT(sa != NULL);

	if ((sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) ||
			(sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO)) {
		m->ol_flags |= PKT_TX_SEC_OFFLOAD;
	} else {
		RTE_ASSERT(cop != NULL);
		if (cop->status != RTE_CRYPTO_OP_STATUS_SUCCESS) {
			RTE_LOG(ERR, IPSEC_ESP, "%s() failed crypto op\n",
				__func__);
			return -1;
		}
	}

	return 0;
}
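
/*
 * Typical call flow, as wired up elsewhere in ipsec-secgw: esp_inbound()
 * and esp_outbound() prepare the crypto operation before it is enqueued
 * to a cryptodev, and the corresponding *_post() functions run on the
 * dequeued result to check status and finish the packet.
 */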