examples/ipsec-secgw/esp.c

/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2017 Intel Corporation
 */

#include <stdint.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <fcntl.h>
#include <unistd.h>

#include <rte_common.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>
#include <rte_random.h>

#include "ipsec.h"
#include "esp.h"
#include "ipip.h"

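/*
 * Prepare the crypto operation for an inbound ESP packet: locate the ESP
 * payload behind the IP header, check that it is a positive multiple of the
 * cipher block size, and fill in the symmetric crypto operation (a single
 * AEAD range for AES-GCM, or separate cipher and auth ranges otherwise).
 * Inline-crypto SAs are processed entirely by the NIC, so nothing is done
 * for them here. Returns 0 on success or a negative errno on failure.
 */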
int
esp_inbound(struct rte_mbuf *m, struct ipsec_sa *sa,
		struct rte_crypto_op *cop)
{
	struct ip *ip4;
	struct rte_crypto_sym_op *sym_cop;
	int32_t payload_len, ip_hdr_len;

	RTE_ASSERT(sa != NULL);
	if (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO)
		return 0;

	RTE_ASSERT(m != NULL);
	RTE_ASSERT(cop != NULL);

	ip4 = rte_pktmbuf_mtod(m, struct ip *);
	if (likely(ip4->ip_v == IPVERSION))
		ip_hdr_len = ip4->ip_hl * 4;
	else if (ip4->ip_v == IP6_VERSION)
		/* XXX No option headers supported */
		ip_hdr_len = sizeof(struct ip6_hdr);
	else {
		RTE_LOG(ERR, IPSEC_ESP, "invalid IP packet type %d\n",
				ip4->ip_v);
		return -EINVAL;
	}

	payload_len = rte_pktmbuf_pkt_len(m) - ip_hdr_len -
		sizeof(struct esp_hdr) - sa->iv_len - sa->digest_len;

	if ((payload_len & (sa->block_size - 1)) || (payload_len <= 0)) {
		RTE_LOG_DP(DEBUG, IPSEC_ESP, "payload %d not multiple of %u\n",
				payload_len, sa->block_size);
		return -EINVAL;
	}

	sym_cop = get_sym_cop(cop);
	sym_cop->m_src = m;
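
	/*
	 * AEAD (AES-GCM) path: one contiguous AEAD range covers the ESP
	 * payload. Per RFC 4106 the nonce is the 4-byte SA salt followed by
	 * the 8-byte IV carried in the packet (the counter block ends with a
	 * 32-bit block counter of 1), and the AAD is the SPI and sequence
	 * number, i.e. the first 8 bytes of the ESP header.
	 */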
	if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_GCM) {
		sym_cop->aead.data.offset = ip_hdr_len + sizeof(struct esp_hdr) +
			sa->iv_len;
		sym_cop->aead.data.length = payload_len;

		struct cnt_blk *icb;
		uint8_t *aad;
		uint8_t *iv = RTE_PTR_ADD(ip4, ip_hdr_len + sizeof(struct esp_hdr));

		icb = get_cnt_blk(m);
		icb->salt = sa->salt;
		memcpy(&icb->iv, iv, 8);
		icb->cnt = rte_cpu_to_be_32(1);

		aad = get_aad(m);
		memcpy(aad, iv - sizeof(struct esp_hdr), 8);
		sym_cop->aead.aad.data = aad;
		sym_cop->aead.aad.phys_addr = rte_pktmbuf_iova_offset(m,
				aad - rte_pktmbuf_mtod(m, uint8_t *));

		sym_cop->aead.digest.data = rte_pktmbuf_mtod_offset(m, void*,
				rte_pktmbuf_pkt_len(m) - sa->digest_len);
		sym_cop->aead.digest.phys_addr = rte_pktmbuf_iova_offset(m,
				rte_pktmbuf_pkt_len(m) - sa->digest_len);
	} else {
		sym_cop->cipher.data.offset = ip_hdr_len + sizeof(struct esp_hdr) +
			sa->iv_len;
		sym_cop->cipher.data.length = payload_len;

		struct cnt_blk *icb;
		uint8_t *iv = RTE_PTR_ADD(ip4, ip_hdr_len + sizeof(struct esp_hdr));
		uint8_t *iv_ptr = rte_crypto_op_ctod_offset(cop,
					uint8_t *, IV_OFFSET);

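		/*
		 * Non-AEAD path: NULL/3DES-CBC/AES-CBC carry an explicit IV
		 * in the packet, which is copied into the crypto op's IV area
		 * at IV_OFFSET; AES-CTR instead rebuilds a counter block from
		 * the SA salt and the packet IV, as in the AEAD case.
		 */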
		switch (sa->cipher_algo) {
		case RTE_CRYPTO_CIPHER_NULL:
		case RTE_CRYPTO_CIPHER_3DES_CBC:
		case RTE_CRYPTO_CIPHER_AES_CBC:
			/* Copy IV at the end of crypto operation */
			rte_memcpy(iv_ptr, iv, sa->iv_len);
			break;
		case RTE_CRYPTO_CIPHER_AES_CTR:
			icb = get_cnt_blk(m);
			icb->salt = sa->salt;
			memcpy(&icb->iv, iv, 8);
			icb->cnt = rte_cpu_to_be_32(1);
			break;
		default:
			RTE_LOG(ERR, IPSEC_ESP, "unsupported cipher algorithm %u\n",
					sa->cipher_algo);
			return -EINVAL;
		}

		switch (sa->auth_algo) {
		case RTE_CRYPTO_AUTH_NULL:
		case RTE_CRYPTO_AUTH_SHA1_HMAC:
		case RTE_CRYPTO_AUTH_SHA256_HMAC:
			sym_cop->auth.data.offset = ip_hdr_len;
			sym_cop->auth.data.length = sizeof(struct esp_hdr) +
				sa->iv_len + payload_len;
			break;
		default:
			RTE_LOG(ERR, IPSEC_ESP, "unsupported auth algorithm %u\n",
					sa->auth_algo);
			return -EINVAL;
		}

		sym_cop->auth.digest.data = rte_pktmbuf_mtod_offset(m, void*,
				rte_pktmbuf_pkt_len(m) - sa->digest_len);
		sym_cop->auth.digest.phys_addr = rte_pktmbuf_iova_offset(m,
				rte_pktmbuf_pkt_len(m) - sa->digest_len);
	}

	return 0;
}

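/*
 * Post-decryption processing for an inbound ESP packet: check the crypto
 * status (for inline SAs it is derived from the mbuf offload flags), verify
 * and strip the ESP trailer (padding, pad length, next header) and the ICV,
 * then either rebuild the IP header for transport mode or strip the outer
 * tunnel header. Returns 0 on success or a negative value on failure.
 */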
int
esp_inbound_post(struct rte_mbuf *m, struct ipsec_sa *sa,
		struct rte_crypto_op *cop)
{
	struct ip *ip4, *ip;
	struct ip6_hdr *ip6;
	uint8_t *nexthdr, *pad_len;
	uint8_t *padding;
	uint16_t i;

	RTE_ASSERT(m != NULL);
	RTE_ASSERT(sa != NULL);
	RTE_ASSERT(cop != NULL);

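	/*
	 * For inline SAs the crypto result is reported by the NIC through the
	 * mbuf offload flags; translate it into a crypto op status so that
	 * the common check below applies to all SA types.
	 */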
	if ((sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) ||
			(sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO)) {
		if (m->ol_flags & PKT_RX_SEC_OFFLOAD) {
			if (m->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED)
				cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
			else
				cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
		} else
			cop->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
	}

	if (cop->status != RTE_CRYPTO_OP_STATUS_SUCCESS) {
		RTE_LOG(ERR, IPSEC_ESP, "failed crypto op\n");
		return -1;
	}

	if (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO &&
	    sa->ol_flags & RTE_SECURITY_RX_HW_TRAILER_OFFLOAD) {
		nexthdr = &m->inner_esp_next_proto;
	} else {
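		/*
		 * The ESP trailer (RFC 4303) precedes the ICV: padding bytes,
		 * a pad-length byte and a next-header byte. Verify that the
		 * padding follows the default sequential scheme (1, 2, 3, ...)
		 * before trimming the trailer and ICV from the mbuf.
		 */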
		nexthdr = rte_pktmbuf_mtod_offset(m, uint8_t*,
				rte_pktmbuf_pkt_len(m) - sa->digest_len - 1);
		pad_len = nexthdr - 1;

		padding = pad_len - *pad_len;
		for (i = 0; i < *pad_len; i++) {
			if (padding[i] != i + 1) {
				RTE_LOG(ERR, IPSEC_ESP, "invalid padding\n");
				return -EINVAL;
			}
		}

		if (rte_pktmbuf_trim(m, *pad_len + 2 + sa->digest_len)) {
			RTE_LOG(ERR, IPSEC_ESP,
					"failed to remove pad_len + digest\n");
			return -EINVAL;
		}
	}

	if (unlikely(sa->flags == TRANSPORT)) {
		ip = rte_pktmbuf_mtod(m, struct ip *);
		ip4 = (struct ip *)rte_pktmbuf_adj(m,
				sizeof(struct esp_hdr) + sa->iv_len);
		if (likely(ip->ip_v == IPVERSION)) {
			memmove(ip4, ip, ip->ip_hl * 4);
			ip4->ip_p = *nexthdr;
			ip4->ip_len = htons(rte_pktmbuf_data_len(m));
		} else {
			ip6 = (struct ip6_hdr *)ip4;
			/* XXX No option headers supported */
			memmove(ip6, ip, sizeof(struct ip6_hdr));
			ip6->ip6_nxt = *nexthdr;
			ip6->ip6_plen = htons(rte_pktmbuf_data_len(m) -
					      sizeof(struct ip6_hdr));
		}
	} else
		ipip_inbound(m, sizeof(struct esp_hdr) + sa->iv_len);

	return 0;
}

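/*
 * Build an outbound ESP packet and, for lookaside SAs, the matching crypto
 * operation: compute the padded payload length, reserve room for the ESP
 * trailer and ICV, prepend the tunnel header (or shift the original IP
 * header in transport mode), write the ESP header and IV, and fill in the
 * symmetric crypto operation. Inline-crypto SAs skip the crypto op and only
 * get the trailer/next-proto set up as needed. Returns 0 on success or a
 * negative errno on failure.
 */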
int
esp_outbound(struct rte_mbuf *m, struct ipsec_sa *sa,
		struct rte_crypto_op *cop)
{
	struct ip *ip4;
	struct ip6_hdr *ip6;
	struct esp_hdr *esp = NULL;
	uint8_t *padding = NULL, *new_ip, nlp;
	struct rte_crypto_sym_op *sym_cop;
	int32_t i;
	uint16_t pad_payload_len, pad_len, ip_hdr_len;

	RTE_ASSERT(m != NULL);
	RTE_ASSERT(sa != NULL);

	ip_hdr_len = 0;

	ip4 = rte_pktmbuf_mtod(m, struct ip *);
	if (likely(ip4->ip_v == IPVERSION)) {
		if (unlikely(sa->flags == TRANSPORT)) {
			ip_hdr_len = ip4->ip_hl * 4;
			nlp = ip4->ip_p;
		} else
			nlp = IPPROTO_IPIP;
	} else if (ip4->ip_v == IP6_VERSION) {
		if (unlikely(sa->flags == TRANSPORT)) {
			/* XXX No option headers supported */
			ip_hdr_len = sizeof(struct ip6_hdr);
			ip6 = (struct ip6_hdr *)ip4;
			nlp = ip6->ip6_nxt;
		} else
			nlp = IPPROTO_IPV6;
	} else {
		RTE_LOG(ERR, IPSEC_ESP, "invalid IP packet type %d\n",
				ip4->ip_v);
		return -EINVAL;
	}

	/*
	 * Padded payload length: the extra 2 bytes account for the pad-length
	 * and next-header fields of the ESP trailer; the result is rounded up
	 * to the cipher block size.
	 */
	pad_payload_len = RTE_ALIGN_CEIL(rte_pktmbuf_pkt_len(m) -
			ip_hdr_len + 2, sa->block_size);
	pad_len = pad_payload_len + ip_hdr_len - rte_pktmbuf_pkt_len(m);

	RTE_ASSERT(sa->flags == IP4_TUNNEL || sa->flags == IP6_TUNNEL ||
			sa->flags == TRANSPORT);

	if (likely(sa->flags == IP4_TUNNEL))
		ip_hdr_len = sizeof(struct ip);
	else if (sa->flags == IP6_TUNNEL)
		ip_hdr_len = sizeof(struct ip6_hdr);
	else if (sa->flags != TRANSPORT) {
		RTE_LOG(ERR, IPSEC_ESP, "Unsupported SA flags: 0x%x\n",
				sa->flags);
		return -EINVAL;
	}

	/* Check maximum packet size */
	if (unlikely(ip_hdr_len + sizeof(struct esp_hdr) + sa->iv_len +
			pad_payload_len + sa->digest_len > IP_MAXPACKET)) {
		RTE_LOG(ERR, IPSEC_ESP, "ipsec packet is too big\n");
		return -EINVAL;
	}

	/* Add trailer padding if it is not constructed by HW */
	if (sa->type != RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
	    (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO &&
	     !(sa->ol_flags & RTE_SECURITY_TX_HW_TRAILER_OFFLOAD))) {
		padding = (uint8_t *)rte_pktmbuf_append(m, pad_len +
							sa->digest_len);
		if (unlikely(padding == NULL)) {
			RTE_LOG(ERR, IPSEC_ESP,
					"not enough mbuf trailing space\n");
			return -ENOSPC;
		}
		rte_prefetch0(padding);
	}

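	/*
	 * Build the outer headers: tunnel mode prepends a new IPv4/IPv6
	 * header in front of the ESP header, while transport mode shifts the
	 * original IP header forward to make room for the ESP header and IV.
	 */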
	switch (sa->flags) {
	case IP4_TUNNEL:
		ip4 = ip4ip_outbound(m, sizeof(struct esp_hdr) + sa->iv_len,
				&sa->src, &sa->dst);
		esp = (struct esp_hdr *)(ip4 + 1);
		break;
	case IP6_TUNNEL:
		ip6 = ip6ip_outbound(m, sizeof(struct esp_hdr) + sa->iv_len,
				&sa->src, &sa->dst);
		esp = (struct esp_hdr *)(ip6 + 1);
		break;
	case TRANSPORT:
		new_ip = (uint8_t *)rte_pktmbuf_prepend(m,
				sizeof(struct esp_hdr) + sa->iv_len);
		memmove(new_ip, ip4, ip_hdr_len);
		esp = (struct esp_hdr *)(new_ip + ip_hdr_len);
		ip4 = (struct ip *)new_ip;
		if (likely(ip4->ip_v == IPVERSION)) {
			ip4->ip_p = IPPROTO_ESP;
			ip4->ip_len = htons(rte_pktmbuf_data_len(m));
		} else {
			ip6 = (struct ip6_hdr *)new_ip;
			ip6->ip6_nxt = IPPROTO_ESP;
			ip6->ip6_plen = htons(rte_pktmbuf_data_len(m) -
					      sizeof(struct ip6_hdr));
		}
	}

	sa->seq++;
	esp->spi = rte_cpu_to_be_32(sa->spi);
	esp->seq = rte_cpu_to_be_32((uint32_t)sa->seq);

	/*
	 * Set the per-packet IV: AES-GCM and AES-CTR use the 64-bit sequence
	 * number, while the CBC ciphers in this example use an all-zero IV.
	 */
	uint64_t *iv = (uint64_t *)(esp + 1);
	if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_GCM) {
		*iv = rte_cpu_to_be_64(sa->seq);
	} else {
		switch (sa->cipher_algo) {
		case RTE_CRYPTO_CIPHER_NULL:
		case RTE_CRYPTO_CIPHER_3DES_CBC:
		case RTE_CRYPTO_CIPHER_AES_CBC:
			memset(iv, 0, sa->iv_len);
			break;
		case RTE_CRYPTO_CIPHER_AES_CTR:
			*iv = rte_cpu_to_be_64(sa->seq);
			break;
		default:
			RTE_LOG(ERR, IPSEC_ESP,
				"unsupported cipher algorithm %u\n",
				sa->cipher_algo);
			return -EINVAL;
		}
	}

	if (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
		if (sa->ol_flags & RTE_SECURITY_TX_HW_TRAILER_OFFLOAD) {
			/* Set the inner esp next protocol for HW trailer */
			m->inner_esp_next_proto = nlp;
			m->packet_type |= RTE_PTYPE_TUNNEL_ESP;
		} else {
			padding[pad_len - 2] = pad_len - 2;
			padding[pad_len - 1] = nlp;
		}
		goto done;
	}

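	/*
	 * Lookaside path: everything below fills the crypto operation that
	 * will be handed to a crypto device. For AES-GCM the AEAD range
	 * covers only the padded payload; the counter block is rebuilt from
	 * the SA salt and sequence number, and the AAD is the SPI plus
	 * sequence number taken from the ESP header (RFC 4106).
	 */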
	RTE_ASSERT(cop != NULL);
	sym_cop = get_sym_cop(cop);
	sym_cop->m_src = m;

	if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_GCM) {
		uint8_t *aad;

		sym_cop->aead.data.offset = ip_hdr_len +
			sizeof(struct esp_hdr) + sa->iv_len;
		sym_cop->aead.data.length = pad_payload_len;

		/* Fill pad_len using default sequential scheme */
		for (i = 0; i < pad_len - 2; i++)
			padding[i] = i + 1;
		padding[pad_len - 2] = pad_len - 2;
		padding[pad_len - 1] = nlp;

		struct cnt_blk *icb = get_cnt_blk(m);
		icb->salt = sa->salt;
		icb->iv = rte_cpu_to_be_64(sa->seq);
		icb->cnt = rte_cpu_to_be_32(1);

		aad = get_aad(m);
		memcpy(aad, esp, 8);
		sym_cop->aead.aad.data = aad;
		sym_cop->aead.aad.phys_addr = rte_pktmbuf_iova_offset(m,
				aad - rte_pktmbuf_mtod(m, uint8_t *));

		sym_cop->aead.digest.data = rte_pktmbuf_mtod_offset(m, uint8_t *,
			rte_pktmbuf_pkt_len(m) - sa->digest_len);
		sym_cop->aead.digest.phys_addr = rte_pktmbuf_iova_offset(m,
			rte_pktmbuf_pkt_len(m) - sa->digest_len);
	} else {
		switch (sa->cipher_algo) {
		case RTE_CRYPTO_CIPHER_NULL:
		case RTE_CRYPTO_CIPHER_3DES_CBC:
		case RTE_CRYPTO_CIPHER_AES_CBC:
			sym_cop->cipher.data.offset = ip_hdr_len +
				sizeof(struct esp_hdr);
			sym_cop->cipher.data.length = pad_payload_len + sa->iv_len;
			break;
		case RTE_CRYPTO_CIPHER_AES_CTR:
			sym_cop->cipher.data.offset = ip_hdr_len +
				sizeof(struct esp_hdr) + sa->iv_len;
			sym_cop->cipher.data.length = pad_payload_len;
			break;
		default:
			RTE_LOG(ERR, IPSEC_ESP, "unsupported cipher algorithm %u\n",
					sa->cipher_algo);
			return -EINVAL;
		}

		/* Fill pad_len using default sequential scheme */
		for (i = 0; i < pad_len - 2; i++)
			padding[i] = i + 1;
		padding[pad_len - 2] = pad_len - 2;
		padding[pad_len - 1] = nlp;

		struct cnt_blk *icb = get_cnt_blk(m);
		icb->salt = sa->salt;
		icb->iv = rte_cpu_to_be_64(sa->seq);
		icb->cnt = rte_cpu_to_be_32(1);

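		/*
		 * The authenticated region covers the ESP header, IV and
		 * padded payload; the outer IP header is never part of the
		 * ICV computation.
		 */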
		switch (sa->auth_algo) {
		case RTE_CRYPTO_AUTH_NULL:
		case RTE_CRYPTO_AUTH_SHA1_HMAC:
		case RTE_CRYPTO_AUTH_SHA256_HMAC:
			sym_cop->auth.data.offset = ip_hdr_len;
			sym_cop->auth.data.length = sizeof(struct esp_hdr) +
				sa->iv_len + pad_payload_len;
			break;
		default:
			RTE_LOG(ERR, IPSEC_ESP, "unsupported auth algorithm %u\n",
					sa->auth_algo);
			return -EINVAL;
		}

		sym_cop->auth.digest.data = rte_pktmbuf_mtod_offset(m, uint8_t *,
				rte_pktmbuf_pkt_len(m) - sa->digest_len);
		sym_cop->auth.digest.phys_addr = rte_pktmbuf_iova_offset(m,
				rte_pktmbuf_pkt_len(m) - sa->digest_len);
	}

done:
	return 0;
}

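/*
 * Post-processing for an outbound packet: inline SAs are marked for security
 * offload on transmit, while lookaside SAs only need the crypto operation's
 * status checked. Returns 0 on success, -1 if the crypto operation failed.
 */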
int
esp_outbound_post(struct rte_mbuf *m,
		  struct ipsec_sa *sa,
		  struct rte_crypto_op *cop)
{
	RTE_ASSERT(m != NULL);
	RTE_ASSERT(sa != NULL);

	if ((sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) ||
			(sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO)) {
		m->ol_flags |= PKT_TX_SEC_OFFLOAD;
	} else {
		RTE_ASSERT(cop != NULL);
		if (cop->status != RTE_CRYPTO_OP_STATUS_SUCCESS) {
			RTE_LOG(ERR, IPSEC_ESP, "Failed crypto op\n");
			return -1;
		}
	}

	return 0;
}