/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2017 Intel Corporation
 */

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>

#include <rte_common.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>
#include <rte_random.h>

#include "ipsec.h"
#include "esp.h"
#include "ipip.h"
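/*
 * Prepare the symmetric crypto operation that decrypts and authenticates an
 * inbound ESP packet: locate the payload behind the IP and ESP headers,
 * check that it is block-aligned, and fill the AEAD or cipher/auth fields of
 * the crypto op.  Inline-crypto SAs are handled by the NIC and skip this.
 */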
int
esp_inbound(struct rte_mbuf *m, struct ipsec_sa *sa,
		struct rte_crypto_op *cop)
{
	struct ip *ip4;
	struct rte_crypto_sym_op *sym_cop;
	int32_t payload_len, ip_hdr_len;

	RTE_ASSERT(sa != NULL);
	if (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO)
		return 0;

	RTE_ASSERT(m != NULL);
	RTE_ASSERT(cop != NULL);

	ip4 = rte_pktmbuf_mtod(m, struct ip *);
	if (likely(ip4->ip_v == IPVERSION))
		ip_hdr_len = ip4->ip_hl * 4;
	else if (ip4->ip_v == IP6_VERSION)
		/* XXX No option headers supported */
		ip_hdr_len = sizeof(struct ip6_hdr);
	else {
		RTE_LOG(ERR, IPSEC_ESP, "invalid IP packet type %d\n",
				ip4->ip_v);
		return -EINVAL;
	}

	payload_len = rte_pktmbuf_pkt_len(m) - ip_hdr_len -
			sizeof(struct rte_esp_hdr) - sa->iv_len - sa->digest_len;

	if ((payload_len & (sa->block_size - 1)) || (payload_len <= 0)) {
		RTE_LOG_DP(DEBUG, IPSEC_ESP, "payload %d not multiple of %u\n",
				payload_len, sa->block_size);
		return -EINVAL;
	}
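	/*
	 * Populate the crypto op.  AES-GCM carries encryption and
	 * authentication in the combined aead.* fields; all other algorithms
	 * use separate cipher.* and auth.* descriptors plus a per-packet IV.
	 */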
	sym_cop = get_sym_cop(cop);
	sym_cop->m_src = m;

	if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_GCM) {
		sym_cop->aead.data.offset =
			ip_hdr_len + sizeof(struct rte_esp_hdr) + sa->iv_len;
		sym_cop->aead.data.length = payload_len;

		struct cnt_blk *icb;
		uint8_t *aad;
		uint8_t *iv = RTE_PTR_ADD(ip4, ip_hdr_len +
				sizeof(struct rte_esp_hdr));

		/* Counter block: salt from the SA, IV from the packet */
		icb = get_cnt_blk(m);
		icb->salt = sa->salt;
		memcpy(&icb->iv, iv, 8);
		icb->cnt = rte_cpu_to_be_32(1);

		/* AAD is the 8-byte SPI + sequence number of the ESP header */
		aad = get_aad(m);
		memcpy(aad, iv - sizeof(struct rte_esp_hdr), 8);
		sym_cop->aead.aad.data = aad;
		sym_cop->aead.aad.phys_addr = rte_pktmbuf_iova_offset(m,
				aad - rte_pktmbuf_mtod(m, uint8_t *));

		sym_cop->aead.digest.data = rte_pktmbuf_mtod_offset(m, void*,
				rte_pktmbuf_pkt_len(m) - sa->digest_len);
		sym_cop->aead.digest.phys_addr = rte_pktmbuf_iova_offset(m,
				rte_pktmbuf_pkt_len(m) - sa->digest_len);
	} else {
		sym_cop->cipher.data.offset = ip_hdr_len +
				sizeof(struct rte_esp_hdr) +
				sa->iv_len;
		sym_cop->cipher.data.length = payload_len;

		struct cnt_blk *icb;
		uint8_t *iv = RTE_PTR_ADD(ip4, ip_hdr_len +
				sizeof(struct rte_esp_hdr));
		uint8_t *iv_ptr = rte_crypto_op_ctod_offset(cop,
				uint8_t *, IV_OFFSET);

		switch (sa->cipher_algo) {
		case RTE_CRYPTO_CIPHER_NULL:
		case RTE_CRYPTO_CIPHER_3DES_CBC:
		case RTE_CRYPTO_CIPHER_AES_CBC:
			/* Copy IV at the end of crypto operation */
			rte_memcpy(iv_ptr, iv, sa->iv_len);
			break;
		case RTE_CRYPTO_CIPHER_AES_CTR:
			icb = get_cnt_blk(m);
			icb->salt = sa->salt;
			memcpy(&icb->iv, iv, 8);
			icb->cnt = rte_cpu_to_be_32(1);
			break;
		default:
			RTE_LOG(ERR, IPSEC_ESP, "unsupported cipher algorithm %u\n",
					sa->cipher_algo);
			return -EINVAL;
		}

		switch (sa->auth_algo) {
		case RTE_CRYPTO_AUTH_NULL:
		case RTE_CRYPTO_AUTH_SHA1_HMAC:
		case RTE_CRYPTO_AUTH_SHA256_HMAC:
			sym_cop->auth.data.offset = ip_hdr_len;
			sym_cop->auth.data.length = sizeof(struct rte_esp_hdr) +
					sa->iv_len + payload_len;
			break;
		default:
			RTE_LOG(ERR, IPSEC_ESP, "unsupported auth algorithm %u\n",
					sa->auth_algo);
			return -EINVAL;
		}

		sym_cop->auth.digest.data = rte_pktmbuf_mtod_offset(m, void*,
				rte_pktmbuf_pkt_len(m) - sa->digest_len);
		sym_cop->auth.digest.phys_addr = rte_pktmbuf_iova_offset(m,
				rte_pktmbuf_pkt_len(m) - sa->digest_len);
	}

	return 0;
}
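/*
 * Post-process an inbound packet once the crypto device (or inline hardware)
 * is done with it: check the operation status, validate and strip the ESP
 * trailer (padding, pad length, next header) and the digest, then restore
 * the inner header in transport mode or drop the outer header in tunnel mode.
 */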
int
esp_inbound_post(struct rte_mbuf *m, struct ipsec_sa *sa,
		struct rte_crypto_op *cop)
{
	struct ip *ip4, *ip;
	struct ip6_hdr *ip6;
	uint8_t *nexthdr, *pad_len;
	uint8_t *padding;
	uint16_t i;

	RTE_ASSERT(m != NULL);
	RTE_ASSERT(sa != NULL);
	RTE_ASSERT(cop != NULL);

	/* For inline SAs, derive the crypto status from the mbuf offload flags */
	if ((sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) ||
			(sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO)) {
		if (m->ol_flags & PKT_RX_SEC_OFFLOAD) {
			if (m->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED)
				cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
			else
				cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
		} else
			cop->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
	}

	if (cop->status != RTE_CRYPTO_OP_STATUS_SUCCESS) {
		RTE_LOG(ERR, IPSEC_ESP, "%s() failed crypto op\n", __func__);
		return -1;
	}

	if (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO &&
			sa->ol_flags & RTE_SECURITY_RX_HW_TRAILER_OFFLOAD) {
		nexthdr = &m->inner_esp_next_proto;
	} else {
		nexthdr = rte_pktmbuf_mtod_offset(m, uint8_t*,
				rte_pktmbuf_pkt_len(m) - sa->digest_len - 1);
		pad_len = nexthdr - 1;

		/* Verify the sequential padding bytes of the ESP trailer */
		padding = pad_len - *pad_len;
		for (i = 0; i < *pad_len; i++) {
			if (padding[i] != i + 1) {
				RTE_LOG(ERR, IPSEC_ESP, "invalid padding\n");
				return -EINVAL;
			}
		}

		if (rte_pktmbuf_trim(m, *pad_len + 2 + sa->digest_len)) {
			RTE_LOG(ERR, IPSEC_ESP,
					"failed to remove pad_len + digest\n");
			return -EINVAL;
		}
	}
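	/*
	 * Remove the ESP header and IV.  In transport mode the original IP
	 * header is shifted forward and its protocol/length fields patched;
	 * in tunnel mode the outer IP header is dropped altogether.
	 */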
	if (unlikely(sa->flags == TRANSPORT)) {
		ip = rte_pktmbuf_mtod(m, struct ip *);
		ip4 = (struct ip *)rte_pktmbuf_adj(m,
				sizeof(struct rte_esp_hdr) + sa->iv_len);
		if (likely(ip->ip_v == IPVERSION)) {
			memmove(ip4, ip, ip->ip_hl * 4);
			ip4->ip_p = *nexthdr;
			ip4->ip_len = htons(rte_pktmbuf_data_len(m));
		} else {
			ip6 = (struct ip6_hdr *)ip4;
			/* XXX No option headers supported */
			memmove(ip6, ip, sizeof(struct ip6_hdr));
			ip6->ip6_nxt = *nexthdr;
			ip6->ip6_plen = htons(rte_pktmbuf_data_len(m) -
					sizeof(struct ip6_hdr));
		}
	} else
		ipip_inbound(m, sizeof(struct rte_esp_hdr) + sa->iv_len);

	return 0;
}
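/*
 * Turn a plain IPv4/IPv6 packet into an ESP packet: compute the padded
 * payload length, append room for the ESP trailer and digest, prepend the
 * outer (tunnel) or shifted (transport) IP header plus the ESP header,
 * write the SPI, sequence number and IV, and describe the remaining work
 * for the crypto device unless an inline-crypto SA lets the NIC do it.
 */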
int
esp_outbound(struct rte_mbuf *m, struct ipsec_sa *sa,
		struct rte_crypto_op *cop)
{
	struct ip *ip4;
	struct ip6_hdr *ip6;
	struct rte_esp_hdr *esp = NULL;
	uint8_t *padding = NULL, *new_ip, nlp;
	struct rte_crypto_sym_op *sym_cop;
	int32_t i;
	uint16_t pad_payload_len, pad_len, ip_hdr_len;

	RTE_ASSERT(m != NULL);
	RTE_ASSERT(sa != NULL);

	ip_hdr_len = 0;

	ip4 = rte_pktmbuf_mtod(m, struct ip *);
	if (likely(ip4->ip_v == IPVERSION)) {
		if (unlikely(sa->flags == TRANSPORT)) {
			ip_hdr_len = ip4->ip_hl * 4;
			nlp = ip4->ip_p;
		} else
			nlp = IPPROTO_IPIP;
	} else if (ip4->ip_v == IP6_VERSION) {
		if (unlikely(sa->flags == TRANSPORT)) {
			/* XXX No option headers supported */
			ip_hdr_len = sizeof(struct ip6_hdr);
			ip6 = (struct ip6_hdr *)ip4;
			nlp = ip6->ip6_nxt;
		} else
			nlp = IPPROTO_IPV6;
	} else {
		RTE_LOG(ERR, IPSEC_ESP, "invalid IP packet type %d\n",
				ip4->ip_v);
		return -EINVAL;
	}

	/* Padded payload length */
	pad_payload_len = RTE_ALIGN_CEIL(rte_pktmbuf_pkt_len(m) -
			ip_hdr_len + 2, sa->block_size);
	pad_len = pad_payload_len + ip_hdr_len - rte_pktmbuf_pkt_len(m);
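	/*
	 * Example: with a 16-byte block size and 100 bytes of data after the
	 * IP header, pad_payload_len = RTE_ALIGN_CEIL(102, 16) = 112 and
	 * pad_len = 12, i.e. 10 sequential pad bytes plus the pad-length and
	 * next-header bytes of the ESP trailer.
	 */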
	RTE_ASSERT(sa->flags == IP4_TUNNEL || sa->flags == IP6_TUNNEL ||
			sa->flags == TRANSPORT);

	if (likely(sa->flags == IP4_TUNNEL))
		ip_hdr_len = sizeof(struct ip);
	else if (sa->flags == IP6_TUNNEL)
		ip_hdr_len = sizeof(struct ip6_hdr);
	else if (sa->flags != TRANSPORT) {
		RTE_LOG(ERR, IPSEC_ESP, "Unsupported SA flags: 0x%x\n",
				sa->flags);
		return -EINVAL;
	}

	/* Check maximum packet size */
	if (unlikely(ip_hdr_len + sizeof(struct rte_esp_hdr) + sa->iv_len +
			pad_payload_len + sa->digest_len > IP_MAXPACKET)) {
		RTE_LOG(ERR, IPSEC_ESP, "ipsec packet is too big\n");
		return -EINVAL;
	}

	/* Add trailer padding if it is not constructed by HW */
	if (sa->type != RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
			(sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO &&
			!(sa->ol_flags & RTE_SECURITY_TX_HW_TRAILER_OFFLOAD))) {
		padding = (uint8_t *)rte_pktmbuf_append(m, pad_len +
				sa->digest_len);
		if (unlikely(padding == NULL)) {
			RTE_LOG(ERR, IPSEC_ESP,
					"not enough mbuf trailing space\n");
			return -ENOSPC;
		}
		rte_prefetch0(padding);
	}
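	/*
	 * Build the output headers: encapsulate in a new IPv4/IPv6 tunnel
	 * header, or in transport mode shift the existing header forward to
	 * make room, then place the ESP header directly behind it.
	 */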
	switch (sa->flags) {
	case IP4_TUNNEL:
		ip4 = ip4ip_outbound(m, sizeof(struct rte_esp_hdr) + sa->iv_len,
				&sa->src, &sa->dst);
		esp = (struct rte_esp_hdr *)(ip4 + 1);
		break;
	case IP6_TUNNEL:
		ip6 = ip6ip_outbound(m, sizeof(struct rte_esp_hdr) + sa->iv_len,
				&sa->src, &sa->dst);
		esp = (struct rte_esp_hdr *)(ip6 + 1);
		break;
	case TRANSPORT:
		new_ip = (uint8_t *)rte_pktmbuf_prepend(m,
				sizeof(struct rte_esp_hdr) + sa->iv_len);
		memmove(new_ip, ip4, ip_hdr_len);
		esp = (struct rte_esp_hdr *)(new_ip + ip_hdr_len);
		ip4 = (struct ip *)new_ip;
		if (likely(ip4->ip_v == IPVERSION)) {
			ip4->ip_p = IPPROTO_ESP;
			ip4->ip_len = htons(rte_pktmbuf_data_len(m));
		} else {
			ip6 = (struct ip6_hdr *)new_ip;
			ip6->ip6_nxt = IPPROTO_ESP;
			ip6->ip6_plen = htons(rte_pktmbuf_data_len(m) -
					sizeof(struct ip6_hdr));
		}
	}

	sa->seq++;
	esp->spi = rte_cpu_to_be_32(sa->spi);
	esp->seq = rte_cpu_to_be_32((uint32_t)sa->seq);
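	/*
	 * Per-packet IV transmitted right after the ESP header: the 64-bit
	 * sequence number for AES-GCM and AES-CTR, zeroed for the NULL/CBC
	 * ciphers.
	 */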
	uint64_t *iv = (uint64_t *)(esp + 1);

	if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_GCM) {
		*iv = rte_cpu_to_be_64(sa->seq);
	} else {
		switch (sa->cipher_algo) {
		case RTE_CRYPTO_CIPHER_NULL:
		case RTE_CRYPTO_CIPHER_3DES_CBC:
		case RTE_CRYPTO_CIPHER_AES_CBC:
			memset(iv, 0, sa->iv_len);
			break;
		case RTE_CRYPTO_CIPHER_AES_CTR:
			*iv = rte_cpu_to_be_64(sa->seq);
			break;
		default:
			RTE_LOG(ERR, IPSEC_ESP,
					"unsupported cipher algorithm %u\n",
					sa->cipher_algo);
			return -EINVAL;
		}
	}

	if (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
		if (sa->ol_flags & RTE_SECURITY_TX_HW_TRAILER_OFFLOAD) {
			/* Set the inner esp next protocol for HW trailer */
			m->inner_esp_next_proto = nlp;
			m->packet_type |= RTE_PTYPE_TUNNEL_ESP;
		} else {
			padding[pad_len - 2] = pad_len - 2;
			padding[pad_len - 1] = nlp;
		}
		goto done;
	}
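	/*
	 * Lookaside path: describe the encrypt/authenticate work for the
	 * crypto device and fill the ESP trailer padding in software.
	 */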
	RTE_ASSERT(cop != NULL);
	sym_cop = get_sym_cop(cop);
	sym_cop->m_src = m;

	if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_GCM) {
		uint8_t *aad;

		sym_cop->aead.data.offset = ip_hdr_len +
				sizeof(struct rte_esp_hdr) + sa->iv_len;
		sym_cop->aead.data.length = pad_payload_len;

		/* Fill pad_len using default sequential scheme */
		for (i = 0; i < pad_len - 2; i++)
			padding[i] = i + 1;
		padding[pad_len - 2] = pad_len - 2;
		padding[pad_len - 1] = nlp;

		struct cnt_blk *icb = get_cnt_blk(m);
		icb->salt = sa->salt;
		icb->iv = rte_cpu_to_be_64(sa->seq);
		icb->cnt = rte_cpu_to_be_32(1);

		/* AAD is the SPI + sequence number at the start of the ESP header */
		aad = get_aad(m);
		memcpy(aad, esp, 8);
		sym_cop->aead.aad.data = aad;
		sym_cop->aead.aad.phys_addr = rte_pktmbuf_iova_offset(m,
				aad - rte_pktmbuf_mtod(m, uint8_t *));

		sym_cop->aead.digest.data = rte_pktmbuf_mtod_offset(m, uint8_t *,
				rte_pktmbuf_pkt_len(m) - sa->digest_len);
		sym_cop->aead.digest.phys_addr = rte_pktmbuf_iova_offset(m,
				rte_pktmbuf_pkt_len(m) - sa->digest_len);
	} else {
		switch (sa->cipher_algo) {
		case RTE_CRYPTO_CIPHER_NULL:
		case RTE_CRYPTO_CIPHER_3DES_CBC:
		case RTE_CRYPTO_CIPHER_AES_CBC:
			sym_cop->cipher.data.offset = ip_hdr_len +
					sizeof(struct rte_esp_hdr);
			sym_cop->cipher.data.length = pad_payload_len + sa->iv_len;
			break;
		case RTE_CRYPTO_CIPHER_AES_CTR:
			sym_cop->cipher.data.offset = ip_hdr_len +
					sizeof(struct rte_esp_hdr) + sa->iv_len;
			sym_cop->cipher.data.length = pad_payload_len;
			break;
		default:
			RTE_LOG(ERR, IPSEC_ESP, "unsupported cipher algorithm %u\n",
					sa->cipher_algo);
			return -EINVAL;
		}

		/* Fill pad_len using default sequential scheme */
		for (i = 0; i < pad_len - 2; i++)
			padding[i] = i + 1;
		padding[pad_len - 2] = pad_len - 2;
		padding[pad_len - 1] = nlp;

		struct cnt_blk *icb = get_cnt_blk(m);
		icb->salt = sa->salt;
		icb->iv = rte_cpu_to_be_64(sa->seq);
		icb->cnt = rte_cpu_to_be_32(1);

		switch (sa->auth_algo) {
		case RTE_CRYPTO_AUTH_NULL:
		case RTE_CRYPTO_AUTH_SHA1_HMAC:
		case RTE_CRYPTO_AUTH_SHA256_HMAC:
			sym_cop->auth.data.offset = ip_hdr_len;
			sym_cop->auth.data.length = sizeof(struct rte_esp_hdr) +
					sa->iv_len + pad_payload_len;
			break;
		default:
			RTE_LOG(ERR, IPSEC_ESP, "unsupported auth algorithm %u\n",
					sa->auth_algo);
			return -EINVAL;
		}

		sym_cop->auth.digest.data = rte_pktmbuf_mtod_offset(m, uint8_t *,
				rte_pktmbuf_pkt_len(m) - sa->digest_len);
		sym_cop->auth.digest.phys_addr = rte_pktmbuf_iova_offset(m,
				rte_pktmbuf_pkt_len(m) - sa->digest_len);
	}

done:
	return 0;
}
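/*
 * Final step on the outbound side: for inline SAs request security offload
 * on transmit; otherwise just verify that the crypto operation succeeded.
 */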
int
esp_outbound_post(struct rte_mbuf *m,
		struct ipsec_sa *sa,
		struct rte_crypto_op *cop)
{
	RTE_ASSERT(m != NULL);
	RTE_ASSERT(sa != NULL);
	if ((sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) ||
			(sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO)) {
		m->ol_flags |= PKT_TX_SEC_OFFLOAD;
	} else {
		RTE_ASSERT(cop != NULL);
		if (cop->status != RTE_CRYPTO_OP_STATUS_SUCCESS) {
			RTE_LOG(ERR, IPSEC_ESP, "%s() failed crypto op\n",
					__func__);
			return -1;
		}
	}
	return 0;
}