/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2017 Intel Corporation
 */

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>

#include <rte_common.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>
#include <rte_random.h>

#include "ipsec.h"
#include "esp.h"
#include "ipip.h"
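
/*
 * ESP datapath for the ipsec-secgw example: esp_inbound() and
 * esp_inbound_post() prepare and finalize decryption of received ESP
 * packets, esp_outbound() and esp_outbound_post() do the same for
 * transmission. The per-mbuf helpers used below (get_sym_cop(),
 * get_cnt_blk(), get_aad()) and the IS_*()/WITHOUT_TRANSPORT_VERSION()
 * SA-flag macros are assumed to be provided by the local "ipsec.h".
 */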
int
esp_inbound(struct rte_mbuf *m, struct ipsec_sa *sa,
		struct rte_crypto_op *cop)
{
	struct ip *ip4;
	struct rte_crypto_sym_op *sym_cop;
	int32_t payload_len, ip_hdr_len;

	RTE_ASSERT(sa != NULL);
	if (ipsec_get_action_type(sa) ==
			RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO)
		return 0;

	RTE_ASSERT(m != NULL);
	RTE_ASSERT(cop != NULL);

	ip4 = rte_pktmbuf_mtod(m, struct ip *);
	if (likely(ip4->ip_v == IPVERSION))
		ip_hdr_len = ip4->ip_hl * 4;
	else if (ip4->ip_v == IP6_VERSION)
		/* XXX No option headers supported */
		ip_hdr_len = sizeof(struct ip6_hdr);
	else {
		RTE_LOG(ERR, IPSEC_ESP, "invalid IP packet type %d\n",
				ip4->ip_v);
		return -EINVAL;
	}
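
	/*
	 * ESP payload length = total packet length minus IP header, ESP
	 * header, IV and ICV. block_size is a power of two, so the
	 * bitwise AND below is a cheap "not a multiple of" test.
	 */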
	payload_len = rte_pktmbuf_pkt_len(m) - ip_hdr_len -
		sizeof(struct rte_esp_hdr) - sa->iv_len - sa->digest_len;

	if ((payload_len & (sa->block_size - 1)) || (payload_len <= 0)) {
		RTE_LOG_DP(DEBUG, IPSEC_ESP, "payload %d not multiple of %u\n",
				payload_len, sa->block_size);
		return -EINVAL;
	}

	sym_cop = get_sym_cop(cop);
	sym_cop->m_src = m;
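
	/*
	 * AEAD transforms (AES-GCM) use a single aead descriptor with
	 * the ESP header as AAD; everything else is programmed as
	 * separate cipher and auth transforms further below.
	 */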
	if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_GCM) {
		sym_cop->aead.data.offset =
			ip_hdr_len + sizeof(struct rte_esp_hdr) + sa->iv_len;
		sym_cop->aead.data.length = payload_len;

		struct cnt_blk *icb;
		uint8_t *aad;
		uint8_t *iv = RTE_PTR_ADD(ip4, ip_hdr_len +
				sizeof(struct rte_esp_hdr));

		icb = get_cnt_blk(m);
		icb->salt = sa->salt;
		memcpy(&icb->iv, iv, 8);
		icb->cnt = rte_cpu_to_be_32(1);
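
		/*
		 * The AAD for ESP with AES-GCM is the SPI plus sequence
		 * number: the 8 ESP-header bytes that precede the IV.
		 */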
		aad = get_aad(m);
		memcpy(aad, iv - sizeof(struct rte_esp_hdr), 8);
		sym_cop->aead.aad.data = aad;
		sym_cop->aead.aad.phys_addr = rte_pktmbuf_iova_offset(m,
				aad - rte_pktmbuf_mtod(m, uint8_t *));

		sym_cop->aead.digest.data = rte_pktmbuf_mtod_offset(m, void*,
				rte_pktmbuf_pkt_len(m) - sa->digest_len);
		sym_cop->aead.digest.phys_addr = rte_pktmbuf_iova_offset(m,
				rte_pktmbuf_pkt_len(m) - sa->digest_len);
	} else {
		sym_cop->cipher.data.offset = ip_hdr_len +
			sizeof(struct rte_esp_hdr) +
			sa->iv_len;
		sym_cop->cipher.data.length = payload_len;

		struct cnt_blk *icb;
		uint8_t *iv = RTE_PTR_ADD(ip4, ip_hdr_len +
				sizeof(struct rte_esp_hdr));
		uint8_t *iv_ptr = rte_crypto_op_ctod_offset(cop,
				uint8_t *, IV_OFFSET);
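
		/*
		 * CBC/NULL ciphers take the wire IV verbatim via the
		 * crypto op; AES-CTR instead builds a counter block from
		 * the SA salt, the wire IV and an initial counter of 1.
		 */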
		switch (sa->cipher_algo) {
		case RTE_CRYPTO_CIPHER_NULL:
		case RTE_CRYPTO_CIPHER_3DES_CBC:
		case RTE_CRYPTO_CIPHER_AES_CBC:
			/* Copy IV at the end of crypto operation */
			rte_memcpy(iv_ptr, iv, sa->iv_len);
			break;
		case RTE_CRYPTO_CIPHER_AES_CTR:
			icb = get_cnt_blk(m);
			icb->salt = sa->salt;
			memcpy(&icb->iv, iv, 8);
			icb->cnt = rte_cpu_to_be_32(1);
			break;
		default:
			RTE_LOG(ERR, IPSEC_ESP, "unsupported cipher algorithm %u\n",
					sa->cipher_algo);
			return -EINVAL;
		}

		switch (sa->auth_algo) {
		case RTE_CRYPTO_AUTH_NULL:
		case RTE_CRYPTO_AUTH_SHA1_HMAC:
		case RTE_CRYPTO_AUTH_SHA256_HMAC:
			sym_cop->auth.data.offset = ip_hdr_len;
			sym_cop->auth.data.length = sizeof(struct rte_esp_hdr) +
				sa->iv_len + payload_len;
			break;
		default:
			RTE_LOG(ERR, IPSEC_ESP, "unsupported auth algorithm %u\n",
					sa->auth_algo);
			return -EINVAL;
		}

		sym_cop->auth.digest.data = rte_pktmbuf_mtod_offset(m, void*,
				rte_pktmbuf_pkt_len(m) - sa->digest_len);
		sym_cop->auth.digest.phys_addr = rte_pktmbuf_iova_offset(m,
				rte_pktmbuf_pkt_len(m) - sa->digest_len);
	}

	return 0;
}
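
/*
 * Verify the completed crypto operation, validate and strip the ESP
 * trailer (padding, pad length, next header) plus the ICV, and restore
 * the inner packet headers.
 */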
int
esp_inbound_post(struct rte_mbuf *m, struct ipsec_sa *sa,
		struct rte_crypto_op *cop)
{
	struct ip *ip4, *ip;
	struct ip6_hdr *ip6;
	uint8_t *nexthdr, *pad_len;
	uint8_t *padding;
	uint16_t i;
	struct rte_ipsec_session *ips;

	RTE_ASSERT(m != NULL);
	RTE_ASSERT(sa != NULL);
	RTE_ASSERT(cop != NULL);

	ips = ipsec_get_primary_session(sa);
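
	/*
	 * Inline sessions are processed by the NIC itself; map the
	 * PKT_RX_SEC_OFFLOAD* flags onto a crypto-op status so the
	 * common error check below covers both modes.
	 */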
	if ((ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) ||
			(ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO)) {
		if (m->ol_flags & PKT_RX_SEC_OFFLOAD) {
			if (m->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED)
				cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
			else
				cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
		} else
			cop->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
	}

	if (cop->status != RTE_CRYPTO_OP_STATUS_SUCCESS) {
		RTE_LOG(ERR, IPSEC_ESP, "%s() failed crypto op\n", __func__);
		return -1;
	}
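
	/*
	 * Locate the next-header byte: with RX trailer offload the HW
	 * reports it in the mbuf; otherwise it is the last byte before
	 * the ICV, preceded by the pad-length byte and the sequential
	 * 1, 2, 3, ... padding that RFC 4303 mandates.
	 */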
	if (ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO &&
			ips->security.ol_flags & RTE_SECURITY_RX_HW_TRAILER_OFFLOAD) {
		nexthdr = &m->inner_esp_next_proto;
	} else {
		nexthdr = rte_pktmbuf_mtod_offset(m, uint8_t*,
				rte_pktmbuf_pkt_len(m) - sa->digest_len - 1);
		pad_len = nexthdr - 1;

		padding = pad_len - *pad_len;
		for (i = 0; i < *pad_len; i++) {
			if (padding[i] != i + 1) {
				RTE_LOG(ERR, IPSEC_ESP, "invalid padding\n");
				return -EINVAL;
			}
		}

		if (rte_pktmbuf_trim(m, *pad_len + 2 + sa->digest_len)) {
			RTE_LOG(ERR, IPSEC_ESP,
					"failed to remove pad_len + digest\n");
			return -EINVAL;
		}
	}
	if (unlikely(IS_TRANSPORT(sa->flags))) {
		ip = rte_pktmbuf_mtod(m, struct ip *);
		ip4 = (struct ip *)rte_pktmbuf_adj(m,
				sizeof(struct rte_esp_hdr) + sa->iv_len);
		if (likely(ip->ip_v == IPVERSION)) {
			memmove(ip4, ip, ip->ip_hl * 4);
			ip4->ip_p = *nexthdr;
			ip4->ip_len = htons(rte_pktmbuf_data_len(m));
		} else {
			ip6 = (struct ip6_hdr *)ip4;
			/* XXX No option headers supported */
			memmove(ip6, ip, sizeof(struct ip6_hdr));
			ip6->ip6_nxt = *nexthdr;
			ip6->ip6_plen = htons(rte_pktmbuf_data_len(m) -
					sizeof(struct ip6_hdr));
		}
	} else
		ipip_inbound(m, sizeof(struct rte_esp_hdr) + sa->iv_len);

	return 0;
}
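
/*
 * Build an outbound ESP packet: size and append the trailer, prepend
 * tunnel or transport headers, fill in SPI/sequence/IV and, for
 * lookaside sessions, program the crypto operation that encrypts it.
 */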
int
esp_outbound(struct rte_mbuf *m, struct ipsec_sa *sa,
		struct rte_crypto_op *cop)
{
	struct ip *ip4;
	struct ip6_hdr *ip6;
	struct rte_esp_hdr *esp = NULL;
	uint8_t *padding = NULL, *new_ip, nlp;
	struct rte_crypto_sym_op *sym_cop;
	int32_t i;
	uint16_t pad_payload_len, pad_len, ip_hdr_len;
	struct rte_ipsec_session *ips;

	RTE_ASSERT(m != NULL);
	RTE_ASSERT(sa != NULL);

	ips = ipsec_get_primary_session(sa);
	ip_hdr_len = 0;

	ip4 = rte_pktmbuf_mtod(m, struct ip *);
	if (likely(ip4->ip_v == IPVERSION)) {
		if (unlikely(IS_TRANSPORT(sa->flags))) {
			ip_hdr_len = ip4->ip_hl * 4;
			nlp = ip4->ip_p;
		} else
			nlp = IPPROTO_IPIP;
	} else if (ip4->ip_v == IP6_VERSION) {
		if (unlikely(IS_TRANSPORT(sa->flags))) {
			/* XXX No option headers supported */
			ip_hdr_len = sizeof(struct ip6_hdr);
			ip6 = (struct ip6_hdr *)ip4;
			nlp = ip6->ip6_nxt;
		} else
			nlp = IPPROTO_IPV6;
	} else {
		RTE_LOG(ERR, IPSEC_ESP, "invalid IP packet type %d\n",
				ip4->ip_v);
		return -EINVAL;
	}
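
	/*
	 * Pad the ESP payload (plus the two trailer bytes: pad length
	 * and next header) up to the cipher block size; pad_len is the
	 * total number of trailer bytes to append, those two included.
	 */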
	/* Padded payload length */
	pad_payload_len = RTE_ALIGN_CEIL(rte_pktmbuf_pkt_len(m) -
			ip_hdr_len + 2, sa->block_size);
	pad_len = pad_payload_len + ip_hdr_len - rte_pktmbuf_pkt_len(m);

	RTE_ASSERT(IS_TUNNEL(sa->flags) || IS_TRANSPORT(sa->flags));

	if (likely(IS_IP4_TUNNEL(sa->flags)))
		ip_hdr_len = sizeof(struct ip);
	else if (IS_IP6_TUNNEL(sa->flags))
		ip_hdr_len = sizeof(struct ip6_hdr);
	else if (!IS_TRANSPORT(sa->flags)) {
		RTE_LOG(ERR, IPSEC_ESP, "Unsupported SA flags: 0x%x\n",
				sa->flags);
		return -EINVAL;
	}

	/* Check maximum packet size */
	if (unlikely(ip_hdr_len + sizeof(struct rte_esp_hdr) + sa->iv_len +
			pad_payload_len + sa->digest_len > IP_MAXPACKET)) {
		RTE_LOG(ERR, IPSEC_ESP, "ipsec packet is too big\n");
		return -EINVAL;
	}

	/* Add trailer padding if it is not constructed by HW */
	if (ips->type != RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
			!(ips->security.ol_flags &
				RTE_SECURITY_TX_HW_TRAILER_OFFLOAD)) {
		padding = (uint8_t *)rte_pktmbuf_append(m, pad_len +
				sa->digest_len);
		if (unlikely(padding == NULL)) {
			RTE_LOG(ERR, IPSEC_ESP,
					"not enough mbuf trailing space\n");
			return -ENOSPC;
		}
		rte_prefetch0(padding);
	}
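
	/*
	 * Make room for the ESP header and IV: tunnel modes prepend a
	 * fresh outer IP header, transport mode shifts the existing one
	 * forward.
	 */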
	switch (WITHOUT_TRANSPORT_VERSION(sa->flags)) {
	case IP4_TUNNEL:
		ip4 = ip4ip_outbound(m, sizeof(struct rte_esp_hdr) + sa->iv_len,
				&sa->src, &sa->dst);
		esp = (struct rte_esp_hdr *)(ip4 + 1);
		break;
	case IP6_TUNNEL:
		ip6 = ip6ip_outbound(m, sizeof(struct rte_esp_hdr) + sa->iv_len,
				&sa->src, &sa->dst);
		esp = (struct rte_esp_hdr *)(ip6 + 1);
		break;
	case TRANSPORT:
		new_ip = (uint8_t *)rte_pktmbuf_prepend(m,
				sizeof(struct rte_esp_hdr) + sa->iv_len);
		memmove(new_ip, ip4, ip_hdr_len);
		esp = (struct rte_esp_hdr *)(new_ip + ip_hdr_len);
		ip4 = (struct ip *)new_ip;
		if (likely(ip4->ip_v == IPVERSION)) {
			ip4->ip_p = IPPROTO_ESP;
			ip4->ip_len = htons(rte_pktmbuf_data_len(m));
		} else {
			ip6 = (struct ip6_hdr *)new_ip;
			ip6->ip6_nxt = IPPROTO_ESP;
			ip6->ip6_plen = htons(rte_pktmbuf_data_len(m) -
					sizeof(struct ip6_hdr));
		}
	}

	sa->seq++;
	esp->spi = rte_cpu_to_be_32(sa->spi);
	esp->seq = rte_cpu_to_be_32((uint32_t)sa->seq);
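
	/*
	 * Per-packet IV: AES-GCM and AES-CTR reuse the 64-bit sequence
	 * number, which is unique per SA. The CBC path below writes a
	 * zero IV for simplicity; RFC 3602 calls for an unpredictable
	 * IV, so production code should randomize it instead.
	 */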
	uint64_t *iv = (uint64_t *)(esp + 1);
	if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_GCM) {
		*iv = rte_cpu_to_be_64(sa->seq);
	} else {
		switch (sa->cipher_algo) {
		case RTE_CRYPTO_CIPHER_NULL:
		case RTE_CRYPTO_CIPHER_3DES_CBC:
		case RTE_CRYPTO_CIPHER_AES_CBC:
			memset(iv, 0, sa->iv_len);
			break;
		case RTE_CRYPTO_CIPHER_AES_CTR:
			*iv = rte_cpu_to_be_64(sa->seq);
			break;
		default:
			RTE_LOG(ERR, IPSEC_ESP,
				"unsupported cipher algorithm %u\n",
				sa->cipher_algo);
			return -EINVAL;
		}
	}

	if (ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
		if (ips->security.ol_flags &
				RTE_SECURITY_TX_HW_TRAILER_OFFLOAD) {
			/* Set the inner esp next protocol for HW trailer */
			m->inner_esp_next_proto = nlp;
			m->packet_type |= RTE_PTYPE_TUNNEL_ESP;
		} else {
			padding[pad_len - 2] = pad_len - 2;
			padding[pad_len - 1] = nlp;
		}
		goto done;
	}

	RTE_ASSERT(cop != NULL);
	sym_cop = get_sym_cop(cop);
	sym_cop->m_src = m;

	if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_GCM) {
		uint8_t *aad;

		sym_cop->aead.data.offset = ip_hdr_len +
			sizeof(struct rte_esp_hdr) + sa->iv_len;
		sym_cop->aead.data.length = pad_payload_len;

		/* Fill pad_len using default sequential scheme */
		for (i = 0; i < pad_len - 2; i++)
			padding[i] = i + 1;
		padding[pad_len - 2] = pad_len - 2;
		padding[pad_len - 1] = nlp;

		struct cnt_blk *icb = get_cnt_blk(m);
		icb->salt = sa->salt;
		icb->iv = rte_cpu_to_be_64(sa->seq);
		icb->cnt = rte_cpu_to_be_32(1);

		aad = get_aad(m);
		memcpy(aad, esp, 8);
		sym_cop->aead.aad.data = aad;
		sym_cop->aead.aad.phys_addr = rte_pktmbuf_iova_offset(m,
				aad - rte_pktmbuf_mtod(m, uint8_t *));

		sym_cop->aead.digest.data = rte_pktmbuf_mtod_offset(m, uint8_t *,
				rte_pktmbuf_pkt_len(m) - sa->digest_len);
		sym_cop->aead.digest.phys_addr = rte_pktmbuf_iova_offset(m,
				rte_pktmbuf_pkt_len(m) - sa->digest_len);
	} else {
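		/*
		 * For CBC ciphers the packet IV field is part of the
		 * encrypted range (the offset starts right after the ESP
		 * header); CTR encrypts only the padded payload and
		 * carries the IV in the counter block instead.
		 */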
		switch (sa->cipher_algo) {
		case RTE_CRYPTO_CIPHER_NULL:
		case RTE_CRYPTO_CIPHER_3DES_CBC:
		case RTE_CRYPTO_CIPHER_AES_CBC:
			sym_cop->cipher.data.offset = ip_hdr_len +
				sizeof(struct rte_esp_hdr);
			sym_cop->cipher.data.length = pad_payload_len + sa->iv_len;
			break;
		case RTE_CRYPTO_CIPHER_AES_CTR:
			sym_cop->cipher.data.offset = ip_hdr_len +
				sizeof(struct rte_esp_hdr) + sa->iv_len;
			sym_cop->cipher.data.length = pad_payload_len;
			break;
		default:
			RTE_LOG(ERR, IPSEC_ESP, "unsupported cipher algorithm %u\n",
					sa->cipher_algo);
			return -EINVAL;
		}

		/* Fill pad_len using default sequential scheme */
		for (i = 0; i < pad_len - 2; i++)
			padding[i] = i + 1;
		padding[pad_len - 2] = pad_len - 2;
		padding[pad_len - 1] = nlp;

		struct cnt_blk *icb = get_cnt_blk(m);
		icb->salt = sa->salt;
		icb->iv = rte_cpu_to_be_64(sa->seq);
		icb->cnt = rte_cpu_to_be_32(1);

		switch (sa->auth_algo) {
		case RTE_CRYPTO_AUTH_NULL:
		case RTE_CRYPTO_AUTH_SHA1_HMAC:
		case RTE_CRYPTO_AUTH_SHA256_HMAC:
			sym_cop->auth.data.offset = ip_hdr_len;
			sym_cop->auth.data.length = sizeof(struct rte_esp_hdr) +
				sa->iv_len + pad_payload_len;
			break;
		default:
			RTE_LOG(ERR, IPSEC_ESP, "unsupported auth algorithm %u\n",
					sa->auth_algo);
			return -EINVAL;
		}

		sym_cop->auth.digest.data = rte_pktmbuf_mtod_offset(m, uint8_t *,
				rte_pktmbuf_pkt_len(m) - sa->digest_len);
		sym_cop->auth.digest.phys_addr = rte_pktmbuf_iova_offset(m,
				rte_pktmbuf_pkt_len(m) - sa->digest_len);
	}

done:
	return 0;
}
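
/*
 * Finalize an outbound packet: inline sessions just need the TX
 * security offload flag set, while lookaside sessions must have
 * completed their crypto operation successfully.
 */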
int
esp_outbound_post(struct rte_mbuf *m,
		struct ipsec_sa *sa,
		struct rte_crypto_op *cop)
{
	enum rte_security_session_action_type type;

	RTE_ASSERT(m != NULL);
	RTE_ASSERT(sa != NULL);

	type = ipsec_get_action_type(sa);

	if ((type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) ||
			(type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO)) {
		m->ol_flags |= PKT_TX_SEC_OFFLOAD;
	} else {
		RTE_ASSERT(cop != NULL);
		if (cop->status != RTE_CRYPTO_OP_STATUS_SUCCESS) {
			RTE_LOG(ERR, IPSEC_ESP, "%s() failed crypto op\n",
					__func__);
			return -1;
		}
	}

	return 0;
}