/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2017 Intel Corporation
 */
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>

#include <rte_common.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>
#include <rte_random.h>

/* Local headers from the ipsec-secgw example: struct ipsec_sa, the
 * get_sym_cop()/get_cnt_blk()/get_aad() mbuf helpers and the IP-in-IP code. */
#include "ipsec.h"
#include "esp.h"
#include "ipip.h"
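
/*
 * esp_inbound(): prepare the symmetric crypto operation that will decrypt
 * and authenticate an inbound ESP packet.  Inline-crypto SAs are skipped
 * here because the NIC already performs the transform on reception.
 */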
int
esp_inbound(struct rte_mbuf *m, struct ipsec_sa *sa,
		struct rte_crypto_op *cop)
{
	struct ip *ip4;
	struct rte_crypto_sym_op *sym_cop;
	int32_t payload_len, ip_hdr_len;

	RTE_ASSERT(sa != NULL);
	if (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO)
		return 0;

	RTE_ASSERT(m != NULL);
	RTE_ASSERT(cop != NULL);
	ip4 = rte_pktmbuf_mtod(m, struct ip *);
	if (likely(ip4->ip_v == IPVERSION))
		ip_hdr_len = ip4->ip_hl * 4;
	else if (ip4->ip_v == IP6_VERSION)
		/* XXX No option headers supported */
		ip_hdr_len = sizeof(struct ip6_hdr);
	else {
		RTE_LOG(ERR, IPSEC_ESP, "invalid IP packet type %d\n",
				ip4->ip_v);
		return -EINVAL;
	}
	payload_len = rte_pktmbuf_pkt_len(m) - ip_hdr_len -
			sizeof(struct esp_hdr) - sa->iv_len - sa->digest_len;

	if ((payload_len & (sa->block_size - 1)) || (payload_len <= 0)) {
		RTE_LOG_DP(DEBUG, IPSEC_ESP, "payload %d not multiple of %u\n",
				payload_len, sa->block_size);
		return -EINVAL;
	}

	sym_cop = get_sym_cop(cop);
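
	/* AES-GCM is programmed as a single AEAD transform; every other
	 * algorithm is described as separate cipher and auth regions. */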
	if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_GCM) {
		sym_cop->aead.data.offset = ip_hdr_len + sizeof(struct esp_hdr) +
			sa->iv_len;
		sym_cop->aead.data.length = payload_len;

		struct cnt_blk *icb;
		uint8_t *aad;
		uint8_t *iv = RTE_PTR_ADD(ip4, ip_hdr_len + sizeof(struct esp_hdr));

		/* Counter block: SA salt, packet IV and initial block counter */
		icb = get_cnt_blk(m);
		icb->salt = sa->salt;
		memcpy(&icb->iv, iv, 8);
		icb->cnt = rte_cpu_to_be_32(1);

		/* AAD is the ESP header (SPI + sequence number) before the IV */
		aad = get_aad(m);
		memcpy(aad, iv - sizeof(struct esp_hdr), 8);
		sym_cop->aead.aad.data = aad;
		sym_cop->aead.aad.phys_addr = rte_pktmbuf_iova_offset(m,
				aad - rte_pktmbuf_mtod(m, uint8_t *));

		sym_cop->aead.digest.data = rte_pktmbuf_mtod_offset(m, void*,
				rte_pktmbuf_pkt_len(m) - sa->digest_len);
		sym_cop->aead.digest.phys_addr = rte_pktmbuf_iova_offset(m,
				rte_pktmbuf_pkt_len(m) - sa->digest_len);
	} else {
		sym_cop->cipher.data.offset = ip_hdr_len + sizeof(struct esp_hdr) +
			sa->iv_len;
		sym_cop->cipher.data.length = payload_len;

		struct cnt_blk *icb;
		uint8_t *iv = RTE_PTR_ADD(ip4, ip_hdr_len + sizeof(struct esp_hdr));
		uint8_t *iv_ptr = rte_crypto_op_ctod_offset(cop,
				uint8_t *, IV_OFFSET);
		switch (sa->cipher_algo) {
		case RTE_CRYPTO_CIPHER_NULL:
		case RTE_CRYPTO_CIPHER_3DES_CBC:
		case RTE_CRYPTO_CIPHER_AES_CBC:
			/* Copy IV at the end of crypto operation */
			rte_memcpy(iv_ptr, iv, sa->iv_len);
			break;
		case RTE_CRYPTO_CIPHER_AES_CTR:
			icb = get_cnt_blk(m);
			icb->salt = sa->salt;
			memcpy(&icb->iv, iv, 8);
			icb->cnt = rte_cpu_to_be_32(1);
			break;
		default:
			RTE_LOG(ERR, IPSEC_ESP, "unsupported cipher algorithm %u\n",
					sa->cipher_algo);
			return -EINVAL;
		}
		switch (sa->auth_algo) {
		case RTE_CRYPTO_AUTH_NULL:
		case RTE_CRYPTO_AUTH_SHA1_HMAC:
		case RTE_CRYPTO_AUTH_SHA256_HMAC:
			sym_cop->auth.data.offset = ip_hdr_len;
			sym_cop->auth.data.length = sizeof(struct esp_hdr) +
					sa->iv_len + payload_len;
			break;
		default:
			RTE_LOG(ERR, IPSEC_ESP, "unsupported auth algorithm %u\n",
					sa->auth_algo);
			return -EINVAL;
		}
		sym_cop->auth.digest.data = rte_pktmbuf_mtod_offset(m, void*,
				rte_pktmbuf_pkt_len(m) - sa->digest_len);
		sym_cop->auth.digest.phys_addr = rte_pktmbuf_iova_offset(m,
				rte_pktmbuf_pkt_len(m) - sa->digest_len);
	}

	return 0;
}
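
/*
 * esp_inbound_post(): runs after the crypto device (or inline-crypto NIC)
 * has processed the packet.  Checks the operation status, validates the ESP
 * trailer padding, trims padding and ICV, and restores the inner IP header.
 */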
int
esp_inbound_post(struct rte_mbuf *m, struct ipsec_sa *sa,
		struct rte_crypto_op *cop)
{
	struct ip *ip4, *ip;
	struct ip6_hdr *ip6;
	uint8_t *nexthdr, *pad_len, *padding;
	uint16_t i;

	RTE_ASSERT(m != NULL);
	RTE_ASSERT(sa != NULL);
	RTE_ASSERT(cop != NULL);
	if ((sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) ||
			(sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO)) {
		if (m->ol_flags & PKT_RX_SEC_OFFLOAD) {
			if (m->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED)
				cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
			else
				cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
		} else
			cop->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
	}

	if (cop->status != RTE_CRYPTO_OP_STATUS_SUCCESS) {
		RTE_LOG(ERR, IPSEC_ESP, "failed crypto op\n");
		return -1;
	}
	if (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO &&
			sa->ol_flags & RTE_SECURITY_RX_HW_TRAILER_OFFLOAD) {
		nexthdr = &m->inner_esp_next_proto;
	} else {
		nexthdr = rte_pktmbuf_mtod_offset(m, uint8_t*,
				rte_pktmbuf_pkt_len(m) - sa->digest_len - 1);
		pad_len = nexthdr - 1;

		padding = pad_len - *pad_len;
		for (i = 0; i < *pad_len; i++) {
			if (padding[i] != i + 1) {
				RTE_LOG(ERR, IPSEC_ESP, "invalid padding\n");
				return -EINVAL;
			}
		}

		if (rte_pktmbuf_trim(m, *pad_len + 2 + sa->digest_len)) {
			RTE_LOG(ERR, IPSEC_ESP,
					"failed to remove pad_len + digest\n");
			return -EINVAL;
		}
	}
	if (unlikely(sa->flags == TRANSPORT)) {
		ip = rte_pktmbuf_mtod(m, struct ip *);
		ip4 = (struct ip *)rte_pktmbuf_adj(m,
				sizeof(struct esp_hdr) + sa->iv_len);
		if (likely(ip->ip_v == IPVERSION)) {
			memmove(ip4, ip, ip->ip_hl * 4);
			ip4->ip_p = *nexthdr;
			ip4->ip_len = htons(rte_pktmbuf_data_len(m));
		} else {
			ip6 = (struct ip6_hdr *)ip4;
			/* XXX No option headers supported */
			memmove(ip6, ip, sizeof(struct ip6_hdr));
			ip6->ip6_nxt = *nexthdr;
			ip6->ip6_plen = htons(rte_pktmbuf_data_len(m) -
					sizeof(struct ip6_hdr));
		}
	} else
		ipip_inbound(m, sizeof(struct esp_hdr) + sa->iv_len);

	return 0;
}
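
/*
 * esp_outbound(): compute the ESP trailer padding, build the outer header
 * (tunnel mode) or shift the existing one (transport mode), write the ESP
 * header and IV, and prepare the crypto operation for lookaside processing.
 */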
int
esp_outbound(struct rte_mbuf *m, struct ipsec_sa *sa,
		struct rte_crypto_op *cop)
{
	struct ip *ip4;
	struct ip6_hdr *ip6;
	struct esp_hdr *esp = NULL;
	uint8_t *padding = NULL, *new_ip, nlp;
	struct rte_crypto_sym_op *sym_cop;
	int32_t i;
	uint16_t pad_payload_len, pad_len, ip_hdr_len;

	RTE_ASSERT(m != NULL);
	RTE_ASSERT(sa != NULL);

	ip_hdr_len = 0;
	ip4 = rte_pktmbuf_mtod(m, struct ip *);
	if (likely(ip4->ip_v == IPVERSION)) {
		if (unlikely(sa->flags == TRANSPORT)) {
			ip_hdr_len = ip4->ip_hl * 4;
			nlp = ip4->ip_p;
		} else
			nlp = IPPROTO_IPIP;
	} else if (ip4->ip_v == IP6_VERSION) {
		if (unlikely(sa->flags == TRANSPORT)) {
			/* XXX No option headers supported */
			ip_hdr_len = sizeof(struct ip6_hdr);
			ip6 = (struct ip6_hdr *)ip4;
			nlp = ip6->ip6_nxt;
		} else
			nlp = IPPROTO_IPV6;
	} else {
		RTE_LOG(ERR, IPSEC_ESP, "invalid IP packet type %d\n",
				ip4->ip_v);
		return -EINVAL;
	}
	/* Padded payload length */
	pad_payload_len = RTE_ALIGN_CEIL(rte_pktmbuf_pkt_len(m) -
			ip_hdr_len + 2, sa->block_size);
	pad_len = pad_payload_len + ip_hdr_len - rte_pktmbuf_pkt_len(m);
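	/* pad_payload_len covers the payload plus the 2-byte ESP trailer
	 * (pad length + next header) rounded up to the cipher block size;
	 * pad_len is the number of bytes that must be appended to reach it. */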

	RTE_ASSERT(sa->flags == IP4_TUNNEL || sa->flags == IP6_TUNNEL ||
			sa->flags == TRANSPORT);
	if (likely(sa->flags == IP4_TUNNEL))
		ip_hdr_len = sizeof(struct ip);
	else if (sa->flags == IP6_TUNNEL)
		ip_hdr_len = sizeof(struct ip6_hdr);
	else if (sa->flags != TRANSPORT) {
		RTE_LOG(ERR, IPSEC_ESP, "Unsupported SA flags: 0x%x\n",
				sa->flags);
		return -EINVAL;
	}
	/* Check maximum packet size */
	if (unlikely(ip_hdr_len + sizeof(struct esp_hdr) + sa->iv_len +
			pad_payload_len + sa->digest_len > IP_MAXPACKET)) {
		RTE_LOG(ERR, IPSEC_ESP, "ipsec packet is too big\n");
		return -EINVAL;
	}
	/* Add trailer padding if it is not constructed by HW */
	if (sa->type != RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
			(sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO &&
			!(sa->ol_flags & RTE_SECURITY_TX_HW_TRAILER_OFFLOAD))) {
		padding = (uint8_t *)rte_pktmbuf_append(m, pad_len +
				sa->digest_len);
		if (unlikely(padding == NULL)) {
			RTE_LOG(ERR, IPSEC_ESP,
					"not enough mbuf trailing space\n");
			return -ENOSPC;
		}
		rte_prefetch0(padding);
	}
	switch (sa->flags) {
	case IP4_TUNNEL:
		ip4 = ip4ip_outbound(m, sizeof(struct esp_hdr) + sa->iv_len,
				&sa->src, &sa->dst);
		esp = (struct esp_hdr *)(ip4 + 1);
		break;
	case IP6_TUNNEL:
		ip6 = ip6ip_outbound(m, sizeof(struct esp_hdr) + sa->iv_len,
				&sa->src, &sa->dst);
		esp = (struct esp_hdr *)(ip6 + 1);
		break;
	case TRANSPORT:
		new_ip = (uint8_t *)rte_pktmbuf_prepend(m,
				sizeof(struct esp_hdr) + sa->iv_len);
		memmove(new_ip, ip4, ip_hdr_len);
		esp = (struct esp_hdr *)(new_ip + ip_hdr_len);
		ip4 = (struct ip *)new_ip;
		if (likely(ip4->ip_v == IPVERSION)) {
			ip4->ip_p = IPPROTO_ESP;
			ip4->ip_len = htons(rte_pktmbuf_data_len(m));
		} else {
			ip6 = (struct ip6_hdr *)new_ip;
			ip6->ip6_nxt = IPPROTO_ESP;
			ip6->ip6_plen = htons(rte_pktmbuf_data_len(m) -
					sizeof(struct ip6_hdr));
		}
	}
	/* Advance the SA sequence number; it also seeds the per-packet IV */
	sa->seq++;
	esp->spi = rte_cpu_to_be_32(sa->spi);
	esp->seq = rte_cpu_to_be_32((uint32_t)sa->seq);
	uint64_t *iv = (uint64_t *)(esp + 1);
	if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_GCM) {
		*iv = rte_cpu_to_be_64(sa->seq);
	} else {
		switch (sa->cipher_algo) {
		case RTE_CRYPTO_CIPHER_NULL:
		case RTE_CRYPTO_CIPHER_3DES_CBC:
		case RTE_CRYPTO_CIPHER_AES_CBC:
			memset(iv, 0, sa->iv_len);
			break;
		case RTE_CRYPTO_CIPHER_AES_CTR:
			*iv = rte_cpu_to_be_64(sa->seq);
			break;
		default:
			RTE_LOG(ERR, IPSEC_ESP,
				"unsupported cipher algorithm %u\n",
				sa->cipher_algo);
			return -EINVAL;
		}
	}
	if (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
		if (sa->ol_flags & RTE_SECURITY_TX_HW_TRAILER_OFFLOAD) {
			/* Set the inner esp next protocol for HW trailer */
			m->inner_esp_next_proto = nlp;
			m->packet_type |= RTE_PTYPE_TUNNEL_ESP;
		} else {
			padding[pad_len - 2] = pad_len - 2;
			padding[pad_len - 1] = nlp;
		}
		/* Inline crypto: the NIC performs the transform, no crypto op */
		return 0;
	}
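
	/* Lookaside path: describe the cipher/auth (or single AEAD) regions
	 * of the packet in the symmetric crypto operation. */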
	RTE_ASSERT(cop != NULL);
	sym_cop = get_sym_cop(cop);

	if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_GCM) {
		uint8_t *aad;

		sym_cop->aead.data.offset = ip_hdr_len +
			sizeof(struct esp_hdr) + sa->iv_len;
		sym_cop->aead.data.length = pad_payload_len;

		/* Fill pad_len using default sequential scheme */
		for (i = 0; i < pad_len - 2; i++)
			padding[i] = i + 1;
		padding[pad_len - 2] = pad_len - 2;
		padding[pad_len - 1] = nlp;

		struct cnt_blk *icb = get_cnt_blk(m);
		icb->salt = sa->salt;
		icb->iv = rte_cpu_to_be_64(sa->seq);
		icb->cnt = rte_cpu_to_be_32(1);

		/* AAD is the ESP header (SPI + sequence number) */
		aad = get_aad(m);
		memcpy(aad, esp, 8);
		sym_cop->aead.aad.data = aad;
		sym_cop->aead.aad.phys_addr = rte_pktmbuf_iova_offset(m,
				aad - rte_pktmbuf_mtod(m, uint8_t *));

		sym_cop->aead.digest.data = rte_pktmbuf_mtod_offset(m, uint8_t *,
				rte_pktmbuf_pkt_len(m) - sa->digest_len);
		sym_cop->aead.digest.phys_addr = rte_pktmbuf_iova_offset(m,
				rte_pktmbuf_pkt_len(m) - sa->digest_len);
	} else {
		switch (sa->cipher_algo) {
		case RTE_CRYPTO_CIPHER_NULL:
		case RTE_CRYPTO_CIPHER_3DES_CBC:
		case RTE_CRYPTO_CIPHER_AES_CBC:
			/* For CBC modes the cipher range starts at the IV field */
			sym_cop->cipher.data.offset = ip_hdr_len +
				sizeof(struct esp_hdr);
			sym_cop->cipher.data.length = pad_payload_len + sa->iv_len;
			break;
		case RTE_CRYPTO_CIPHER_AES_CTR:
			sym_cop->cipher.data.offset = ip_hdr_len +
				sizeof(struct esp_hdr) + sa->iv_len;
			sym_cop->cipher.data.length = pad_payload_len;
			break;
		default:
			RTE_LOG(ERR, IPSEC_ESP, "unsupported cipher algorithm %u\n",
					sa->cipher_algo);
			return -EINVAL;
		}
		/* Fill pad_len using default sequential scheme */
		for (i = 0; i < pad_len - 2; i++)
			padding[i] = i + 1;
		padding[pad_len - 2] = pad_len - 2;
		padding[pad_len - 1] = nlp;

		struct cnt_blk *icb = get_cnt_blk(m);
		icb->salt = sa->salt;
		icb->iv = rte_cpu_to_be_64(sa->seq);
		icb->cnt = rte_cpu_to_be_32(1);
		switch (sa->auth_algo) {
		case RTE_CRYPTO_AUTH_NULL:
		case RTE_CRYPTO_AUTH_SHA1_HMAC:
		case RTE_CRYPTO_AUTH_SHA256_HMAC:
			sym_cop->auth.data.offset = ip_hdr_len;
			sym_cop->auth.data.length = sizeof(struct esp_hdr) +
					sa->iv_len + pad_payload_len;
			break;
		default:
			RTE_LOG(ERR, IPSEC_ESP, "unsupported auth algorithm %u\n",
					sa->auth_algo);
			return -EINVAL;
		}
		sym_cop->auth.digest.data = rte_pktmbuf_mtod_offset(m, uint8_t *,
				rte_pktmbuf_pkt_len(m) - sa->digest_len);
		sym_cop->auth.digest.phys_addr = rte_pktmbuf_iova_offset(m,
				rte_pktmbuf_pkt_len(m) - sa->digest_len);
	}

	return 0;
}
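
/*
 * esp_outbound_post(): for inline SAs just mark the mbuf for Tx security
 * offload; for lookaside SAs verify that the crypto operation succeeded.
 */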
int
esp_outbound_post(struct rte_mbuf *m,
		struct ipsec_sa *sa,
		struct rte_crypto_op *cop)
{
	RTE_ASSERT(m != NULL);
	RTE_ASSERT(sa != NULL);
	if ((sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) ||
			(sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO)) {
		m->ol_flags |= PKT_TX_SEC_OFFLOAD;
	} else {
		RTE_ASSERT(cop != NULL);
		if (cop->status != RTE_CRYPTO_OP_STATUS_SUCCESS) {
			RTE_LOG(ERR, IPSEC_ESP, "Failed crypto op\n");
			return -1;
		}
	}
	return 0;
}