/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2017 Intel Corporation
 */

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>

#include <rte_common.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>
#include <rte_random.h>

/* Local headers: struct ipsec_sa, struct esp_hdr, the get_sym_cop()/
 * get_cnt_blk()/get_aad() mbuf-metadata helpers and the ipip_*() helpers.
 */
#include "ipsec.h"
#include "esp.h"
#include "ipip.h"
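
/*
 * ESP transforms: esp_inbound()/esp_outbound() describe the crypto work for
 * a packet before it is enqueued to a cryptodev (or handed to an inline
 * device), and the *_post() variants fix the packet up after the crypto
 * operation has completed.
 */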
int
esp_inbound(struct rte_mbuf *m, struct ipsec_sa *sa,
                struct rte_crypto_op *cop)
{
        struct ip *ip4;
        struct rte_crypto_sym_op *sym_cop;
        int32_t payload_len, ip_hdr_len;

        RTE_ASSERT(sa != NULL);

        /* Inline-crypto SAs: the NIC decrypts on RX, nothing to prepare here */
        if (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO)
                return 0;

        RTE_ASSERT(m != NULL);
        RTE_ASSERT(cop != NULL);
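
        /*
         * IPv4 and IPv6 share the version nibble in the first header byte,
         * so a struct ip view is enough to dispatch on ip_v.
         */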
        ip4 = rte_pktmbuf_mtod(m, struct ip *);
        if (likely(ip4->ip_v == IPVERSION))
                ip_hdr_len = ip4->ip_hl * 4;
        else if (ip4->ip_v == IP6_VERSION)
                /* XXX No option headers supported */
                ip_hdr_len = sizeof(struct ip6_hdr);
        else {
                RTE_LOG(ERR, IPSEC_ESP, "invalid IP packet type %d\n",
                                ip4->ip_v);
                return -EINVAL;
        }
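
        /*
         * The ciphertext is everything between the ESP header/IV and the
         * trailing ICV: total length minus IP header, ESP header, IV and
         * digest.
         */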
        payload_len = rte_pktmbuf_pkt_len(m) - ip_hdr_len -
                        sizeof(struct esp_hdr) - sa->iv_len - sa->digest_len;
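
        /* sa->block_size is a power of two, so the mask below is a cheap
         * "multiple of the cipher block size" test.
         */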
        if ((payload_len & (sa->block_size - 1)) || (payload_len <= 0)) {
                RTE_LOG_DP(DEBUG, IPSEC_ESP, "payload %d not multiple of %u\n",
                                payload_len, sa->block_size);
                return -EINVAL;
        }

        sym_cop = get_sym_cop(cop);
        sym_cop->m_src = m;
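
        /*
         * AEAD (AES-GCM) uses the single aead descriptor of the crypto op;
         * all other algorithms use separate cipher and auth descriptors.
         */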
        if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_GCM) {
                sym_cop->aead.data.offset = ip_hdr_len +
                                sizeof(struct esp_hdr) + sa->iv_len;
                sym_cop->aead.data.length = payload_len;
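
                /*
                 * Build the counter block per RFC 4106: the 4-byte salt from
                 * the SA, the 8-byte IV carried in the packet, and a 32-bit
                 * block counter starting at 1.
                 */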
                struct cnt_blk *icb;
                uint8_t *aad;
                uint8_t *iv = RTE_PTR_ADD(ip4, ip_hdr_len +
                                sizeof(struct esp_hdr));

                icb = get_cnt_blk(m);
                icb->salt = sa->salt;
                memcpy(&icb->iv, iv, 8);
                icb->cnt = rte_cpu_to_be_32(1);
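
                /*
                 * The AAD for ESP-GCM is the ESP header itself (SPI plus
                 * sequence number), which sits immediately before the IV.
                 */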
                aad = get_aad(m);
                memcpy(aad, iv - sizeof(struct esp_hdr), 8);
                sym_cop->aead.aad.data = aad;
                sym_cop->aead.aad.phys_addr = rte_pktmbuf_iova_offset(m,
                                aad - rte_pktmbuf_mtod(m, uint8_t *));

                sym_cop->aead.digest.data = rte_pktmbuf_mtod_offset(m, void *,
                                rte_pktmbuf_pkt_len(m) - sa->digest_len);
                sym_cop->aead.digest.phys_addr = rte_pktmbuf_iova_offset(m,
                                rte_pktmbuf_pkt_len(m) - sa->digest_len);
        } else {
                sym_cop->cipher.data.offset = ip_hdr_len +
                                sizeof(struct esp_hdr) + sa->iv_len;
                sym_cop->cipher.data.length = payload_len;

                struct cnt_blk *icb;
                uint8_t *iv = RTE_PTR_ADD(ip4, ip_hdr_len +
                                sizeof(struct esp_hdr));
                uint8_t *iv_ptr = rte_crypto_op_ctod_offset(cop,
                                uint8_t *, IV_OFFSET);
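
                /*
                 * NULL/CBC pass the packet IV to the device through the
                 * crypto op; CTR builds a GCM-style counter block instead.
                 */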
                switch (sa->cipher_algo) {
                case RTE_CRYPTO_CIPHER_NULL:
                case RTE_CRYPTO_CIPHER_AES_CBC:
                        /* Copy IV at the end of the crypto operation */
                        rte_memcpy(iv_ptr, iv, sa->iv_len);
                        break;
                case RTE_CRYPTO_CIPHER_AES_CTR:
                        icb = get_cnt_blk(m);
                        icb->salt = sa->salt;
                        memcpy(&icb->iv, iv, 8);
                        icb->cnt = rte_cpu_to_be_32(1);
                        break;
                default:
                        RTE_LOG(ERR, IPSEC_ESP,
                                        "unsupported cipher algorithm %u\n",
                                        sa->cipher_algo);
                        return -EINVAL;
                }
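
                /*
                 * ESP integrity (RFC 4303) covers the ESP header, IV and
                 * ciphertext, but neither the outer IP header nor the ICV.
                 */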
                switch (sa->auth_algo) {
                case RTE_CRYPTO_AUTH_NULL:
                case RTE_CRYPTO_AUTH_SHA1_HMAC:
                case RTE_CRYPTO_AUTH_SHA256_HMAC:
                        sym_cop->auth.data.offset = ip_hdr_len;
                        sym_cop->auth.data.length = sizeof(struct esp_hdr) +
                                        sa->iv_len + payload_len;
                        break;
                default:
                        RTE_LOG(ERR, IPSEC_ESP,
                                        "unsupported auth algorithm %u\n",
                                        sa->auth_algo);
                        return -EINVAL;
                }

                sym_cop->auth.digest.data = rte_pktmbuf_mtod_offset(m, void *,
                                rte_pktmbuf_pkt_len(m) - sa->digest_len);
                sym_cop->auth.digest.phys_addr = rte_pktmbuf_iova_offset(m,
                                rte_pktmbuf_pkt_len(m) - sa->digest_len);
        }

        return 0;
}

int
esp_inbound_post(struct rte_mbuf *m, struct ipsec_sa *sa,
                struct rte_crypto_op *cop)
{
        struct ip *ip4, *ip;
        struct ip6_hdr *ip6;
        uint8_t *nexthdr, *pad_len;
        uint8_t *padding;
        uint16_t i;

        RTE_ASSERT(m != NULL);
        RTE_ASSERT(sa != NULL);
        RTE_ASSERT(cop != NULL);
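
        /*
         * For inline SAs the device has already done the crypto on RX:
         * translate the mbuf security offload flags into a crypto-op status.
         */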
        if ((sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) ||
                        (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO)) {
                if (m->ol_flags & PKT_RX_SEC_OFFLOAD) {
                        if (m->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED)
                                cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
                        else
                                cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
                } else
                        cop->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
        }

        if (cop->status != RTE_CRYPTO_OP_STATUS_SUCCESS) {
                RTE_LOG(ERR, IPSEC_ESP, "failed crypto op\n");
                return -1;
        }

        if (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO &&
                        sa->ol_flags & RTE_SECURITY_RX_HW_TRAILER_OFFLOAD) {
                /* HW already stripped the trailer and saved the next proto */
                nexthdr = &m->inner_esp_next_proto;
        } else {
                nexthdr = rte_pktmbuf_mtod_offset(m, uint8_t *,
                                rte_pktmbuf_pkt_len(m) - sa->digest_len - 1);
                pad_len = nexthdr - 1;
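
                /*
                 * ESP trailer (RFC 4303): padding bytes 1, 2, 3, ... then a
                 * pad-length byte and a next-header byte, followed by the
                 * ICV. Verify the default monotonic padding before trimming.
                 */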
                padding = pad_len - *pad_len;
                for (i = 0; i < *pad_len; i++) {
                        if (padding[i] != i + 1) {
                                RTE_LOG(ERR, IPSEC_ESP, "invalid padding\n");
                                return -EINVAL;
                        }
                }

                if (rte_pktmbuf_trim(m, *pad_len + 2 + sa->digest_len)) {
                        RTE_LOG(ERR, IPSEC_ESP,
                                        "failed to remove pad_len + digest\n");
                        return -EINVAL;
                }
        }

        if (unlikely(sa->flags == TRANSPORT)) {
                ip = rte_pktmbuf_mtod(m, struct ip *);
                ip4 = (struct ip *)rte_pktmbuf_adj(m,
                                sizeof(struct esp_hdr) + sa->iv_len);
                if (likely(ip->ip_v == IPVERSION)) {
                        memmove(ip4, ip, ip->ip_hl * 4);
                        ip4->ip_p = *nexthdr;
                        ip4->ip_len = htons(rte_pktmbuf_data_len(m));
                } else {
                        ip6 = (struct ip6_hdr *)ip4;
                        /* XXX No option headers supported */
                        memmove(ip6, ip, sizeof(struct ip6_hdr));
                        ip6->ip6_nxt = *nexthdr;
                        ip6->ip6_plen = htons(rte_pktmbuf_data_len(m) -
                                        sizeof(struct ip6_hdr));
                }
        } else
                ipip_inbound(m, sizeof(struct esp_hdr) + sa->iv_len);

        return 0;
}

int
esp_outbound(struct rte_mbuf *m, struct ipsec_sa *sa,
                struct rte_crypto_op *cop)
{
        struct ip *ip4;
        struct ip6_hdr *ip6;
        struct esp_hdr *esp = NULL;
        uint8_t *padding = NULL, *new_ip, nlp;
        struct rte_crypto_sym_op *sym_cop;
        int32_t i;
        uint16_t pad_payload_len, pad_len, ip_hdr_len;

        RTE_ASSERT(m != NULL);
        RTE_ASSERT(sa != NULL);

        /* Tunnel modes treat the whole packet as payload, so ip_hdr_len
         * stays 0 while the trailer is being sized below.
         */
        ip_hdr_len = 0;

        ip4 = rte_pktmbuf_mtod(m, struct ip *);
        if (likely(ip4->ip_v == IPVERSION)) {
                if (unlikely(sa->flags == TRANSPORT)) {
                        ip_hdr_len = ip4->ip_hl * 4;
                        nlp = ip4->ip_p;
                } else
                        nlp = IPPROTO_IPIP;
        } else if (ip4->ip_v == IP6_VERSION) {
                if (unlikely(sa->flags == TRANSPORT)) {
                        /* XXX No option headers supported */
                        ip_hdr_len = sizeof(struct ip6_hdr);
                        ip6 = (struct ip6_hdr *)ip4;
                        nlp = ip6->ip6_nxt;
                } else
                        nlp = IPPROTO_IPV6;
        } else {
                RTE_LOG(ERR, IPSEC_ESP, "invalid IP packet type %d\n",
                                ip4->ip_v);
                return -EINVAL;
        }

        /* Payload length rounded up to the cipher block size; the "+ 2"
         * reserves room for the pad-length and next-header trailer bytes.
         */
        pad_payload_len = RTE_ALIGN_CEIL(rte_pktmbuf_pkt_len(m) -
                        ip_hdr_len + 2, sa->block_size);
        pad_len = pad_payload_len + ip_hdr_len - rte_pktmbuf_pkt_len(m);
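
        /*
         * Example: a 40-byte payload with a 16-byte block size gives
         * pad_payload_len = 48 and pad_len = 8, i.e. 6 padding bytes plus
         * the two trailer bytes.
         */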

        RTE_ASSERT(sa->flags == IP4_TUNNEL || sa->flags == IP6_TUNNEL ||
                        sa->flags == TRANSPORT);

        /* From here on ip_hdr_len is the length of the outer IP header */
        if (likely(sa->flags == IP4_TUNNEL))
                ip_hdr_len = sizeof(struct ip);
        else if (sa->flags == IP6_TUNNEL)
                ip_hdr_len = sizeof(struct ip6_hdr);
        else if (sa->flags != TRANSPORT) {
                RTE_LOG(ERR, IPSEC_ESP, "Unsupported SA flags: 0x%x\n",
                                sa->flags);
                return -EINVAL;
        }

        /* Check maximum packet size */
        if (unlikely(ip_hdr_len + sizeof(struct esp_hdr) + sa->iv_len +
                        pad_payload_len + sa->digest_len > IP_MAXPACKET)) {
                RTE_LOG(ERR, IPSEC_ESP, "ipsec packet is too big\n");
                return -EINVAL;
        }

        /* Add trailer padding if it is not constructed by HW */
        if (sa->type != RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
                        !(sa->ol_flags & RTE_SECURITY_TX_HW_TRAILER_OFFLOAD)) {
                padding = (uint8_t *)rte_pktmbuf_append(m, pad_len +
                                sa->digest_len);
                if (unlikely(padding == NULL)) {
                        RTE_LOG(ERR, IPSEC_ESP,
                                        "not enough mbuf trailing space\n");
                        return -ENOSPC;
                }
                rte_prefetch0(padding);
        }
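
        /*
         * Make room at the front of the packet: tunnel modes prepend a new
         * outer IP header plus ESP header and IV, transport mode keeps the
         * existing IP header and inserts the ESP header and IV behind it.
         */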
        switch (sa->flags) {
        case IP4_TUNNEL:
                ip4 = ip4ip_outbound(m, sizeof(struct esp_hdr) + sa->iv_len,
                                &sa->src, &sa->dst);
                esp = (struct esp_hdr *)(ip4 + 1);
                break;
        case IP6_TUNNEL:
                ip6 = ip6ip_outbound(m, sizeof(struct esp_hdr) + sa->iv_len,
                                &sa->src, &sa->dst);
                esp = (struct esp_hdr *)(ip6 + 1);
                break;
        case TRANSPORT:
                new_ip = (uint8_t *)rte_pktmbuf_prepend(m,
                                sizeof(struct esp_hdr) + sa->iv_len);
                memmove(new_ip, ip4, ip_hdr_len);
                esp = (struct esp_hdr *)(new_ip + ip_hdr_len);
                ip4 = (struct ip *)new_ip;
                if (likely(ip4->ip_v == IPVERSION)) {
                        ip4->ip_p = IPPROTO_ESP;
                        ip4->ip_len = htons(rte_pktmbuf_data_len(m));
                } else {
                        ip6 = (struct ip6_hdr *)new_ip;
                        ip6->ip6_nxt = IPPROTO_ESP;
                        ip6->ip6_plen = htons(rte_pktmbuf_data_len(m) -
                                        sizeof(struct ip6_hdr));
                }
        }

        sa->seq++;
        esp->spi = rte_cpu_to_be_32(sa->spi);
        esp->seq = rte_cpu_to_be_32((uint32_t)sa->seq);
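
        /*
         * Per-packet IV: GCM and CTR use the (unique) sequence number. For
         * NULL/CBC the IV field is zeroed here; the cipher range below
         * starts before it, so the encrypted zero block effectively becomes
         * the transmitted IV.
         */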
        uint64_t *iv = (uint64_t *)(esp + 1);
        if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_GCM) {
                *iv = rte_cpu_to_be_64(sa->seq);
        } else {
                switch (sa->cipher_algo) {
                case RTE_CRYPTO_CIPHER_NULL:
                case RTE_CRYPTO_CIPHER_AES_CBC:
                        memset(iv, 0, sa->iv_len);
                        break;
                case RTE_CRYPTO_CIPHER_AES_CTR:
                        *iv = rte_cpu_to_be_64(sa->seq);
                        break;
                default:
                        RTE_LOG(ERR, IPSEC_ESP,
                                        "unsupported cipher algorithm %u\n",
                                        sa->cipher_algo);
                        return -EINVAL;
                }
        }

        if (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
                if (sa->ol_flags & RTE_SECURITY_TX_HW_TRAILER_OFFLOAD) {
                        /* Set the inner esp next protocol for HW trailer */
                        m->inner_esp_next_proto = nlp;
                        m->packet_type |= RTE_PTYPE_TUNNEL_ESP;
                } else {
                        padding[pad_len - 2] = pad_len - 2;
                        padding[pad_len - 1] = nlp;
                }
                return 0;
        }
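
        /* Lookaside path: describe the cipher/auth regions for a cryptodev */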
        RTE_ASSERT(cop != NULL);
        sym_cop = get_sym_cop(cop);
        sym_cop->m_src = m;

        if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_GCM) {
                uint8_t *aad;

                sym_cop->aead.data.offset = ip_hdr_len +
                                sizeof(struct esp_hdr) + sa->iv_len;
                sym_cop->aead.data.length = pad_payload_len;

                /* Fill pad_len using the default sequential scheme */
                for (i = 0; i < pad_len - 2; i++)
                        padding[i] = i + 1;
                padding[pad_len - 2] = pad_len - 2;
                padding[pad_len - 1] = nlp;

                struct cnt_blk *icb = get_cnt_blk(m);
                icb->salt = sa->salt;
                icb->iv = rte_cpu_to_be_64(sa->seq);
                icb->cnt = rte_cpu_to_be_32(1);
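
                /* The AAD is again the 8-byte ESP header (SPI + sequence) */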
                aad = get_aad(m);
                memcpy(aad, esp, 8);
                sym_cop->aead.aad.data = aad;
                sym_cop->aead.aad.phys_addr = rte_pktmbuf_iova_offset(m,
                                aad - rte_pktmbuf_mtod(m, uint8_t *));

                sym_cop->aead.digest.data = rte_pktmbuf_mtod_offset(m, uint8_t *,
                                rte_pktmbuf_pkt_len(m) - sa->digest_len);
                sym_cop->aead.digest.phys_addr = rte_pktmbuf_iova_offset(m,
                                rte_pktmbuf_pkt_len(m) - sa->digest_len);
        } else {
                /*
                 * NULL/CBC start the cipher range at the IV field so the
                 * zeroed IV is encrypted as well; CTR starts after the IV.
                 */
                switch (sa->cipher_algo) {
                case RTE_CRYPTO_CIPHER_NULL:
                case RTE_CRYPTO_CIPHER_AES_CBC:
                        sym_cop->cipher.data.offset = ip_hdr_len +
                                        sizeof(struct esp_hdr);
                        sym_cop->cipher.data.length = pad_payload_len +
                                        sa->iv_len;
                        break;
                case RTE_CRYPTO_CIPHER_AES_CTR:
                        sym_cop->cipher.data.offset = ip_hdr_len +
                                        sizeof(struct esp_hdr) + sa->iv_len;
                        sym_cop->cipher.data.length = pad_payload_len;
                        break;
                default:
                        RTE_LOG(ERR, IPSEC_ESP,
                                        "unsupported cipher algorithm %u\n",
                                        sa->cipher_algo);
                        return -EINVAL;
                }

                /* Fill pad_len using the default sequential scheme */
                for (i = 0; i < pad_len - 2; i++)
                        padding[i] = i + 1;
                padding[pad_len - 2] = pad_len - 2;
                padding[pad_len - 1] = nlp;

                struct cnt_blk *icb = get_cnt_blk(m);
                icb->salt = sa->salt;
                icb->iv = rte_cpu_to_be_64(sa->seq);
                icb->cnt = rte_cpu_to_be_32(1);
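
                /* Integrity covers ESP header + IV + padded payload, as on
                 * the inbound side.
                 */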
                switch (sa->auth_algo) {
                case RTE_CRYPTO_AUTH_NULL:
                case RTE_CRYPTO_AUTH_SHA1_HMAC:
                case RTE_CRYPTO_AUTH_SHA256_HMAC:
                        sym_cop->auth.data.offset = ip_hdr_len;
                        sym_cop->auth.data.length = sizeof(struct esp_hdr) +
                                        sa->iv_len + pad_payload_len;
                        break;
                default:
                        RTE_LOG(ERR, IPSEC_ESP,
                                        "unsupported auth algorithm %u\n",
                                        sa->auth_algo);
                        return -EINVAL;
                }

                sym_cop->auth.digest.data = rte_pktmbuf_mtod_offset(m, uint8_t *,
                                rte_pktmbuf_pkt_len(m) - sa->digest_len);
                sym_cop->auth.digest.phys_addr = rte_pktmbuf_iova_offset(m,
                                rte_pktmbuf_pkt_len(m) - sa->digest_len);
        }

        return 0;
}

int
esp_outbound_post(struct rte_mbuf *m,
                struct ipsec_sa *sa,
                struct rte_crypto_op *cop)
{
        RTE_ASSERT(m != NULL);
        RTE_ASSERT(sa != NULL);
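
        /*
         * Inline SAs are sealed by the NIC at TX time: just mark the mbuf.
         * For lookaside SAs, check that the crypto operation succeeded.
         */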
        if ((sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) ||
                        (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO)) {
                m->ol_flags |= PKT_TX_SEC_OFFLOAD;
        } else {
                RTE_ASSERT(cop != NULL);
                if (cop->status != RTE_CRYPTO_OP_STATUS_SUCCESS) {
                        RTE_LOG(ERR, IPSEC_ESP, "Failed crypto op\n");
                        return -1;
                }
        }

        return 0;
}