/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2017 Intel Corporation
 */

#include <stdint.h>
#include <string.h>
#include <errno.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>

#include <rte_common.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>
#include <rte_random.h>
#include <rte_mbuf.h>
#include <rte_esp.h>

/* Application-local headers: SA definitions, crypto-op helpers and the
 * IP-in-IP encapsulation routines used below.
 */
#include "ipsec.h"
#include "esp.h"
#include "ipip.h"
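
/*
 * Prepare an inbound (decrypt) ESP packet: parse the outer IP header,
 * validate the padded payload length and fill the symmetric crypto
 * operation (cipher/auth or AEAD offsets, IV, AAD and digest) for the
 * lookaside crypto device. Inline-crypto sessions need no preparation here.
 */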
int
esp_inbound(struct rte_mbuf *m, struct ipsec_sa *sa,
		struct rte_crypto_op *cop)
{
	struct ip *ip4;
	struct rte_crypto_sym_op *sym_cop;
	int32_t payload_len, ip_hdr_len;

	RTE_ASSERT(sa != NULL);
	if (ipsec_get_action_type(sa) ==
			RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO)
		return 0;

	RTE_ASSERT(m != NULL);
	RTE_ASSERT(cop != NULL);
	ip4 = rte_pktmbuf_mtod(m, struct ip *);
	if (likely(ip4->ip_v == IPVERSION))
		ip_hdr_len = ip4->ip_hl * 4;
	else if (ip4->ip_v == IP6_VERSION)
		/* XXX No option headers supported */
		ip_hdr_len = sizeof(struct ip6_hdr);
	else {
		RTE_LOG(ERR, IPSEC_ESP, "invalid IP packet type %d\n",
				ip4->ip_v);
		return -EINVAL;
	}

	payload_len = rte_pktmbuf_pkt_len(m) - ip_hdr_len -
		sizeof(struct rte_esp_hdr) - sa->iv_len - sa->digest_len;

	if ((payload_len & (sa->block_size - 1)) || (payload_len <= 0)) {
		RTE_LOG_DP(DEBUG, IPSEC_ESP, "payload %d not multiple of %u\n",
				payload_len, sa->block_size);
		return -EINVAL;
	}
	sym_cop = get_sym_cop(cop);
	sym_cop->m_src = m;
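
	/*
	 * AES-GCM is handled through the AEAD API (one transform covers both
	 * confidentiality and integrity); all other algorithms use separate
	 * cipher and auth transforms.
	 */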
	if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_GCM) {
		sym_cop->aead.data.offset =
			ip_hdr_len + sizeof(struct rte_esp_hdr) + sa->iv_len;
		sym_cop->aead.data.length = payload_len;

		struct cnt_blk *icb;
		uint8_t *aad;
		uint8_t *iv = RTE_PTR_ADD(ip4, ip_hdr_len +
				sizeof(struct rte_esp_hdr));

		/* Counter block: SA salt, IV from the packet, block counter */
		icb = get_cnt_blk(m);
		icb->salt = sa->salt;
		memcpy(&icb->iv, iv, 8);
		icb->cnt = rte_cpu_to_be_32(1);

		/* AAD: the 8 bytes of SPI + sequence number before the IV */
		aad = get_aad(m);
		memcpy(aad, iv - sizeof(struct rte_esp_hdr), 8);
		sym_cop->aead.aad.data = aad;
		sym_cop->aead.aad.phys_addr = rte_pktmbuf_iova_offset(m,
				aad - rte_pktmbuf_mtod(m, uint8_t *));

		sym_cop->aead.digest.data = rte_pktmbuf_mtod_offset(m, void *,
				rte_pktmbuf_pkt_len(m) - sa->digest_len);
		sym_cop->aead.digest.phys_addr = rte_pktmbuf_iova_offset(m,
				rte_pktmbuf_pkt_len(m) - sa->digest_len);
	} else {
		sym_cop->cipher.data.offset = ip_hdr_len +
			sizeof(struct rte_esp_hdr) +
			sa->iv_len;
		sym_cop->cipher.data.length = payload_len;

		struct cnt_blk *icb;
		uint8_t *iv = RTE_PTR_ADD(ip4, ip_hdr_len +
				sizeof(struct rte_esp_hdr));
		uint8_t *iv_ptr = rte_crypto_op_ctod_offset(cop,
				uint8_t *, IV_OFFSET);

		switch (sa->cipher_algo) {
		case RTE_CRYPTO_CIPHER_NULL:
		case RTE_CRYPTO_CIPHER_DES_CBC:
		case RTE_CRYPTO_CIPHER_3DES_CBC:
		case RTE_CRYPTO_CIPHER_AES_CBC:
			/* Copy IV at the end of crypto operation */
			rte_memcpy(iv_ptr, iv, sa->iv_len);
			break;
		case RTE_CRYPTO_CIPHER_AES_CTR:
			icb = get_cnt_blk(m);
			icb->salt = sa->salt;
			memcpy(&icb->iv, iv, 8);
			icb->cnt = rte_cpu_to_be_32(1);
			break;
		default:
			RTE_LOG(ERR, IPSEC_ESP, "unsupported cipher algorithm %u\n",
					sa->cipher_algo);
			return -EINVAL;
		}
		switch (sa->auth_algo) {
		case RTE_CRYPTO_AUTH_NULL:
		case RTE_CRYPTO_AUTH_SHA1_HMAC:
		case RTE_CRYPTO_AUTH_SHA256_HMAC:
		case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
			sym_cop->auth.data.offset = ip_hdr_len;
			sym_cop->auth.data.length = sizeof(struct rte_esp_hdr) +
				sa->iv_len + payload_len;
			break;
		default:
			RTE_LOG(ERR, IPSEC_ESP, "unsupported auth algorithm %u\n",
					sa->auth_algo);
			return -EINVAL;
		}

		sym_cop->auth.digest.data = rte_pktmbuf_mtod_offset(m, void *,
				rte_pktmbuf_pkt_len(m) - sa->digest_len);
		sym_cop->auth.digest.phys_addr = rte_pktmbuf_iova_offset(m,
				rte_pktmbuf_pkt_len(m) - sa->digest_len);
	}

	return 0;
}
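
/*
 * Post-process a decrypted inbound packet: check the crypto-op status,
 * verify and strip the ESP trailer (padding, pad length, next header, ICV)
 * and restore the inner IP header for transport mode, or remove the tunnel
 * header via ipip_inbound() for tunnel mode.
 */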
int
esp_inbound_post(struct rte_mbuf *m, struct ipsec_sa *sa,
		struct rte_crypto_op *cop)
{
	struct ip *ip4, *ip;
	struct ip6_hdr *ip6;
	uint8_t *nexthdr, *pad_len;
	uint8_t *padding;
	uint16_t i;
	struct rte_ipsec_session *ips;

	RTE_ASSERT(m != NULL);
	RTE_ASSERT(sa != NULL);
	RTE_ASSERT(cop != NULL);

	ips = ipsec_get_primary_session(sa);
	if ((ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) ||
			(ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO)) {
		/* For inline sessions the crypto status comes from the RX
		 * security offload flags set by the NIC.
		 */
		if (m->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD) {
			if (m->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED)
				cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
			else
				cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
		} else
			cop->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
	}
	if (cop->status != RTE_CRYPTO_OP_STATUS_SUCCESS) {
		RTE_LOG(ERR, IPSEC_ESP, "%s() failed crypto op\n", __func__);
		return -1;
	}
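
	/*
	 * ESP trailer layout at the end of the decrypted packet:
	 * padding bytes (1, 2, ...), pad length, next header, then the ICV.
	 * When the NIC strips the trailer (RX HW trailer offload) the next
	 * header is reported in m->inner_esp_next_proto instead.
	 */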
	if (ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO &&
			ips->security.ol_flags & RTE_SECURITY_RX_HW_TRAILER_OFFLOAD) {
		nexthdr = &m->inner_esp_next_proto;
	} else {
		nexthdr = rte_pktmbuf_mtod_offset(m, uint8_t *,
				rte_pktmbuf_pkt_len(m) - sa->digest_len - 1);
		pad_len = nexthdr - 1;

		padding = pad_len - *pad_len;
		for (i = 0; i < *pad_len; i++) {
			if (padding[i] != i + 1) {
				RTE_LOG(ERR, IPSEC_ESP, "invalid padding\n");
				return -EINVAL;
			}
		}

		if (rte_pktmbuf_trim(m, *pad_len + 2 + sa->digest_len)) {
			RTE_LOG(ERR, IPSEC_ESP,
					"failed to remove pad_len + digest\n");
			return -EINVAL;
		}
	}
	if (unlikely(IS_TRANSPORT(sa->flags))) {
		ip = rte_pktmbuf_mtod(m, struct ip *);
		ip4 = (struct ip *)rte_pktmbuf_adj(m,
				sizeof(struct rte_esp_hdr) + sa->iv_len);
		if (likely(ip->ip_v == IPVERSION)) {
			memmove(ip4, ip, ip->ip_hl * 4);
			ip4->ip_p = *nexthdr;
			ip4->ip_len = htons(rte_pktmbuf_data_len(m));
		} else {
			ip6 = (struct ip6_hdr *)ip4;
			/* XXX No option headers supported */
			memmove(ip6, ip, sizeof(struct ip6_hdr));
			ip6->ip6_nxt = *nexthdr;
			ip6->ip6_plen = htons(rte_pktmbuf_data_len(m) -
					sizeof(struct ip6_hdr));
		}
	} else
		ipip_inbound(m, sizeof(struct rte_esp_hdr) + sa->iv_len);

	return 0;
}
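
/*
 * Prepare an outbound (encrypt) ESP packet: compute the padded payload
 * length, append the ESP trailer, add the tunnel header (or shift the
 * transport-mode IP header), write the ESP header and IV and, for
 * lookaside sessions, fill the symmetric crypto operation.
 */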
int
esp_outbound(struct rte_mbuf *m, struct ipsec_sa *sa,
		struct rte_crypto_op *cop)
{
	struct ip *ip4;
	struct ip6_hdr *ip6;
	struct rte_esp_hdr *esp = NULL;
	uint8_t *padding = NULL, *new_ip, nlp;
	struct rte_crypto_sym_op *sym_cop;
	int32_t i;
	uint16_t pad_payload_len, pad_len, ip_hdr_len;
	struct rte_ipsec_session *ips;

	RTE_ASSERT(m != NULL);
	RTE_ASSERT(sa != NULL);

	ips = ipsec_get_primary_session(sa);

	ip_hdr_len = 0;
	ip4 = rte_pktmbuf_mtod(m, struct ip *);
	if (likely(ip4->ip_v == IPVERSION)) {
		if (unlikely(IS_TRANSPORT(sa->flags))) {
			ip_hdr_len = ip4->ip_hl * 4;
			nlp = ip4->ip_p;
		} else
			nlp = IPPROTO_IPIP;
	} else if (ip4->ip_v == IP6_VERSION) {
		if (unlikely(IS_TRANSPORT(sa->flags))) {
			/* XXX No option headers supported */
			ip_hdr_len = sizeof(struct ip6_hdr);
			ip6 = (struct ip6_hdr *)ip4;
			nlp = ip6->ip6_nxt;
		} else
			nlp = IPPROTO_IPV6;
	} else {
		RTE_LOG(ERR, IPSEC_ESP, "invalid IP packet type %d\n",
				ip4->ip_v);
		return -EINVAL;
	}
	/*
	 * Padded payload length: the payload plus the two ESP trailer bytes
	 * (pad length, next header) rounded up to the cipher block size.
	 */
	pad_payload_len = RTE_ALIGN_CEIL(rte_pktmbuf_pkt_len(m) -
			ip_hdr_len + 2, sa->block_size);
	pad_len = pad_payload_len + ip_hdr_len - rte_pktmbuf_pkt_len(m);

	RTE_ASSERT(IS_TUNNEL(sa->flags) || IS_TRANSPORT(sa->flags));

	if (likely(IS_IP4_TUNNEL(sa->flags)))
		ip_hdr_len = sizeof(struct ip);
	else if (IS_IP6_TUNNEL(sa->flags))
		ip_hdr_len = sizeof(struct ip6_hdr);
	else if (!IS_TRANSPORT(sa->flags)) {
		RTE_LOG(ERR, IPSEC_ESP, "Unsupported SA flags: 0x%x\n",
				sa->flags);
		return -EINVAL;
	}

	/* Check maximum packet size */
	if (unlikely(ip_hdr_len + sizeof(struct rte_esp_hdr) + sa->iv_len +
			pad_payload_len + sa->digest_len > IP_MAXPACKET)) {
		RTE_LOG(ERR, IPSEC_ESP, "ipsec packet is too big\n");
		return -EINVAL;
	}
	/* Add trailer padding if it is not constructed by HW */
	if (ips->type != RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
			(ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO &&
			 !(ips->security.ol_flags &
				 RTE_SECURITY_TX_HW_TRAILER_OFFLOAD))) {
		padding = (uint8_t *)rte_pktmbuf_append(m, pad_len +
				sa->digest_len);
		if (unlikely(padding == NULL)) {
			RTE_LOG(ERR, IPSEC_ESP,
					"not enough mbuf trailing space\n");
			return -ENOSPC;
		}
		rte_prefetch0(padding);
	}
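
	/*
	 * Encapsulate: for tunnel mode prepend a new outer IP header, for
	 * transport mode shift the existing IP header forward to make room
	 * for the ESP header and IV.
	 */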
	switch (WITHOUT_TRANSPORT_VERSION(sa->flags)) {
	case IP4_TUNNEL:
		ip4 = ip4ip_outbound(m, sizeof(struct rte_esp_hdr) + sa->iv_len,
				&sa->src, &sa->dst);
		esp = (struct rte_esp_hdr *)(ip4 + 1);
		break;
	case IP6_TUNNEL:
		ip6 = ip6ip_outbound(m, sizeof(struct rte_esp_hdr) + sa->iv_len,
				&sa->src, &sa->dst);
		esp = (struct rte_esp_hdr *)(ip6 + 1);
		break;
	case TRANSPORT:
		new_ip = (uint8_t *)rte_pktmbuf_prepend(m,
				sizeof(struct rte_esp_hdr) + sa->iv_len);
		memmove(new_ip, ip4, ip_hdr_len);
		esp = (struct rte_esp_hdr *)(new_ip + ip_hdr_len);
		ip4 = (struct ip *)new_ip;
		if (likely(ip4->ip_v == IPVERSION)) {
			ip4->ip_p = IPPROTO_ESP;
			ip4->ip_len = htons(rte_pktmbuf_data_len(m));
		} else {
			ip6 = (struct ip6_hdr *)new_ip;
			ip6->ip6_nxt = IPPROTO_ESP;
			ip6->ip6_plen = htons(rte_pktmbuf_data_len(m) -
					sizeof(struct ip6_hdr));
		}
	}
	sa->seq++;
	esp->spi = rte_cpu_to_be_32(sa->spi);
	esp->seq = rte_cpu_to_be_32((uint32_t)sa->seq);

	/* Set the per-packet IV, carried just after the ESP header */
	uint64_t *iv = (uint64_t *)(esp + 1);
	if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_GCM) {
		*iv = rte_cpu_to_be_64(sa->seq);
	} else {
		switch (sa->cipher_algo) {
		case RTE_CRYPTO_CIPHER_NULL:
		case RTE_CRYPTO_CIPHER_DES_CBC:
		case RTE_CRYPTO_CIPHER_3DES_CBC:
		case RTE_CRYPTO_CIPHER_AES_CBC:
			memset(iv, 0, sa->iv_len);
			break;
		case RTE_CRYPTO_CIPHER_AES_CTR:
			*iv = rte_cpu_to_be_64(sa->seq);
			break;
		default:
			RTE_LOG(ERR, IPSEC_ESP,
					"unsupported cipher algorithm %u\n",
					sa->cipher_algo);
			return -EINVAL;
		}
	}
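
	/*
	 * Inline-crypto sessions are handled entirely by the NIC: only the
	 * trailer (or its offload metadata) needs to be set up here and no
	 * lookaside crypto operation is built.
	 */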
	if (ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
		if (ips->security.ol_flags &
				RTE_SECURITY_TX_HW_TRAILER_OFFLOAD) {
			/* Set the inner esp next protocol for HW trailer */
			m->inner_esp_next_proto = nlp;
			m->packet_type |= RTE_PTYPE_TUNNEL_ESP;
		} else {
			padding[pad_len - 2] = pad_len - 2;
			padding[pad_len - 1] = nlp;
		}
		goto done;
	}

	RTE_ASSERT(cop != NULL);
	sym_cop = get_sym_cop(cop);
	sym_cop->m_src = m;

	if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_GCM) {
		uint8_t *aad;

		sym_cop->aead.data.offset = ip_hdr_len +
			sizeof(struct rte_esp_hdr) + sa->iv_len;
		sym_cop->aead.data.length = pad_payload_len;

		/* Fill pad_len using default sequential scheme */
		for (i = 0; i < pad_len - 2; i++)
			padding[i] = i + 1;
		padding[pad_len - 2] = pad_len - 2;
		padding[pad_len - 1] = nlp;

		struct cnt_blk *icb = get_cnt_blk(m);
		icb->salt = sa->salt;
		icb->iv = rte_cpu_to_be_64(sa->seq);
		icb->cnt = rte_cpu_to_be_32(1);

		/* AAD for AES-GCM: the 8 bytes of SPI + sequence number */
		aad = get_aad(m);
		memcpy(aad, esp, 8);
		sym_cop->aead.aad.data = aad;
		sym_cop->aead.aad.phys_addr = rte_pktmbuf_iova_offset(m,
				aad - rte_pktmbuf_mtod(m, uint8_t *));

		sym_cop->aead.digest.data = rte_pktmbuf_mtod_offset(m, uint8_t *,
				rte_pktmbuf_pkt_len(m) - sa->digest_len);
		sym_cop->aead.digest.phys_addr = rte_pktmbuf_iova_offset(m,
				rte_pktmbuf_pkt_len(m) - sa->digest_len);
	} else {
		switch (sa->cipher_algo) {
		case RTE_CRYPTO_CIPHER_NULL:
		case RTE_CRYPTO_CIPHER_DES_CBC:
		case RTE_CRYPTO_CIPHER_3DES_CBC:
		case RTE_CRYPTO_CIPHER_AES_CBC:
			sym_cop->cipher.data.offset = ip_hdr_len +
				sizeof(struct rte_esp_hdr);
			sym_cop->cipher.data.length = pad_payload_len + sa->iv_len;
			break;
		case RTE_CRYPTO_CIPHER_AES_CTR:
			sym_cop->cipher.data.offset = ip_hdr_len +
				sizeof(struct rte_esp_hdr) + sa->iv_len;
			sym_cop->cipher.data.length = pad_payload_len;
			break;
		default:
			RTE_LOG(ERR, IPSEC_ESP, "unsupported cipher algorithm %u\n",
					sa->cipher_algo);
			return -EINVAL;
		}

		/* Fill pad_len using default sequential scheme */
		for (i = 0; i < pad_len - 2; i++)
			padding[i] = i + 1;
		padding[pad_len - 2] = pad_len - 2;
		padding[pad_len - 1] = nlp;

		struct cnt_blk *icb = get_cnt_blk(m);
		icb->salt = sa->salt;
		icb->iv = rte_cpu_to_be_64(sa->seq);
		icb->cnt = rte_cpu_to_be_32(1);
		switch (sa->auth_algo) {
		case RTE_CRYPTO_AUTH_NULL:
		case RTE_CRYPTO_AUTH_SHA1_HMAC:
		case RTE_CRYPTO_AUTH_SHA256_HMAC:
		case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
			sym_cop->auth.data.offset = ip_hdr_len;
			sym_cop->auth.data.length = sizeof(struct rte_esp_hdr) +
				sa->iv_len + pad_payload_len;
			break;
		default:
			RTE_LOG(ERR, IPSEC_ESP, "unsupported auth algorithm %u\n",
					sa->auth_algo);
			return -EINVAL;
		}

		sym_cop->auth.digest.data = rte_pktmbuf_mtod_offset(m, uint8_t *,
				rte_pktmbuf_pkt_len(m) - sa->digest_len);
		sym_cop->auth.digest.phys_addr = rte_pktmbuf_iova_offset(m,
				rte_pktmbuf_pkt_len(m) - sa->digest_len);
	}

done:
	return 0;
}
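
/*
 * Post-process an outbound packet: for inline sessions just mark the mbuf
 * for TX security offload, otherwise verify that the lookaside crypto
 * operation completed successfully.
 */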
int
esp_outbound_post(struct rte_mbuf *m,
		struct ipsec_sa *sa,
		struct rte_crypto_op *cop)
{
	enum rte_security_session_action_type type;

	RTE_ASSERT(m != NULL);
	RTE_ASSERT(sa != NULL);

	type = ipsec_get_action_type(sa);

	if ((type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) ||
			(type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO)) {
		m->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;
	} else {
		RTE_ASSERT(cop != NULL);
		if (cop->status != RTE_CRYPTO_OP_STATUS_SUCCESS) {
			RTE_LOG(ERR, IPSEC_ESP, "%s() failed crypto op\n",
					__func__);
			return -1;
		}
	}

	return 0;
}