X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=examples%2Fipsec-secgw%2Fesp.c;h=a63fb954c22e24d6d9b478229184aa4be25fcc85;hb=2a41fb7c65259e36a8fb1c60eb2c5fc73ade9575;hp=05caa77a2f5dc95611dce304fb3669407ad30982;hpb=f159e70b09225d7f4d814dbdaec78f818edc5031;p=dpdk.git

diff --git a/examples/ipsec-secgw/esp.c b/examples/ipsec-secgw/esp.c
index 05caa77a2f..a63fb954c2 100644
--- a/examples/ipsec-secgw/esp.c
+++ b/examples/ipsec-secgw/esp.c
@@ -1,7 +1,7 @@
 /*-
  *   BSD LICENSE
  *
- *   Copyright(c) 2016 Intel Corporation. All rights reserved.
+ *   Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
  *   All rights reserved.
  *
  *   Redistribution and use in source and binary forms, with or without
@@ -50,21 +50,6 @@
 #include "esp.h"
 #include "ipip.h"
 
-static inline void
-random_iv_u64(uint64_t *buf, uint16_t n)
-{
-	uint32_t left = n & 0x7;
-	uint32_t i;
-
-	RTE_ASSERT((n & 0x3) == 0);
-
-	for (i = 0; i < (n >> 3); i++)
-		buf[i] = rte_rand();
-
-	if (left)
-		*((uint32_t *)&buf[i]) = (uint32_t)lrand48();
-}
-
 int
 esp_inbound(struct rte_mbuf *m, struct ipsec_sa *sa,
 		struct rte_crypto_op *cop)
@@ -93,33 +78,85 @@ esp_inbound(struct rte_mbuf *m, struct ipsec_sa *sa,
 			sizeof(struct esp_hdr) - sa->iv_len - sa->digest_len;
 
 	if ((payload_len & (sa->block_size - 1)) || (payload_len <= 0)) {
-		RTE_LOG(DEBUG, IPSEC_ESP, "payload %d not multiple of %u\n",
+		RTE_LOG_DP(DEBUG, IPSEC_ESP, "payload %d not multiple of %u\n",
 				payload_len, sa->block_size);
 		return -EINVAL;
 	}
 
-	sym_cop = (struct rte_crypto_sym_op *)(cop + 1);
-
+	sym_cop = get_sym_cop(cop);
 	sym_cop->m_src = m;
-	sym_cop->cipher.data.offset = ip_hdr_len + sizeof(struct esp_hdr) +
-		sa->iv_len;
-	sym_cop->cipher.data.length = payload_len;
-
-	sym_cop->cipher.iv.data = rte_pktmbuf_mtod_offset(m, void*,
-			ip_hdr_len + sizeof(struct esp_hdr));
-	sym_cop->cipher.iv.phys_addr = rte_pktmbuf_mtophys_offset(m,
-			ip_hdr_len + sizeof(struct esp_hdr));
-	sym_cop->cipher.iv.length = sa->iv_len;
+	if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_GCM) {
+		sym_cop->aead.data.offset = ip_hdr_len + sizeof(struct esp_hdr) +
+			sa->iv_len;
+		sym_cop->aead.data.length = payload_len;
+
+		struct cnt_blk *icb;
+		uint8_t *aad;
+		uint8_t *iv = RTE_PTR_ADD(ip4, ip_hdr_len + sizeof(struct esp_hdr));
+
+		icb = get_cnt_blk(m);
+		icb->salt = sa->salt;
+		memcpy(&icb->iv, iv, 8);
+		icb->cnt = rte_cpu_to_be_32(1);
+
+		aad = get_aad(m);
+		memcpy(aad, iv - sizeof(struct esp_hdr), 8);
+		sym_cop->aead.aad.data = aad;
+		sym_cop->aead.aad.phys_addr = rte_pktmbuf_mtophys_offset(m,
+				aad - rte_pktmbuf_mtod(m, uint8_t *));
+
+		sym_cop->aead.digest.data = rte_pktmbuf_mtod_offset(m, void*,
+				rte_pktmbuf_pkt_len(m) - sa->digest_len);
+		sym_cop->aead.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
+				rte_pktmbuf_pkt_len(m) - sa->digest_len);
+	} else {
+		sym_cop->cipher.data.offset = ip_hdr_len + sizeof(struct esp_hdr) +
+			sa->iv_len;
+		sym_cop->cipher.data.length = payload_len;
+
+		struct cnt_blk *icb;
+		uint8_t *iv = RTE_PTR_ADD(ip4, ip_hdr_len + sizeof(struct esp_hdr));
+		uint8_t *iv_ptr = rte_crypto_op_ctod_offset(cop,
+				uint8_t *, IV_OFFSET);
+
+		switch (sa->cipher_algo) {
+		case RTE_CRYPTO_CIPHER_NULL:
+		case RTE_CRYPTO_CIPHER_AES_CBC:
+			/* Copy IV at the end of crypto operation */
+			rte_memcpy(iv_ptr, iv, sa->iv_len);
+			break;
+		case RTE_CRYPTO_CIPHER_AES_CTR:
+			icb = get_cnt_blk(m);
+			icb->salt = sa->salt;
+			memcpy(&icb->iv, iv, 8);
+			icb->cnt = rte_cpu_to_be_32(1);
+			break;
+		default:
+			RTE_LOG(ERR, IPSEC_ESP, "unsupported cipher algorithm %u\n",
+					sa->cipher_algo);
+			return -EINVAL;
+		}
 
-	sym_cop->auth.data.offset = ip_hdr_len;
-	sym_cop->auth.data.length = sizeof(struct esp_hdr) +
-		sa->iv_len + payload_len;
+		switch (sa->auth_algo) {
+		case RTE_CRYPTO_AUTH_NULL:
+		case RTE_CRYPTO_AUTH_SHA1_HMAC:
+		case RTE_CRYPTO_AUTH_SHA256_HMAC:
+			sym_cop->auth.data.offset = ip_hdr_len;
+			sym_cop->auth.data.length = sizeof(struct esp_hdr) +
+				sa->iv_len + payload_len;
+			break;
+		default:
+			RTE_LOG(ERR, IPSEC_ESP, "unsupported auth algorithm %u\n",
+					sa->auth_algo);
+			return -EINVAL;
+		}
 
-	sym_cop->auth.digest.data = rte_pktmbuf_mtod_offset(m, void*,
-			rte_pktmbuf_pkt_len(m) - sa->digest_len);
-	sym_cop->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
-			rte_pktmbuf_pkt_len(m) - sa->digest_len);
-	sym_cop->auth.digest.length = sa->digest_len;
+		sym_cop->auth.digest.data = rte_pktmbuf_mtod_offset(m, void*,
+				rte_pktmbuf_pkt_len(m) - sa->digest_len);
+		sym_cop->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
+				rte_pktmbuf_pkt_len(m) - sa->digest_len);
+	}
 
 	return 0;
 }
@@ -174,7 +211,8 @@ esp_inbound_post(struct rte_mbuf *m, struct ipsec_sa *sa,
 			/* XXX No option headers supported */
 			memmove(ip6, ip, sizeof(struct ip6_hdr));
 			ip6->ip6_nxt = *nexthdr;
-			ip6->ip6_plen = htons(rte_pktmbuf_data_len(m));
+			ip6->ip6_plen = htons(rte_pktmbuf_data_len(m) -
+					sizeof(struct ip6_hdr));
 		}
 	} else
 		ipip_inbound(m, sizeof(struct esp_hdr) + sa->iv_len);
@@ -269,52 +307,107 @@ esp_outbound(struct rte_mbuf *m, struct ipsec_sa *sa,
 				sizeof(struct esp_hdr) + sa->iv_len);
 		memmove(new_ip, ip4, ip_hdr_len);
 		esp = (struct esp_hdr *)(new_ip + ip_hdr_len);
+		ip4 = (struct ip *)new_ip;
 		if (likely(ip4->ip_v == IPVERSION)) {
-			ip4 = (struct ip *)new_ip;
 			ip4->ip_p = IPPROTO_ESP;
 			ip4->ip_len = htons(rte_pktmbuf_data_len(m));
 		} else {
 			ip6 = (struct ip6_hdr *)new_ip;
 			ip6->ip6_nxt = IPPROTO_ESP;
-			ip6->ip6_plen = htons(rte_pktmbuf_data_len(m));
+			ip6->ip6_plen = htons(rte_pktmbuf_data_len(m) -
+					sizeof(struct ip6_hdr));
 		}
 	}
 
 	sa->seq++;
 	esp->spi = rte_cpu_to_be_32(sa->spi);
-	esp->seq = rte_cpu_to_be_32(sa->seq);
+	esp->seq = rte_cpu_to_be_32((uint32_t)sa->seq);
 
-	if (sa->cipher_algo == RTE_CRYPTO_CIPHER_AES_CBC)
-		random_iv_u64((uint64_t *)(esp + 1), sa->iv_len);
+	uint64_t *iv = (uint64_t *)(esp + 1);
 
-	/* Fill pad_len using default sequential scheme */
-	for (i = 0; i < pad_len - 2; i++)
-		padding[i] = i + 1;
-	padding[pad_len - 2] = pad_len - 2;
-	padding[pad_len - 1] = nlp;
+	sym_cop = get_sym_cop(cop);
+	sym_cop->m_src = m;
 
-	sym_cop = (struct rte_crypto_sym_op *)(cop + 1);
+	if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_GCM) {
+		uint8_t *aad;
 
-	sym_cop->m_src = m;
-	sym_cop->cipher.data.offset = ip_hdr_len + sizeof(struct esp_hdr) +
-			sa->iv_len;
-	sym_cop->cipher.data.length = pad_payload_len;
+		*iv = rte_cpu_to_be_64(sa->seq);
+		sym_cop->aead.data.offset = ip_hdr_len +
+			sizeof(struct esp_hdr) + sa->iv_len;
+		sym_cop->aead.data.length = pad_payload_len;
+
+		/* Fill pad_len using default sequential scheme */
+		for (i = 0; i < pad_len - 2; i++)
+			padding[i] = i + 1;
+		padding[pad_len - 2] = pad_len - 2;
+		padding[pad_len - 1] = nlp;
 
-	sym_cop->cipher.iv.data = rte_pktmbuf_mtod_offset(m, uint8_t *,
-			ip_hdr_len + sizeof(struct esp_hdr));
-	sym_cop->cipher.iv.phys_addr = rte_pktmbuf_mtophys_offset(m,
-			ip_hdr_len + sizeof(struct esp_hdr));
-	sym_cop->cipher.iv.length = sa->iv_len;
+		struct cnt_blk *icb = get_cnt_blk(m);
+		icb->salt = sa->salt;
+		icb->iv = rte_cpu_to_be_64(sa->seq);
+		icb->cnt = rte_cpu_to_be_32(1);
 
-	sym_cop->auth.data.offset = ip_hdr_len;
-	sym_cop->auth.data.length = sizeof(struct esp_hdr) + sa->iv_len +
-			pad_payload_len;
+		aad = get_aad(m);
+		memcpy(aad, esp, 8);
+		sym_cop->aead.aad.data = aad;
+		sym_cop->aead.aad.phys_addr = rte_pktmbuf_mtophys_offset(m,
+				aad - rte_pktmbuf_mtod(m, uint8_t *));
 
-	sym_cop->auth.digest.data = rte_pktmbuf_mtod_offset(m, uint8_t *,
+		sym_cop->aead.digest.data = rte_pktmbuf_mtod_offset(m, uint8_t *,
 			rte_pktmbuf_pkt_len(m) - sa->digest_len);
-	sym_cop->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
+		sym_cop->aead.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
 			rte_pktmbuf_pkt_len(m) - sa->digest_len);
-	sym_cop->auth.digest.length = sa->digest_len;
+	} else {
+		switch (sa->cipher_algo) {
+		case RTE_CRYPTO_CIPHER_NULL:
+		case RTE_CRYPTO_CIPHER_AES_CBC:
+			memset(iv, 0, sa->iv_len);
+			sym_cop->cipher.data.offset = ip_hdr_len +
+				sizeof(struct esp_hdr);
+			sym_cop->cipher.data.length = pad_payload_len + sa->iv_len;
+			break;
+		case RTE_CRYPTO_CIPHER_AES_CTR:
+			*iv = rte_cpu_to_be_64(sa->seq);
+			sym_cop->cipher.data.offset = ip_hdr_len +
+				sizeof(struct esp_hdr) + sa->iv_len;
+			sym_cop->cipher.data.length = pad_payload_len;
+			break;
+		default:
+			RTE_LOG(ERR, IPSEC_ESP, "unsupported cipher algorithm %u\n",
+					sa->cipher_algo);
+			return -EINVAL;
+		}
+
+		/* Fill pad_len using default sequential scheme */
+		for (i = 0; i < pad_len - 2; i++)
+			padding[i] = i + 1;
+		padding[pad_len - 2] = pad_len - 2;
+		padding[pad_len - 1] = nlp;
+
+		struct cnt_blk *icb = get_cnt_blk(m);
+		icb->salt = sa->salt;
+		icb->iv = rte_cpu_to_be_64(sa->seq);
+		icb->cnt = rte_cpu_to_be_32(1);
+
+		switch (sa->auth_algo) {
+		case RTE_CRYPTO_AUTH_NULL:
+		case RTE_CRYPTO_AUTH_SHA1_HMAC:
+		case RTE_CRYPTO_AUTH_SHA256_HMAC:
+			sym_cop->auth.data.offset = ip_hdr_len;
+			sym_cop->auth.data.length = sizeof(struct esp_hdr) +
+				sa->iv_len + pad_payload_len;
+			break;
+		default:
+			RTE_LOG(ERR, IPSEC_ESP, "unsupported auth algorithm %u\n",
+					sa->auth_algo);
+			return -EINVAL;
+		}
+
+		sym_cop->auth.digest.data = rte_pktmbuf_mtod_offset(m, uint8_t *,
+				rte_pktmbuf_pkt_len(m) - sa->digest_len);
+		sym_cop->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
+				rte_pktmbuf_pkt_len(m) - sa->digest_len);
+	}
 
 	return 0;
 }
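Note for readers: the patch relies on helpers defined in examples/ipsec-secgw/ipsec.h that are not visible in this diff: get_sym_cop(), get_cnt_blk(), get_aad(), IV_OFFSET and struct cnt_blk. The sketch below is a minimal illustration of the counter-block construction the AES-CTR and AES-GCM paths depend on; the struct layout and helper names here are assumptions drawn from how the diff uses them, not a verbatim copy of the header.

/*
 * Sketch only (assumed layout): the 16-byte initial counter block for
 * AES-CTR (RFC 3686) and AES-GCM (RFC 4106) is
 *     salt (4 B) || explicit IV (8 B) || block counter (4 B)
 * with the counter starting at 1 in network byte order.  Outbound, the
 * explicit IV is derived from the SA sequence number; inbound, it is
 * copied from the packet just after the ESP header.
 */
#include <stdint.h>
#include <string.h>
#include <rte_byteorder.h>

struct cnt_blk {		/* assumed to mirror ipsec.h */
	uint32_t salt;		/* per-SA salt, fixed at SA setup */
	uint64_t iv;		/* explicit IV carried in the ESP payload */
	uint32_t cnt;		/* initial block counter, always BE(1) */
} __attribute__((packed));

/* Outbound: seed the counter block from the SA sequence number. */
static inline void
cnt_blk_fill(struct cnt_blk *icb, uint32_t salt, uint64_t seq)
{
	icb->salt = salt;
	icb->iv = rte_cpu_to_be_64(seq);
	icb->cnt = rte_cpu_to_be_32(1);
}

/* Inbound: take the explicit IV as it sits in the received packet. */
static inline void
cnt_blk_fill_from_pkt(struct cnt_blk *icb, uint32_t salt, const uint8_t *iv)
{
	icb->salt = salt;
	memcpy(&icb->iv, iv, sizeof(icb->iv));
	icb->cnt = rte_cpu_to_be_32(1);
}

In this example the crypto operation appears to live in the mbuf private area, so the block written through get_cnt_blk() is presumably the same memory the session later reads as the cipher IV at IV_OFFSET; the 8-byte AAD for GCM is simply the SPI and sequence number copied from the start of the ESP header.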