X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=examples%2Fipsec-secgw%2Fesp.c;h=f11d095ba7464705ba8dbe5b67fb9f9d8dd884ff;hb=5ef2546767525c8a4a3c2bc60357b23c3dffcf6b;hp=c3efe52b15d75cf71aff7f9f2ac35c1462387bf1;hpb=bfa9a8a4605bf6c4c82aa6ebff98de89c73d2024;p=dpdk.git

diff --git a/examples/ipsec-secgw/esp.c b/examples/ipsec-secgw/esp.c
index c3efe52b15..f11d095ba7 100644
--- a/examples/ipsec-secgw/esp.c
+++ b/examples/ipsec-secgw/esp.c
@@ -1,34 +1,5 @@
-/*-
- *   BSD LICENSE
- *
- *   Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
- *   All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of Intel Corporation nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2017 Intel Corporation
  */
 
 #include <stdint.h>
@@ -78,7 +49,7 @@ esp_inbound(struct rte_mbuf *m, struct ipsec_sa *sa,
 	}
 
 	payload_len = rte_pktmbuf_pkt_len(m) - ip_hdr_len -
-		sizeof(struct esp_hdr) - sa->iv_len - sa->digest_len;
+		sizeof(struct rte_esp_hdr) - sa->iv_len - sa->digest_len;
 
 	if ((payload_len & (sa->block_size - 1)) || (payload_len <= 0)) {
 		RTE_LOG_DP(DEBUG, IPSEC_ESP, "payload %d not multiple of %u\n",
@@ -90,13 +61,14 @@ esp_inbound(struct rte_mbuf *m, struct ipsec_sa *sa,
 	sym_cop->m_src = m;
 
 	if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_GCM) {
-		sym_cop->aead.data.offset = ip_hdr_len + sizeof(struct esp_hdr) +
-			sa->iv_len;
+		sym_cop->aead.data.offset =
+			ip_hdr_len + sizeof(struct rte_esp_hdr) + sa->iv_len;
 		sym_cop->aead.data.length = payload_len;
 
 		struct cnt_blk *icb;
 		uint8_t *aad;
-		uint8_t *iv = RTE_PTR_ADD(ip4, ip_hdr_len + sizeof(struct esp_hdr));
+		uint8_t *iv = RTE_PTR_ADD(ip4, ip_hdr_len +
+				sizeof(struct rte_esp_hdr));
 
 		icb = get_cnt_blk(m);
 		icb->salt = sa->salt;
@@ -104,7 +76,7 @@ esp_inbound(struct rte_mbuf *m, struct ipsec_sa *sa,
 		icb->cnt = rte_cpu_to_be_32(1);
 
 		aad = get_aad(m);
-		memcpy(aad, iv - sizeof(struct esp_hdr), 8);
+		memcpy(aad, iv - sizeof(struct rte_esp_hdr), 8);
 		sym_cop->aead.aad.data = aad;
 		sym_cop->aead.aad.phys_addr = rte_pktmbuf_iova_offset(m,
 				aad - rte_pktmbuf_mtod(m, uint8_t *));
@@ -114,17 +86,20 @@ esp_inbound(struct rte_mbuf *m, struct ipsec_sa *sa,
 		sym_cop->aead.digest.phys_addr = rte_pktmbuf_iova_offset(m,
 				rte_pktmbuf_pkt_len(m) - sa->digest_len);
 	} else {
-		sym_cop->cipher.data.offset = ip_hdr_len + sizeof(struct esp_hdr) +
+		sym_cop->cipher.data.offset = ip_hdr_len +
+			sizeof(struct rte_esp_hdr) +
 			sa->iv_len;
 		sym_cop->cipher.data.length = payload_len;
 
 		struct cnt_blk *icb;
-		uint8_t *iv = RTE_PTR_ADD(ip4, ip_hdr_len + sizeof(struct esp_hdr));
+		uint8_t *iv = RTE_PTR_ADD(ip4, ip_hdr_len +
+				sizeof(struct rte_esp_hdr));
 		uint8_t *iv_ptr = rte_crypto_op_ctod_offset(cop,
 				uint8_t *, IV_OFFSET);
 
 		switch (sa->cipher_algo) {
 		case RTE_CRYPTO_CIPHER_NULL:
+		case RTE_CRYPTO_CIPHER_3DES_CBC:
 		case RTE_CRYPTO_CIPHER_AES_CBC:
 			/* Copy IV at the end of crypto operation */
 			rte_memcpy(iv_ptr, iv, sa->iv_len);
@@ -146,7 +121,7 @@ esp_inbound(struct rte_mbuf *m, struct ipsec_sa *sa,
 		case RTE_CRYPTO_AUTH_SHA1_HMAC:
 		case RTE_CRYPTO_AUTH_SHA256_HMAC:
 			sym_cop->auth.data.offset = ip_hdr_len;
-			sym_cop->auth.data.length = sizeof(struct esp_hdr) +
+			sym_cop->auth.data.length = sizeof(struct rte_esp_hdr) +
 				sa->iv_len + payload_len;
 			break;
 		default:
@@ -178,7 +153,8 @@ esp_inbound_post(struct rte_mbuf *m, struct ipsec_sa *sa,
 	RTE_ASSERT(sa != NULL);
 	RTE_ASSERT(cop != NULL);
 
-	if (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
+	if ((sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) ||
+			(sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO)) {
 		if (m->ol_flags & PKT_RX_SEC_OFFLOAD) {
 			if (m->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED)
 				cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
@@ -189,7 +165,7 @@ esp_inbound_post(struct rte_mbuf *m, struct ipsec_sa *sa,
 	}
 
 	if (cop->status != RTE_CRYPTO_OP_STATUS_SUCCESS) {
-		RTE_LOG(ERR, IPSEC_ESP, "failed crypto op\n");
+		RTE_LOG(ERR, IPSEC_ESP, "%s() failed crypto op\n", __func__);
 		return -1;
 	}
 
@@ -219,7 +195,7 @@ esp_inbound_post(struct rte_mbuf *m, struct ipsec_sa *sa,
 	if (unlikely(sa->flags == TRANSPORT)) {
 		ip = rte_pktmbuf_mtod(m, struct ip *);
 		ip4 = (struct ip *)rte_pktmbuf_adj(m,
-				sizeof(struct esp_hdr) + sa->iv_len);
+				sizeof(struct rte_esp_hdr) + sa->iv_len);
 		if (likely(ip->ip_v == IPVERSION)) {
 			memmove(ip4, ip, ip->ip_hl * 4);
 			ip4->ip_p = *nexthdr;
@@ -233,7 +209,7 @@ esp_inbound_post(struct rte_mbuf *m, struct ipsec_sa *sa,
 					sizeof(struct ip6_hdr));
 		}
 	} else
-		ipip_inbound(m, sizeof(struct esp_hdr) + sa->iv_len);
+		ipip_inbound(m, sizeof(struct rte_esp_hdr) + sa->iv_len);
 
 	return 0;
 }
@@ -244,7 +220,7 @@ esp_outbound(struct rte_mbuf *m, struct ipsec_sa *sa,
 {
 	struct ip *ip4;
 	struct ip6_hdr *ip6;
-	struct esp_hdr *esp = NULL;
+	struct rte_esp_hdr *esp = NULL;
 	uint8_t *padding = NULL, *new_ip, nlp;
 	struct rte_crypto_sym_op *sym_cop;
 	int32_t i;
@@ -295,7 +271,7 @@ esp_outbound(struct rte_mbuf *m, struct ipsec_sa *sa,
 	}
 
 	/* Check maximum packet size */
-	if (unlikely(ip_hdr_len + sizeof(struct esp_hdr) + sa->iv_len +
+	if (unlikely(ip_hdr_len + sizeof(struct rte_esp_hdr) + sa->iv_len +
 			pad_payload_len + sa->digest_len > IP_MAXPACKET)) {
 		RTE_LOG(ERR, IPSEC_ESP, "ipsec packet is too big\n");
 		return -EINVAL;
@@ -317,20 +293,20 @@ esp_outbound(struct rte_mbuf *m, struct ipsec_sa *sa,
 
 	switch (sa->flags) {
 	case IP4_TUNNEL:
-		ip4 = ip4ip_outbound(m, sizeof(struct esp_hdr) + sa->iv_len,
+		ip4 = ip4ip_outbound(m, sizeof(struct rte_esp_hdr) + sa->iv_len,
 				&sa->src, &sa->dst);
-		esp = (struct esp_hdr *)(ip4 + 1);
+		esp = (struct rte_esp_hdr *)(ip4 + 1);
 		break;
 	case IP6_TUNNEL:
-		ip6 = ip6ip_outbound(m, sizeof(struct esp_hdr) + sa->iv_len,
+		ip6 = ip6ip_outbound(m, sizeof(struct rte_esp_hdr) + sa->iv_len,
 				&sa->src, &sa->dst);
-		esp = (struct esp_hdr *)(ip6 + 1);
+		esp = (struct rte_esp_hdr *)(ip6 + 1);
 		break;
 	case TRANSPORT:
 		new_ip = (uint8_t *)rte_pktmbuf_prepend(m,
-				sizeof(struct esp_hdr) + sa->iv_len);
+				sizeof(struct rte_esp_hdr) + sa->iv_len);
 		memmove(new_ip, ip4, ip_hdr_len);
-		esp = (struct esp_hdr *)(new_ip + ip_hdr_len);
+		esp = (struct rte_esp_hdr *)(new_ip + ip_hdr_len);
 		ip4 = (struct ip *)new_ip;
 		if (likely(ip4->ip_v == IPVERSION)) {
 			ip4->ip_p = IPPROTO_ESP;
@@ -354,6 +330,7 @@ esp_outbound(struct rte_mbuf *m, struct ipsec_sa *sa,
 	} else {
 		switch (sa->cipher_algo) {
 		case RTE_CRYPTO_CIPHER_NULL:
+		case RTE_CRYPTO_CIPHER_3DES_CBC:
 		case RTE_CRYPTO_CIPHER_AES_CBC:
 			memset(iv, 0, sa->iv_len);
 			break;
@@ -388,7 +365,7 @@ esp_outbound(struct rte_mbuf *m, struct ipsec_sa *sa,
 		uint8_t *aad;
 
 		sym_cop->aead.data.offset = ip_hdr_len +
-			sizeof(struct esp_hdr) + sa->iv_len;
+			sizeof(struct rte_esp_hdr) + sa->iv_len;
 		sym_cop->aead.data.length = pad_payload_len;
 
 		/* Fill pad_len using default sequential scheme */
@@ -415,14 +392,15 @@ esp_outbound(struct rte_mbuf *m, struct ipsec_sa *sa,
 	} else {
 		switch (sa->cipher_algo) {
 		case RTE_CRYPTO_CIPHER_NULL:
+		case RTE_CRYPTO_CIPHER_3DES_CBC:
 		case RTE_CRYPTO_CIPHER_AES_CBC:
 			sym_cop->cipher.data.offset = ip_hdr_len +
-				sizeof(struct esp_hdr);
+				sizeof(struct rte_esp_hdr);
 			sym_cop->cipher.data.length = pad_payload_len + sa->iv_len;
 			break;
 		case RTE_CRYPTO_CIPHER_AES_CTR:
 			sym_cop->cipher.data.offset = ip_hdr_len +
-				sizeof(struct esp_hdr) + sa->iv_len;
+				sizeof(struct rte_esp_hdr) + sa->iv_len;
 			sym_cop->cipher.data.length = pad_payload_len;
 			break;
 		default:
@@ -447,7 +425,7 @@ esp_outbound(struct rte_mbuf *m, struct ipsec_sa *sa,
 		case RTE_CRYPTO_AUTH_SHA1_HMAC:
 		case RTE_CRYPTO_AUTH_SHA256_HMAC:
 			sym_cop->auth.data.offset = ip_hdr_len;
-			sym_cop->auth.data.length = sizeof(struct esp_hdr) +
+			sym_cop->auth.data.length = sizeof(struct rte_esp_hdr) +
 				sa->iv_len + pad_payload_len;
 			break;
 		default:
@@ -474,12 +452,14 @@ esp_outbound_post(struct rte_mbuf *m,
 	RTE_ASSERT(m != NULL);
 	RTE_ASSERT(sa != NULL);
 
-	if (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
+	if ((sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) ||
+			(sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO)) {
 		m->ol_flags |= PKT_TX_SEC_OFFLOAD;
 	} else {
 		RTE_ASSERT(cop != NULL);
 		if (cop->status != RTE_CRYPTO_OP_STATUS_SUCCESS) {
-			RTE_LOG(ERR, IPSEC_ESP, "Failed crypto op\n");
+			RTE_LOG(ERR, IPSEC_ESP, "%s() failed crypto op\n",
+					__func__);
 			return -1;
 		}
 	}