X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=lib%2Fipsec%2Fsa.c;h=59a547637dfd29f8f16ac36deb681af537e9f716;hb=670692191a938fb8aaa887aa268437aacc51de7d;hp=720e0f365bf2b9cdcee79341a5ddb775721b8b09;hpb=c99d26197c535ecda727fb1d641c2bbd27f95374;p=dpdk.git

diff --git a/lib/ipsec/sa.c b/lib/ipsec/sa.c
index 720e0f365b..59a547637d 100644
--- a/lib/ipsec/sa.c
+++ b/lib/ipsec/sa.c
@@ -5,15 +5,13 @@
 #include <rte_ipsec.h>
 #include <rte_esp.h>
 #include <rte_ip.h>
+#include <rte_udp.h>
 #include <rte_errno.h>
-#include <rte_cryptodev.h>
 
 #include "sa.h"
 #include "ipsec_sqn.h"
 #include "crypto.h"
-#include "iph.h"
 #include "misc.h"
-#include "pad.h"
 
 #define MBUF_MAX_L2_LEN		RTE_LEN2MASK(RTE_MBUF_L2_LEN_BITS, uint64_t)
 #define MBUF_MAX_L3_LEN		RTE_LEN2MASK(RTE_MBUF_L3_LEN_BITS, uint64_t)
@@ -135,7 +133,7 @@ ipsec_sa_size(uint64_t type, uint32_t *wnd_sz, uint32_t *nb_bucket)
 	/*
 	 * RFC 4303 recommends 64 as minimum window size.
	 * there is no point to use ESN mode without SQN window,
-	 * so make sure we have at least 64 window when ESN is enalbed.
+	 * so make sure we have at least 64 window when ESN is enabled.
 	 */
 	wsz = ((type & RTE_IPSEC_SATP_ESN_MASK) == RTE_IPSEC_SATP_ESN_DISABLE) ?
@@ -217,6 +215,10 @@ fill_sa_type(const struct rte_ipsec_sa_prm *prm, uint64_t *type)
 	} else
 		return -EINVAL;
 
+	/* check for UDP encapsulation flag */
+	if (prm->ipsec_xform.options.udp_encap == 1)
+		tp |= RTE_IPSEC_SATP_NATT_ENABLE;
+
 	/* check for ESN flag */
 	if (prm->ipsec_xform.options.esn == 0)
 		tp |= RTE_IPSEC_SATP_ESN_DISABLE;
@@ -289,11 +291,11 @@ esp_inb_tun_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm)
 /*
  * Init ESP outbound specific things.
  */
 static void
-esp_outb_init(struct rte_ipsec_sa *sa, uint32_t hlen)
+esp_outb_init(struct rte_ipsec_sa *sa, uint32_t hlen, uint64_t sqn)
 {
 	uint8_t algo_type;
 
-	sa->sqn.outb = 1;
+	sa->sqn.outb = sqn > 1 ? sqn : 1;
 
 	algo_type = sa->algo_type;
@@ -355,13 +357,23 @@ esp_outb_tun_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm)
 	sa->hdr_len = prm->tun.hdr_len;
 	sa->hdr_l3_off = prm->tun.hdr_l3_off;
 
+	memcpy(sa->hdr, prm->tun.hdr, prm->tun.hdr_len);
+
+	/* insert UDP header if UDP encapsulation is enabled */
+	if (sa->type & RTE_IPSEC_SATP_NATT_ENABLE) {
+		struct rte_udp_hdr *udph = (struct rte_udp_hdr *)
+			&sa->hdr[prm->tun.hdr_len];
+		sa->hdr_len += sizeof(struct rte_udp_hdr);
+		udph->src_port = rte_cpu_to_be_16(prm->ipsec_xform.udp.sport);
+		udph->dst_port = rte_cpu_to_be_16(prm->ipsec_xform.udp.dport);
+		udph->dgram_cksum = 0;
+	}
+
 	/* update l2_len and l3_len fields for outbound mbuf */
 	sa->tx_offload.val = rte_mbuf_tx_offload(sa->hdr_l3_off,
 		sa->hdr_len - sa->hdr_l3_off, 0, 0, 0, 0, 0);
 
-	memcpy(sa->hdr, prm->tun.hdr, sa->hdr_len);
-
-	esp_outb_init(sa, sa->hdr_len);
+	esp_outb_init(sa, sa->hdr_len, prm->ipsec_xform.esn.value);
 }
 
 /*
@@ -372,7 +384,8 @@ esp_sa_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm,
 	const struct crypto_xform *cxf)
 {
 	static const uint64_t msk = RTE_IPSEC_SATP_DIR_MASK |
-				RTE_IPSEC_SATP_MODE_MASK;
+				RTE_IPSEC_SATP_MODE_MASK |
+				RTE_IPSEC_SATP_NATT_MASK;
 
 	if (prm->ipsec_xform.options.ecn)
 		sa->tos_mask |= RTE_IPV4_HDR_ECN_MASK;
@@ -475,12 +488,18 @@ esp_sa_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm,
 	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TRANS):
 		esp_inb_init(sa);
 		break;
+	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV4 |
+			RTE_IPSEC_SATP_NATT_ENABLE):
+	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV6 |
+			RTE_IPSEC_SATP_NATT_ENABLE):
 	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV4):
 	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV6):
 		esp_outb_tun_init(sa, prm);
 		break;
+	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TRANS |
+			RTE_IPSEC_SATP_NATT_ENABLE):
 	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TRANS):
-		esp_outb_init(sa, 0);
+		esp_outb_init(sa, 0, prm->ipsec_xform.esn.value);
 		break;
 	}
 
@@ -491,15 +510,19 @@ esp_sa_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm,
 
 /*
  * helper function, init SA replay structure.
  */
 static void
-fill_sa_replay(struct rte_ipsec_sa *sa, uint32_t wnd_sz, uint32_t nb_bucket)
+fill_sa_replay(struct rte_ipsec_sa *sa, uint32_t wnd_sz, uint32_t nb_bucket,
+	uint64_t sqn)
 {
 	sa->replay.win_sz = wnd_sz;
 	sa->replay.nb_bucket = nb_bucket;
 	sa->replay.bucket_index_mask = nb_bucket - 1;
 	sa->sqn.inb.rsn[0] = (struct replay_sqn *)(sa + 1);
-	if ((sa->type & RTE_IPSEC_SATP_SQN_MASK) == RTE_IPSEC_SATP_SQN_ATOM)
+	sa->sqn.inb.rsn[0]->sqn = sqn;
+	if ((sa->type & RTE_IPSEC_SATP_SQN_MASK) == RTE_IPSEC_SATP_SQN_ATOM) {
 		sa->sqn.inb.rsn[1] = (struct replay_sqn *)
 			((uintptr_t)sa->sqn.inb.rsn[0] + rsn_size(nb_bucket));
+		sa->sqn.inb.rsn[1]->sqn = sqn;
+	}
 }
 
 int
@@ -551,9 +574,13 @@ rte_ipsec_sa_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm,
 	if (prm->ipsec_xform.proto != RTE_SECURITY_IPSEC_SA_PROTO_ESP)
 		return -EINVAL;
 
-	if (prm->ipsec_xform.mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL &&
-			prm->tun.hdr_len > sizeof(sa->hdr))
-		return -EINVAL;
+	if (prm->ipsec_xform.mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) {
+		uint32_t hlen = prm->tun.hdr_len;
+		if (sa->type & RTE_IPSEC_SATP_NATT_ENABLE)
+			hlen += sizeof(struct rte_udp_hdr);
+		if (hlen > sizeof(sa->hdr))
+			return -EINVAL;
+	}
 
 	rc = fill_crypto_xform(&cxf, type, prm);
 	if (rc != 0)
@@ -575,7 +602,7 @@ rte_ipsec_sa_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm,
 
 	/* fill replay window related fields */
 	if (nb != 0)
-		fill_sa_replay(sa, wsz, nb);
+		fill_sa_replay(sa, wsz, nb, prm->ipsec_xform.esn.value);
 
 	return sz;
 }
@@ -627,19 +654,25 @@ uint16_t
 pkt_flag_process(const struct rte_ipsec_session *ss,
 		struct rte_mbuf *mb[], uint16_t num)
 {
-	uint32_t i, k;
+	uint32_t i, k, bytes;
 	uint32_t dr[num];
 
 	RTE_SET_USED(ss);
 
 	k = 0;
+	bytes = 0;
 	for (i = 0; i != num; i++) {
-		if ((mb[i]->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED) == 0)
+		if ((mb[i]->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED) == 0) {
 			k++;
+			bytes += mb[i]->pkt_len;
+		} else
 			dr[i - k] = i;
 	}
 
+	ss->sa->statistics.count += k;
+	ss->sa->statistics.bytes += bytes;
+
 	/* handle unprocessed mbufs */
 	if (k != num) {
 		rte_errno = EBADMSG;
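
The diff above threads three new pieces of configuration through struct rte_ipsec_sa_prm: the udp_encap option together with udp.sport/udp.dport for NAT-T, esn.value as the initial sequence number for both the outbound counter and the inbound replay window, and per-SA statistics updated in pkt_flag_process. Below is a minimal, illustrative sketch (not part of the patch) of how an application might exercise these fields when creating an outbound tunnel SA; the AES-GCM transform, key, SPI, addresses, ports and initial SQN are placeholder assumptions.

#include <stdint.h>
#include <netinet/in.h>

#include <rte_ip.h>
#include <rte_malloc.h>
#include <rte_crypto_sym.h>
#include <rte_security.h>
#include <rte_ipsec.h>

/* Build an outbound UDP-encapsulated (NAT-T) ESP tunnel SA with a
 * non-default starting sequence number.  All concrete values below
 * (key, SPI, addresses, ports, initial SQN) are placeholders.
 */
static struct rte_ipsec_sa *
create_natt_tunnel_sa(void)
{
	static uint8_t key[16];		/* placeholder AES-128 key (all zeros) */
	struct rte_ipv4_hdr outer = {
		.version_ihl = 0x45,
		.time_to_live = 64,
		/* outer header carries UDP; the library appends the UDP
		 * header to this template when udp_encap is set */
		.next_proto_id = IPPROTO_UDP,
		.src_addr = rte_cpu_to_be_32(RTE_IPV4(192, 0, 2, 1)),
		.dst_addr = rte_cpu_to_be_32(RTE_IPV4(198, 51, 100, 1)),
	};
	struct rte_crypto_sym_xform xf = {
		.type = RTE_CRYPTO_SYM_XFORM_AEAD,
		.aead = {
			.op = RTE_CRYPTO_AEAD_OP_ENCRYPT,
			.algo = RTE_CRYPTO_AEAD_AES_GCM,
			.key = { .data = key, .length = sizeof(key) },
			.iv = { .offset = 0, .length = 12 },
			.digest_length = 16,
		},
	};
	struct rte_ipsec_sa_prm prm = {
		.crypto_xform = &xf,
		.ipsec_xform = {
			.spi = 0x1000,
			.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
			.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
			.tunnel = { .type = RTE_SECURITY_IPSEC_TUNNEL_IPV4 },
			.options = { .udp_encap = 1 },	/* request NAT-T */
			.udp = { .sport = 4500, .dport = 4500 },
			.esn = { .value = 1000 },	/* initial sequence number */
		},
		.tun = {
			.hdr_len = sizeof(outer),
			.hdr_l3_off = 0,
			.hdr = &outer,
		},
	};
	struct rte_ipsec_sa *sa;
	int sz;

	/* rte_ipsec_sa_size() accounts for the replay window derived from
	 * the SA parameters; allocate and initialize the SA in one go */
	sz = rte_ipsec_sa_size(&prm);
	if (sz < 0)
		return NULL;
	sa = rte_zmalloc(NULL, sz, RTE_CACHE_LINE_SIZE);
	if (sa == NULL)
		return NULL;
	if (rte_ipsec_sa_init(sa, &prm, sz) < 0) {
		rte_free(sa);
		return NULL;
	}
	return sa;
}

Note that with udp_encap set the library extends the tunnel header template with the UDP header itself (see esp_outb_tun_init above), so prm.tun.hdr only needs to carry the outer IP header, and rte_ipsec_sa_init() verifies that the template plus UDP header still fits into sa->hdr.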