X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=lib%2Fipsec%2Fsa.c;h=59a547637dfd29f8f16ac36deb681af537e9f716;hb=670692191a938fb8aaa887aa268437aacc51de7d;hp=e59189d215b3837ee43d8f3a768aba77e6f5ace1;hpb=99a2dd955fba6e4cc23b77d590a033650ced9c45;p=dpdk.git

diff --git a/lib/ipsec/sa.c b/lib/ipsec/sa.c
index e59189d215..59a547637d 100644
--- a/lib/ipsec/sa.c
+++ b/lib/ipsec/sa.c
@@ -5,15 +5,13 @@
 #include <rte_ipsec.h>
 #include <rte_esp.h>
 #include <rte_ip.h>
+#include <rte_udp.h>
 #include <rte_errno.h>
-#include <rte_cryptodev.h>
 
 #include "sa.h"
 #include "ipsec_sqn.h"
 #include "crypto.h"
-#include "iph.h"
 #include "misc.h"
-#include "pad.h"
 
 #define MBUF_MAX_L2_LEN		RTE_LEN2MASK(RTE_MBUF_L2_LEN_BITS, uint64_t)
 #define MBUF_MAX_L3_LEN		RTE_LEN2MASK(RTE_MBUF_L3_LEN_BITS, uint64_t)
@@ -47,6 +45,14 @@ fill_crypto_xform(struct crypto_xform *xform, uint64_t type,
 		if (xfn != NULL)
 			return -EINVAL;
 		xform->aead = &xf->aead;
+
+	/* GMAC has only auth */
+	} else if (xf->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+			xf->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) {
+		if (xfn != NULL)
+			return -EINVAL;
+		xform->auth = &xf->auth;
+
 	/*
 	 * CIPHER+AUTH xforms are expected in strict order,
 	 * depending on SA direction:
@@ -126,7 +132,7 @@ ipsec_sa_size(uint64_t type, uint32_t *wnd_sz, uint32_t *nb_bucket)
 	/*
 	 * RFC 4303 recommends 64 as minimum window size.
	 * there is no point to use ESN mode without SQN window,
-	 * so make sure we have at least 64 window when ESN is enalbed.
+	 * so make sure we have at least 64 window when ESN is enabled.
 	 */
 	wsz = ((type & RTE_IPSEC_SATP_ESN_MASK) ==
 		RTE_IPSEC_SATP_ESN_DISABLE) ?
@@ -208,6 +214,10 @@ fill_sa_type(const struct rte_ipsec_sa_prm *prm, uint64_t *type)
 	} else
 		return -EINVAL;
 
+	/* check for UDP encapsulation flag */
+	if (prm->ipsec_xform.options.udp_encap == 1)
+		tp |= RTE_IPSEC_SATP_NATT_ENABLE;
+
 	/* check for ESN flag */
 	if (prm->ipsec_xform.options.esn == 0)
 		tp |= RTE_IPSEC_SATP_ESN_DISABLE;
@@ -247,12 +257,13 @@ esp_inb_init(struct rte_ipsec_sa *sa)
 	sa->ctp.cipher.length = sa->icv_len + sa->ctp.cipher.offset;
 
 	/*
-	 * for AEAD and NULL algorithms we can assume that
+	 * for AEAD algorithms we can assume that
 	 * auth and cipher offsets would be equal.
 	 */
 	switch (sa->algo_type) {
 	case ALGO_TYPE_AES_GCM:
-	case ALGO_TYPE_NULL:
+	case ALGO_TYPE_AES_CCM:
+	case ALGO_TYPE_CHACHA20_POLY1305:
 		sa->ctp.auth.raw = sa->ctp.cipher.raw;
 		break;
 	default:
@@ -279,11 +290,11 @@ esp_inb_tun_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm)
 /*
  * Init ESP outbound specific things.
  */
 static void
-esp_outb_init(struct rte_ipsec_sa *sa, uint32_t hlen)
+esp_outb_init(struct rte_ipsec_sa *sa, uint32_t hlen, uint64_t sqn)
 {
 	uint8_t algo_type;
 
-	sa->sqn.outb = 1;
+	sa->sqn.outb = sqn > 1 ? sqn : 1;
 
 	algo_type = sa->algo_type;
@@ -294,6 +305,8 @@ esp_outb_init(struct rte_ipsec_sa *sa, uint32_t hlen)
 
 	switch (algo_type) {
 	case ALGO_TYPE_AES_GCM:
+	case ALGO_TYPE_AES_CCM:
+	case ALGO_TYPE_CHACHA20_POLY1305:
 	case ALGO_TYPE_AES_CTR:
 	case ALGO_TYPE_NULL:
 		sa->ctp.cipher.offset = hlen + sizeof(struct rte_esp_hdr) +
@@ -305,15 +318,20 @@ esp_outb_init(struct rte_ipsec_sa *sa, uint32_t hlen)
 		sa->ctp.cipher.offset = hlen + sizeof(struct rte_esp_hdr);
 		sa->ctp.cipher.length = sa->iv_len;
 		break;
+	case ALGO_TYPE_AES_GMAC:
+		sa->ctp.cipher.offset = 0;
+		sa->ctp.cipher.length = 0;
+		break;
 	}
 
 	/*
-	 * for AEAD and NULL algorithms we can assume that
+	 * for AEAD algorithms we can assume that
 	 * auth and cipher offsets would be equal.
 	 */
 	switch (algo_type) {
 	case ALGO_TYPE_AES_GCM:
-	case ALGO_TYPE_NULL:
+	case ALGO_TYPE_AES_CCM:
+	case ALGO_TYPE_CHACHA20_POLY1305:
 		sa->ctp.auth.raw = sa->ctp.cipher.raw;
 		break;
 	default:
@@ -338,13 +356,23 @@ esp_outb_tun_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm)
 	sa->hdr_len = prm->tun.hdr_len;
 	sa->hdr_l3_off = prm->tun.hdr_l3_off;
 
+	memcpy(sa->hdr, prm->tun.hdr, prm->tun.hdr_len);
+
+	/* insert UDP header if UDP encapsulation is enabled */
+	if (sa->type & RTE_IPSEC_SATP_NATT_ENABLE) {
+		struct rte_udp_hdr *udph = (struct rte_udp_hdr *)
+			&sa->hdr[prm->tun.hdr_len];
+		sa->hdr_len += sizeof(struct rte_udp_hdr);
+		udph->src_port = rte_cpu_to_be_16(prm->ipsec_xform.udp.sport);
+		udph->dst_port = rte_cpu_to_be_16(prm->ipsec_xform.udp.dport);
+		udph->dgram_cksum = 0;
+	}
+
 	/* update l2_len and l3_len fields for outbound mbuf */
 	sa->tx_offload.val = rte_mbuf_tx_offload(sa->hdr_l3_off,
 		sa->hdr_len - sa->hdr_l3_off, 0, 0, 0, 0, 0);
 
-	memcpy(sa->hdr, prm->tun.hdr, sa->hdr_len);
-
-	esp_outb_init(sa, sa->hdr_len);
+	esp_outb_init(sa, sa->hdr_len, prm->ipsec_xform.esn.value);
 }
 
 /*
@@ -355,7 +383,8 @@ esp_sa_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm,
 	const struct crypto_xform *cxf)
 {
 	static const uint64_t msk = RTE_IPSEC_SATP_DIR_MASK |
-				RTE_IPSEC_SATP_MODE_MASK;
+				RTE_IPSEC_SATP_MODE_MASK |
+				RTE_IPSEC_SATP_NATT_MASK;
 
 	if (prm->ipsec_xform.options.ecn)
 		sa->tos_mask |= RTE_IPV4_HDR_ECN_MASK;
@@ -374,13 +403,39 @@ esp_sa_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm,
 			sa->pad_align = IPSEC_PAD_AES_GCM;
 			sa->algo_type = ALGO_TYPE_AES_GCM;
 			break;
+		case RTE_CRYPTO_AEAD_AES_CCM:
+			/* RFC 4309 */
+			sa->aad_len = sizeof(struct aead_ccm_aad);
+			sa->icv_len = cxf->aead->digest_length;
+			sa->iv_ofs = cxf->aead->iv.offset;
+			sa->iv_len = sizeof(uint64_t);
+			sa->pad_align = IPSEC_PAD_AES_CCM;
+			sa->algo_type = ALGO_TYPE_AES_CCM;
+			break;
+		case RTE_CRYPTO_AEAD_CHACHA20_POLY1305:
+			/* RFC 7634 & 8439 */
+			sa->aad_len = sizeof(struct aead_chacha20_poly1305_aad);
+			sa->icv_len = cxf->aead->digest_length;
+			sa->iv_ofs = cxf->aead->iv.offset;
+			sa->iv_len = sizeof(uint64_t);
+			sa->pad_align = IPSEC_PAD_CHACHA20_POLY1305;
+			sa->algo_type = ALGO_TYPE_CHACHA20_POLY1305;
+			break;
 		default:
 			return -EINVAL;
 		}
+	} else if (cxf->auth->algo == RTE_CRYPTO_AUTH_AES_GMAC) {
+		/* RFC 4543 */
+		/* AES-GMAC is a special case of auth that needs IV */
+		sa->pad_align = IPSEC_PAD_AES_GMAC;
+		sa->iv_len = sizeof(uint64_t);
+		sa->icv_len = cxf->auth->digest_length;
+		sa->iv_ofs = cxf->auth->iv.offset;
+		sa->algo_type = ALGO_TYPE_AES_GMAC;
+
 	} else {
 		sa->icv_len = cxf->auth->digest_length;
 		sa->iv_ofs = cxf->cipher->iv.offset;
-		sa->sqh_len = IS_ESN(sa) ? sizeof(uint32_t) : 0;
 
 		switch (cxf->cipher->algo) {
 		case RTE_CRYPTO_CIPHER_NULL:
@@ -414,6 +469,7 @@ esp_sa_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm,
 		}
 	}
 
+	sa->sqh_len = IS_ESN(sa) ? sizeof(uint32_t) : 0;
 	sa->udata = prm->userdata;
 	sa->spi = rte_cpu_to_be_32(prm->ipsec_xform.spi);
 	sa->salt = prm->ipsec_xform.salt;
@@ -431,12 +487,18 @@ esp_sa_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm,
 	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TRANS):
 		esp_inb_init(sa);
 		break;
+	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV4 |
+			RTE_IPSEC_SATP_NATT_ENABLE):
+	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV6 |
+			RTE_IPSEC_SATP_NATT_ENABLE):
 	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV4):
 	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV6):
 		esp_outb_tun_init(sa, prm);
 		break;
+	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TRANS |
+			RTE_IPSEC_SATP_NATT_ENABLE):
 	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TRANS):
-		esp_outb_init(sa, 0);
+		esp_outb_init(sa, 0, prm->ipsec_xform.esn.value);
 		break;
 	}
 
@@ -447,15 +509,19 @@ esp_sa_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm,
  * helper function, init SA replay structure.
  */
 static void
-fill_sa_replay(struct rte_ipsec_sa *sa, uint32_t wnd_sz, uint32_t nb_bucket)
+fill_sa_replay(struct rte_ipsec_sa *sa, uint32_t wnd_sz, uint32_t nb_bucket,
+	uint64_t sqn)
 {
 	sa->replay.win_sz = wnd_sz;
 	sa->replay.nb_bucket = nb_bucket;
 	sa->replay.bucket_index_mask = nb_bucket - 1;
 	sa->sqn.inb.rsn[0] = (struct replay_sqn *)(sa + 1);
-	if ((sa->type & RTE_IPSEC_SATP_SQN_MASK) == RTE_IPSEC_SATP_SQN_ATOM)
+	sa->sqn.inb.rsn[0]->sqn = sqn;
+	if ((sa->type & RTE_IPSEC_SATP_SQN_MASK) == RTE_IPSEC_SATP_SQN_ATOM) {
 		sa->sqn.inb.rsn[1] = (struct replay_sqn *)
 			((uintptr_t)sa->sqn.inb.rsn[0] + rsn_size(nb_bucket));
+		sa->sqn.inb.rsn[1]->sqn = sqn;
+	}
 }
 
 int
@@ -507,9 +573,13 @@ rte_ipsec_sa_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm,
 	if (prm->ipsec_xform.proto != RTE_SECURITY_IPSEC_SA_PROTO_ESP)
 		return -EINVAL;
 
-	if (prm->ipsec_xform.mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL &&
-			prm->tun.hdr_len > sizeof(sa->hdr))
-		return -EINVAL;
+	if (prm->ipsec_xform.mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) {
+		uint32_t hlen = prm->tun.hdr_len;
+		if (type & RTE_IPSEC_SATP_NATT_ENABLE)
+			hlen += sizeof(struct rte_udp_hdr);
+		if (hlen > sizeof(sa->hdr))
+			return -EINVAL;
+	}
 
 	rc = fill_crypto_xform(&cxf, type, prm);
 	if (rc != 0)
@@ -531,7 +601,7 @@ rte_ipsec_sa_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm,
 
 	/* fill replay window related fields */
 	if (nb != 0)
-		fill_sa_replay(sa, wsz, nb);
+		fill_sa_replay(sa, wsz, nb, prm->ipsec_xform.esn.value);
 
 	return sz;
 }
@@ -583,19 +653,25 @@ uint16_t
 pkt_flag_process(const struct rte_ipsec_session *ss,
 		struct rte_mbuf *mb[], uint16_t num)
 {
-	uint32_t i, k;
+	uint32_t i, k, bytes;
 	uint32_t dr[num];
 
 	RTE_SET_USED(ss);
 
 	k = 0;
+	bytes = 0;
 	for (i = 0; i != num; i++) {
-		if ((mb[i]->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED) == 0)
+		if ((mb[i]->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED) == 0) {
 			k++;
-		else
+			bytes += mb[i]->pkt_len;
+		} else
 			dr[i - k] = i;
 	}
 
+	ss->sa->statistics.count += k;
+	ss->sa->statistics.bytes += bytes;
+
 	/* handle unprocessed mbufs */
 	if (k != num) {
 		rte_errno = EBADMSG;
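
Usage note: the sketch below shows how an application might drive the NAT-T and ESN paths introduced above when creating an outbound tunnel SA. Setting options.udp_encap makes fill_sa_type() set RTE_IPSEC_SATP_NATT_ENABLE, the udp.sport/udp.dport pair is copied into the header template by esp_outb_tun_init(), and esn.value seeds the outbound sequence number. This is a minimal illustration only, not code from this patch: the helper name create_natt_sa, the SPI, the port numbers and the allocation scheme are hypothetical placeholders, and the crypto transform chain and outer IP header must be prepared by the caller as usual.

#include <rte_ipsec.h>
#include <rte_malloc.h>
#include <rte_security.h>

/* Hypothetical helper: create a UDP-encapsulated (RFC 3948) outbound
 * tunnel SA. 'xform' is the caller-prepared crypto transform chain,
 * 'hdr'/'hdr_len' describe the outer IP header template.
 */
static struct rte_ipsec_sa *
create_natt_sa(struct rte_crypto_sym_xform *xform,
	const uint8_t *hdr, uint8_t hdr_len)
{
	struct rte_ipsec_sa_prm prm = {
		.crypto_xform = xform,
		.tun.hdr = hdr,
		.tun.hdr_len = hdr_len,
		.tun.hdr_l3_off = 0,
	};
	struct rte_ipsec_sa *sa;
	int32_t sz;

	prm.ipsec_xform.spi = 1000;	/* placeholder SPI */
	prm.ipsec_xform.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS;
	prm.ipsec_xform.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP;
	prm.ipsec_xform.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL;
	prm.ipsec_xform.tunnel.type = RTE_SECURITY_IPSEC_TUNNEL_IPV4;

	/* the options handled by the code above */
	prm.ipsec_xform.options.udp_encap = 1;	/* NAT-T */
	prm.ipsec_xform.udp.sport = 4500;	/* placeholder ports */
	prm.ipsec_xform.udp.dport = 4500;
	prm.ipsec_xform.esn.value = 1;	/* starting sequence number */

	sz = rte_ipsec_sa_size(&prm);
	if (sz < 0)
		return NULL;

	sa = rte_zmalloc(NULL, sz, RTE_CACHE_LINE_SIZE);
	if (sa == NULL)
		return NULL;

	/* fails with -EINVAL if tunnel header + UDP header overflow sa->hdr */
	if (rte_ipsec_sa_init(sa, &prm, sz) < 0) {
		rte_free(sa);
		return NULL;
	}
	return sa;
}

On the inbound side the same esn.value is used to seed both copies of the replay window through fill_sa_replay(), as shown in the hunks above.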