diff --git a/lib/librte_ipsec/sa.c b/lib/librte_ipsec/sa.c
index 1cb71caa12..ada195cf84 100644
--- a/lib/librte_ipsec/sa.c
+++ b/lib/librte_ipsec/sa.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018-2020 Intel Corporation
  */
 
 #include
@@ -77,7 +77,7 @@ fill_crypto_xform(struct crypto_xform *xform, uint64_t type,
 	return 0;
 }
 
-uint64_t __rte_experimental
+uint64_t
 rte_ipsec_sa_type(const struct rte_ipsec_sa *sa)
 {
 	return sa->type;
@@ -149,7 +149,7 @@ ipsec_sa_size(uint64_t type, uint32_t *wnd_sz, uint32_t *nb_bucket)
 	return sz;
 }
 
-void __rte_experimental
+void
 rte_ipsec_sa_fini(struct rte_ipsec_sa *sa)
 {
 	memset(sa, 0, sa->size);
@@ -214,6 +214,18 @@ fill_sa_type(const struct rte_ipsec_sa_prm *prm, uint64_t *type)
 	else
 		tp |= RTE_IPSEC_SATP_ESN_ENABLE;
 
+	/* check for ECN flag */
+	if (prm->ipsec_xform.options.ecn == 0)
+		tp |= RTE_IPSEC_SATP_ECN_DISABLE;
+	else
+		tp |= RTE_IPSEC_SATP_ECN_ENABLE;
+
+	/* check for DSCP flag */
+	if (prm->ipsec_xform.options.copy_dscp == 0)
+		tp |= RTE_IPSEC_SATP_DSCP_DISABLE;
+	else
+		tp |= RTE_IPSEC_SATP_DSCP_ENABLE;
+
 	/* interpret flags */
 	if (prm->flags & RTE_IPSEC_SAFLAG_SQN_ATOM)
 		tp |= RTE_IPSEC_SATP_SQN_ATOM;
@@ -231,10 +243,26 @@ static void
 esp_inb_init(struct rte_ipsec_sa *sa)
 {
 	/* these params may differ with new algorithms support */
-	sa->ctp.auth.offset = 0;
-	sa->ctp.auth.length = sa->icv_len - sa->sqh_len;
 	sa->ctp.cipher.offset = sizeof(struct rte_esp_hdr) + sa->iv_len;
 	sa->ctp.cipher.length = sa->icv_len + sa->ctp.cipher.offset;
+
+	/*
+	 * for AEAD and NULL algorithms we can assume that
+	 * auth and cipher offsets would be equal.
+	 */
+	switch (sa->algo_type) {
+	case ALGO_TYPE_AES_GCM:
+	case ALGO_TYPE_NULL:
+		sa->ctp.auth.raw = sa->ctp.cipher.raw;
+		break;
+	default:
+		sa->ctp.auth.offset = 0;
+		sa->ctp.auth.length = sa->icv_len - sa->sqh_len;
+		sa->cofs.ofs.cipher.tail = sa->sqh_len;
+		break;
+	}
+
+	sa->cofs.ofs.cipher.head = sa->ctp.cipher.offset - sa->ctp.auth.offset;
 }
 
 /*
@@ -257,13 +285,13 @@ esp_outb_init(struct rte_ipsec_sa *sa, uint32_t hlen)
 
 	sa->sqn.outb.raw = 1;
 
-	/* these params may differ with new algorithms support */
-	sa->ctp.auth.offset = hlen;
-	sa->ctp.auth.length = sizeof(struct rte_esp_hdr) +
-		sa->iv_len + sa->sqh_len;
-
 	algo_type = sa->algo_type;
 
+	/*
+	 * Setup auth and cipher length and offset.
+	 * these params may differ with new algorithms support
+	 */
+
 	switch (algo_type) {
 	case ALGO_TYPE_AES_GCM:
 	case ALGO_TYPE_AES_CTR:
@@ -274,11 +302,30 @@ esp_outb_init(struct rte_ipsec_sa *sa, uint32_t hlen)
 		break;
 	case ALGO_TYPE_AES_CBC:
 	case ALGO_TYPE_3DES_CBC:
-		sa->ctp.cipher.offset = sa->hdr_len +
-			sizeof(struct rte_esp_hdr);
+		sa->ctp.cipher.offset = hlen + sizeof(struct rte_esp_hdr);
 		sa->ctp.cipher.length = sa->iv_len;
 		break;
 	}
+
+	/*
+	 * for AEAD and NULL algorithms we can assume that
+	 * auth and cipher offsets would be equal.
+	 */
+	switch (algo_type) {
+	case ALGO_TYPE_AES_GCM:
+	case ALGO_TYPE_NULL:
+		sa->ctp.auth.raw = sa->ctp.cipher.raw;
+		break;
+	default:
+		sa->ctp.auth.offset = hlen;
+		sa->ctp.auth.length = sizeof(struct rte_esp_hdr) +
+			sa->iv_len + sa->sqh_len;
+		break;
+	}
+
+	sa->cofs.ofs.cipher.head = sa->ctp.cipher.offset - sa->ctp.auth.offset;
+	sa->cofs.ofs.cipher.tail = (sa->ctp.auth.offset + sa->ctp.auth.length) -
+		(sa->ctp.cipher.offset + sa->ctp.cipher.length);
 }
 
 /*
@@ -310,6 +357,12 @@ esp_sa_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm,
 	static const uint64_t msk = RTE_IPSEC_SATP_DIR_MASK |
 				RTE_IPSEC_SATP_MODE_MASK;
 
+	if (prm->ipsec_xform.options.ecn)
+		sa->tos_mask |= RTE_IPV4_HDR_ECN_MASK;
+
+	if (prm->ipsec_xform.options.copy_dscp)
+		sa->tos_mask |= RTE_IPV4_HDR_DSCP_MASK;
+
 	if (cxf->aead != NULL) {
 		switch (cxf->aead->algo) {
 		case RTE_CRYPTO_AEAD_AES_GCM:
@@ -405,7 +458,7 @@ fill_sa_replay(struct rte_ipsec_sa *sa, uint32_t wnd_sz, uint32_t nb_bucket)
 		((uintptr_t)sa->sqn.inb.rsn[0] + rsn_size(nb_bucket));
 }
 
-int __rte_experimental
+int
 rte_ipsec_sa_size(const struct rte_ipsec_sa_prm *prm)
 {
 	uint64_t type;
@@ -421,11 +474,11 @@ rte_ipsec_sa_size(const struct rte_ipsec_sa_prm *prm)
 		return rc;
 
 	/* determine required size */
-	wsz = prm->replay_win_sz;
+	wsz = prm->ipsec_xform.replay_win_sz;
 	return ipsec_sa_size(type, &wsz, &nb);
 }
 
-int __rte_experimental
+int
 rte_ipsec_sa_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm,
 	uint32_t size)
 {
@@ -443,7 +496,7 @@ rte_ipsec_sa_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm,
 		return rc;
 
 	/* determine required size */
-	wsz = prm->replay_win_sz;
+	wsz = prm->ipsec_xform.replay_win_sz;
 	sz = ipsec_sa_size(type, &wsz, &nb);
 	if (sz < 0)
 		return sz;
@@ -526,9 +579,9 @@ lksd_proto_prepare(const struct rte_ipsec_session *ss,
  * - inbound/outbound for RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL
  * - outbound for RTE_SECURITY_ACTION_TYPE_NONE when ESN is disabled
  */
-static uint16_t
-pkt_flag_process(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
-	uint16_t num)
+uint16_t
+pkt_flag_process(const struct rte_ipsec_session *ss,
+	struct rte_mbuf *mb[], uint16_t num)
 {
 	uint32_t i, k;
 	uint32_t dr[num];
@@ -570,21 +623,59 @@ lksd_none_pkt_func_select(const struct rte_ipsec_sa *sa,
 	switch (sa->type & msk) {
 	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV4):
 	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV6):
-		pf->prepare = esp_inb_pkt_prepare;
+		pf->prepare.async = esp_inb_pkt_prepare;
 		pf->process = esp_inb_tun_pkt_process;
 		break;
 	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TRANS):
-		pf->prepare = esp_inb_pkt_prepare;
+		pf->prepare.async = esp_inb_pkt_prepare;
 		pf->process = esp_inb_trs_pkt_process;
 		break;
	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV4):
 	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV6):
-		pf->prepare = esp_outb_tun_prepare;
+		pf->prepare.async = esp_outb_tun_prepare;
 		pf->process = (sa->sqh_len != 0) ?
 			esp_outb_sqh_process : pkt_flag_process;
 		break;
 	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TRANS):
-		pf->prepare = esp_outb_trs_prepare;
+		pf->prepare.async = esp_outb_trs_prepare;
+		pf->process = (sa->sqh_len != 0) ?
+			esp_outb_sqh_process : pkt_flag_process;
+		break;
+	default:
+		rc = -ENOTSUP;
+	}
+
+	return rc;
+}
+
+static int
+cpu_crypto_pkt_func_select(const struct rte_ipsec_sa *sa,
+	struct rte_ipsec_sa_pkt_func *pf)
+{
+	int32_t rc;
+
+	static const uint64_t msk = RTE_IPSEC_SATP_DIR_MASK |
+			RTE_IPSEC_SATP_MODE_MASK;
+
+	rc = 0;
+	switch (sa->type & msk) {
+	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV4):
+	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV6):
+		pf->prepare.sync = cpu_inb_pkt_prepare;
+		pf->process = esp_inb_tun_pkt_process;
+		break;
+	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TRANS):
+		pf->prepare.sync = cpu_inb_pkt_prepare;
+		pf->process = esp_inb_trs_pkt_process;
+		break;
+	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV4):
+	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV6):
+		pf->prepare.sync = cpu_outb_tun_pkt_prepare;
+		pf->process = (sa->sqh_len != 0) ?
+			esp_outb_sqh_process : pkt_flag_process;
+		break;
+	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TRANS):
+		pf->prepare.sync = cpu_outb_trs_pkt_prepare;
 		pf->process = (sa->sqh_len != 0) ?
 			esp_outb_sqh_process : pkt_flag_process;
 		break;
@@ -612,10 +703,10 @@ inline_crypto_pkt_func_select(const struct rte_ipsec_sa *sa,
 	switch (sa->type & msk) {
 	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV4):
 	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV6):
-		pf->process = esp_inb_tun_pkt_process;
+		pf->process = inline_inb_tun_pkt_process;
 		break;
 	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TRANS):
-		pf->process = esp_inb_trs_pkt_process;
+		pf->process = inline_inb_trs_pkt_process;
 		break;
 	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV4):
 	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV6):
@@ -642,7 +733,7 @@ ipsec_sa_pkt_func_select(const struct rte_ipsec_session *ss,
 	int32_t rc;
 
 	rc = 0;
-	pf[0] = (struct rte_ipsec_sa_pkt_func) { 0 };
+	pf[0] = (struct rte_ipsec_sa_pkt_func) { {NULL}, NULL };
 
 	switch (ss->type) {
 	case RTE_SECURITY_ACTION_TYPE_NONE:
@@ -659,9 +750,12 @@ ipsec_sa_pkt_func_select(const struct rte_ipsec_session *ss,
 		pf->process = inline_proto_outb_pkt_process;
 		break;
 	case RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL:
-		pf->prepare = lksd_proto_prepare;
+		pf->prepare.async = lksd_proto_prepare;
 		pf->process = pkt_flag_process;
 		break;
+	case RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO:
+		rc = cpu_crypto_pkt_func_select(sa, pf);
+		break;
 	default:
 		rc = -ENOTSUP;
 	}