X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=examples%2Fipsec-secgw%2Fipsec_process.c;h=5012e1a6a4cdad158f629a8d664720fce71fc5e8;hb=0f27fe09ddcf500ab7fa34a1e67dec969c01b70d;hp=e403c461aa6678b94a57fe7bee7ea1e769200655;hpb=3e5f4625dc17064e5e44d7fddaf57d9d9afb7ae4;p=dpdk.git

diff --git a/examples/ipsec-secgw/ipsec_process.c b/examples/ipsec-secgw/ipsec_process.c
index e403c461aa..5012e1a6a4 100644
--- a/examples/ipsec-secgw/ipsec_process.c
+++ b/examples/ipsec-secgw/ipsec_process.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2016-2017 Intel Corporation
+ * Copyright(c) 2016-2020 Intel Corporation
  */
 #include
 #include
@@ -12,23 +12,13 @@
 #include
 #include "ipsec.h"
+#include "ipsec-secgw.h"
 #define SATP_OUT_IPV4(t) \
 	((((t) & RTE_IPSEC_SATP_MODE_MASK) == RTE_IPSEC_SATP_MODE_TRANS && \
 	(((t) & RTE_IPSEC_SATP_IPV_MASK) == RTE_IPSEC_SATP_IPV4)) || \
 	((t) & RTE_IPSEC_SATP_MODE_MASK) == RTE_IPSEC_SATP_MODE_TUNLV4)
-
-/* helper routine to free bulk of packets */
-static inline void
-free_pkts(struct rte_mbuf *mb[], uint32_t n)
-{
-	uint32_t i;
-
-	for (i = 0; i != n; i++)
-		rte_pktmbuf_free(mb[i]);
-}
-
 /* helper routine to free bulk of crypto-ops and related packets */
 static inline void
 free_cops(struct rte_crypto_op *cop[], uint32_t n)
@@ -93,24 +83,20 @@ fill_ipsec_session(struct rte_ipsec_session *ss, struct ipsec_ctx *ctx,
 	int32_t rc;
 	/* setup crypto section */
-	if (ss->type == RTE_SECURITY_ACTION_TYPE_NONE) {
-		if (sa->crypto_session == NULL) {
-			rc = create_session(ctx, sa);
-			if (rc != 0)
-				return rc;
-		}
-		ss->crypto.ses = sa->crypto_session;
+	if (ss->type == RTE_SECURITY_ACTION_TYPE_NONE ||
+			ss->type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO) {
+		RTE_ASSERT(ss->crypto.ses == NULL);
+		rc = create_lookaside_session(ctx, sa, ss);
+		if (rc != 0)
+			return rc;
 	/* setup session action type */
-	} else {
-		if (sa->sec_session == NULL) {
-			rc = create_session(ctx, sa);
-			if (rc != 0)
-				return rc;
-		}
-		ss->security.ses = sa->sec_session;
-		ss->security.ctx = sa->security_ctx;
-		ss->security.ol_flags = sa->ol_flags;
-	}
+	} else if (ss->type == RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL) {
+		RTE_ASSERT(ss->security.ses == NULL);
+		rc = create_lookaside_session(ctx, sa, ss);
+		if (rc != 0)
+			return rc;
+	} else
+		RTE_ASSERT(0);
 	rc = rte_ipsec_session_prepare(ss);
 	if (rc != 0)
@@ -123,7 +109,7 @@ fill_ipsec_session(struct rte_ipsec_session *ss, struct ipsec_ctx *ctx,
  * group input packets by the SA they belong to.
  */
 static uint32_t
-sa_group(struct ipsec_sa *sa_ptr[], struct rte_mbuf *pkts[],
+sa_group(void *sa_ptr[], struct rte_mbuf *pkts[],
 	struct rte_ipsec_group grp[], uint32_t num)
 {
 	uint32_t i, n, spi;
@@ -131,6 +117,7 @@ sa_group(struct ipsec_sa *sa_ptr[], struct rte_mbuf *pkts[],
 	void * const nosa = &spi;
 	sa = nosa;
+	grp[0].m = pkts;
 	for (i = 0, n = 0; i != num; i++) {
 		if (sa != sa_ptr[i]) {
@@ -190,6 +177,93 @@ copy_to_trf(struct ipsec_traffic *trf, uint64_t satp, struct rte_mbuf *mb[],
 	out->num += num;
 }
+static uint32_t
+ipsec_prepare_crypto_group(struct ipsec_ctx *ctx, struct ipsec_sa *sa,
+	struct rte_ipsec_session *ips, struct rte_mbuf **m,
+	unsigned int cnt)
+{
+	struct cdev_qp *cqp;
+	struct rte_crypto_op *cop[cnt];
+	uint32_t j, k;
+	struct ipsec_mbuf_metadata *priv;
+
+	cqp = &ctx->tbl[sa->cdev_id_qp];
+
+	/* for that app each mbuf has its own crypto op */
+	for (j = 0; j != cnt; j++) {
+		priv = get_priv(m[j]);
+		cop[j] = &priv->cop;
+		/*
+		 * this is just to satisfy inbound_sa_check()
+		 * should be removed in future.
+		 */
+		priv->sa = sa;
+	}
+
+	/* prepare and enqueue crypto ops */
+	k = rte_ipsec_pkt_crypto_prepare(ips, m, cop, cnt);
+	if (k != 0)
+		enqueue_cop_bulk(cqp, cop, k);
+
+	return k;
+}
+
+/*
+ * helper routine for inline and cpu(synchronous) processing
+ * this is just to satisfy inbound_sa_check() and get_hop_for_offload_pkt().
+ * Should be removed in future.
+ */
+static inline void
+prep_process_group(void *sa, struct rte_mbuf *mb[], uint32_t cnt)
+{
+	uint32_t j;
+	struct ipsec_mbuf_metadata *priv;
+
+	for (j = 0; j != cnt; j++) {
+		priv = get_priv(mb[j]);
+		priv->sa = sa;
+	}
+}
+
+/*
+ * finish processing of packets successfully decrypted by an inline processor
+ */
+static uint32_t
+ipsec_process_inline_group(struct rte_ipsec_session *ips, void *sa,
+	struct ipsec_traffic *trf, struct rte_mbuf *mb[], uint32_t cnt)
+{
+	uint64_t satp;
+	uint32_t k;
+
+	/* get SA type */
+	satp = rte_ipsec_sa_type(ips->sa);
+	prep_process_group(sa, mb, cnt);
+
+	k = rte_ipsec_pkt_process(ips, mb, cnt);
+	copy_to_trf(trf, satp, mb, k);
+	return k;
+}
+
+/*
+ * process packets synchronously
+ */
+static uint32_t
+ipsec_process_cpu_group(struct rte_ipsec_session *ips, void *sa,
+	struct ipsec_traffic *trf, struct rte_mbuf *mb[], uint32_t cnt)
+{
+	uint64_t satp;
+	uint32_t k;
+
+	/* get SA type */
+	satp = rte_ipsec_sa_type(ips->sa);
+	prep_process_group(sa, mb, cnt);
+
+	k = rte_ipsec_pkt_cpu_prepare(ips, mb, cnt);
+	k = rte_ipsec_pkt_process(ips, mb, k);
+	copy_to_trf(trf, satp, mb, k);
+	return k;
+}
+
 /*
  * Process ipsec packets.
  * If a packet belongs to an SA that is subject to inline-crypto,
@@ -200,14 +274,10 @@ copy_to_trf(struct ipsec_traffic *trf, uint64_t satp, struct rte_mbuf *mb[],
 void
 ipsec_process(struct ipsec_ctx *ctx, struct ipsec_traffic *trf)
 {
-	uint64_t satp;
-	uint32_t i, j, k, n;
+	uint32_t i, k, n;
 	struct ipsec_sa *sa;
-	struct ipsec_mbuf_metadata *priv;
 	struct rte_ipsec_group *pg;
 	struct rte_ipsec_session *ips;
-	struct cdev_qp *cqp;
-	struct rte_crypto_op *cop[RTE_DIM(trf->ipsec.pkts)];
 	struct rte_ipsec_group grp[RTE_DIM(trf->ipsec.pkts)];
 	n = sa_group(trf->ipsec.saptr, trf->ipsec.pkts, grp, trf->ipsec.num);
@@ -215,59 +285,42 @@ ipsec_process(struct ipsec_ctx *ctx, struct ipsec_traffic *trf)
 	for (i = 0; i != n; i++) {
 		pg = grp + i;
-		sa = pg->id.ptr;
+		sa = ipsec_mask_saptr(pg->id.ptr);
-		/* no valid SA found */
-		if (sa == NULL)
-			k = 0;
-
-		ips = &sa->ips;
-		satp = rte_ipsec_sa_type(ips->sa);
+		/* fallback to cryptodev with RX packets which inline
+		 * processor was unable to process
+		 */
+		if (sa != NULL)
+			ips = (pg->id.val & IPSEC_SA_OFFLOAD_FALLBACK_FLAG) ?
+				ipsec_get_fallback_session(sa) :
+				ipsec_get_primary_session(sa);
 		/* no valid HW session for that SA, try to create one */
-		if (ips->crypto.ses == NULL &&
-				fill_ipsec_session(ips, ctx, sa) != 0)
+		if (sa == NULL || (ips->crypto.ses == NULL &&
+				fill_ipsec_session(ips, ctx, sa) != 0))
 			k = 0;
 		/* process packets inline */
-		else if (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
-				sa->type ==
-				RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) {
-
-			/*
-			 * This is just to satisfy inbound_sa_check()
-			 * and get_hop_for_offload_pkt().
-			 * Should be removed in future.
-			 */
-			for (j = 0; j != pg->cnt; j++) {
-				priv = get_priv(pg->m[j]);
-				priv->sa = sa;
+		else {
+			switch (ips->type) {
+			/* enqueue packets to crypto dev */
+			case RTE_SECURITY_ACTION_TYPE_NONE:
+			case RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL:
+				k = ipsec_prepare_crypto_group(ctx, sa, ips,
+					pg->m, pg->cnt);
+				break;
+			case RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO:
+			case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
+				k = ipsec_process_inline_group(ips, sa,
+					trf, pg->m, pg->cnt);
+				break;
+			case RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO:
+				k = ipsec_process_cpu_group(ips, sa,
+					trf, pg->m, pg->cnt);
+				break;
+			default:
+				k = 0;
 			}
-
-			k = rte_ipsec_pkt_process(ips, pg->m, pg->cnt);
-			copy_to_trf(trf, satp, pg->m, k);
-
-		/* enqueue packets to crypto dev */
-		} else {
-
-			cqp = &ctx->tbl[sa->cdev_id_qp];
-
-			/* for that app each mbuf has it's own crypto op */
-			for (j = 0; j != pg->cnt; j++) {
-				priv = get_priv(pg->m[j]);
-				cop[j] = &priv->cop;
-				/*
-				 * this is just to satisfy inbound_sa_check()
-				 * should be removed in future.
-				 */
-				priv->sa = sa;
-			}
-
-			/* prepare and enqueue crypto ops */
-			k = rte_ipsec_pkt_crypto_prepare(ips, pg->m, cop,
-				pg->cnt);
-			if (k != 0)
-				enqueue_cop_bulk(cqp, cop, k);
 		}
 		/* drop packets that cannot be enqueued/processed */
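
Taken together, the hunks give ipsec_process() a simple per-group control flow: for every SA group produced by sa_group(), the SA pointer is unmasked, the primary or fallback rte_ipsec_session is chosen, and the group is dispatched on the session's action type. The sketch below condenses that flow into one function; it is not part of the commit. The name ipsec_dispatch_group is invented for illustration, and the sketch assumes it would live in ipsec_process.c next to the static helpers the patch adds (ipsec_prepare_crypto_group, ipsec_process_inline_group, ipsec_process_cpu_group, fill_ipsec_session).

/*
 * Illustrative only: condensed per-group dispatch mirroring the patched
 * ipsec_process().  ipsec_dispatch_group is a hypothetical name; the other
 * identifiers come from this file and from ipsec.h / ipsec-secgw.h.
 */
static uint32_t
ipsec_dispatch_group(struct ipsec_ctx *ctx, struct ipsec_traffic *trf,
	struct rte_ipsec_group *pg)
{
	struct ipsec_sa *sa;
	struct rte_ipsec_session *ips;

	/* the group id carries the SA pointer plus the fallback flag */
	sa = ipsec_mask_saptr(pg->id.ptr);
	if (sa == NULL)
		return 0;	/* no valid SA: caller drops the whole group */

	/* inline RX failures fall back to the lookaside (cryptodev) session */
	ips = (pg->id.val & IPSEC_SA_OFFLOAD_FALLBACK_FLAG) ?
		ipsec_get_fallback_session(sa) :
		ipsec_get_primary_session(sa);

	/* lazily create the crypto/security session on first use */
	if (ips->crypto.ses == NULL && fill_ipsec_session(ips, ctx, sa) != 0)
		return 0;

	switch (ips->type) {
	case RTE_SECURITY_ACTION_TYPE_NONE:
	case RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL:
		/* lookaside: build crypto ops and enqueue them to a cryptodev */
		return ipsec_prepare_crypto_group(ctx, sa, ips, pg->m, pg->cnt);
	case RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO:
	case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
		/* inline: the NIC already did the crypto, just finish packets */
		return ipsec_process_inline_group(ips, sa, trf, pg->m, pg->cnt);
	case RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO:
		/* synchronous CPU crypto: prepare and process immediately */
		return ipsec_process_cpu_group(ips, sa, trf, pg->m, pg->cnt);
	default:
		return 0;
	}
}

Whatever count such a dispatch returns, the loop in ipsec_process() then drops the remaining packets of the group (pg->m[k] onward), as the trailing "drop packets that cannot be enqueued/processed" comment indicates, so each packet is either handed to exactly one processing path or freed.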