X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=examples%2Fipsec-secgw%2Fipsec_process.c;h=5012e1a6a4cdad158f629a8d664720fce71fc5e8;hb=fdadccfa3fb97acf06641a5fa823b05de1cca0bb;hp=239d81ef66480a3ff167d19fd083c0dc656c74ad;hpb=4a67af84f11335e956fae9bcbece2ed12f02019e;p=dpdk.git

diff --git a/examples/ipsec-secgw/ipsec_process.c b/examples/ipsec-secgw/ipsec_process.c
index 239d81ef66..5012e1a6a4 100644
--- a/examples/ipsec-secgw/ipsec_process.c
+++ b/examples/ipsec-secgw/ipsec_process.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2016-2017 Intel Corporation
+ * Copyright(c) 2016-2020 Intel Corporation
  */
 #include <sys/types.h>
 #include <netinet/in.h>
@@ -12,23 +12,13 @@
 #include <rte_mbuf.h>
 
 #include "ipsec.h"
+#include "ipsec-secgw.h"
 
 #define SATP_OUT_IPV4(t)	\
 	((((t) & RTE_IPSEC_SATP_MODE_MASK) == RTE_IPSEC_SATP_MODE_TRANS && \
 	(((t) & RTE_IPSEC_SATP_IPV_MASK) == RTE_IPSEC_SATP_IPV4)) || \
 	((t) & RTE_IPSEC_SATP_MODE_MASK) == RTE_IPSEC_SATP_MODE_TUNLV4)
-
-/* helper routine to free bulk of packets */
-static inline void
-free_pkts(struct rte_mbuf *mb[], uint32_t n)
-{
-	uint32_t i;
-
-	for (i = 0; i != n; i++)
-		rte_pktmbuf_free(mb[i]);
-}
-
 /* helper routine to free bulk of crypto-ops and related packets */
 static inline void
 free_cops(struct rte_crypto_op *cop[], uint32_t n)
@@ -93,7 +83,8 @@ fill_ipsec_session(struct rte_ipsec_session *ss, struct ipsec_ctx *ctx,
 	int32_t rc;
 
 	/* setup crypto section */
-	if (ss->type == RTE_SECURITY_ACTION_TYPE_NONE) {
+	if (ss->type == RTE_SECURITY_ACTION_TYPE_NONE ||
+			ss->type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO) {
 		RTE_ASSERT(ss->crypto.ses == NULL);
 		rc = create_lookaside_session(ctx, sa, ss);
 		if (rc != 0)
@@ -118,7 +109,7 @@ fill_ipsec_session(struct rte_ipsec_session *ss, struct ipsec_ctx *ctx,
  * group input packets by the SA they belong to.
  */
 static uint32_t
-sa_group(struct ipsec_sa *sa_ptr[], struct rte_mbuf *pkts[],
+sa_group(void *sa_ptr[], struct rte_mbuf *pkts[],
 	struct rte_ipsec_group grp[], uint32_t num)
 {
 	uint32_t i, n, spi;
@@ -126,6 +117,7 @@ sa_group(struct ipsec_sa *sa_ptr[], struct rte_mbuf *pkts[],
 	void * const nosa = &spi;
 
 	sa = nosa;
+	grp[0].m = pkts;
 	for (i = 0, n = 0; i != num; i++) {
 
 		if (sa != sa_ptr[i]) {
@@ -185,6 +177,93 @@ copy_to_trf(struct ipsec_traffic *trf, uint64_t satp, struct rte_mbuf *mb[],
 	out->num += num;
 }
 
+static uint32_t
+ipsec_prepare_crypto_group(struct ipsec_ctx *ctx, struct ipsec_sa *sa,
+	struct rte_ipsec_session *ips, struct rte_mbuf **m,
+	unsigned int cnt)
+{
+	struct cdev_qp *cqp;
+	struct rte_crypto_op *cop[cnt];
+	uint32_t j, k;
+	struct ipsec_mbuf_metadata *priv;
+
+	cqp = &ctx->tbl[sa->cdev_id_qp];
+
+	/* in this app each mbuf has its own crypto op */
+	for (j = 0; j != cnt; j++) {
+		priv = get_priv(m[j]);
+		cop[j] = &priv->cop;
+		/*
+		 * this is just to satisfy inbound_sa_check();
+		 * should be removed in future.
+		 */
+		priv->sa = sa;
+	}
+
+	/* prepare and enqueue crypto ops */
+	k = rte_ipsec_pkt_crypto_prepare(ips, m, cop, cnt);
+	if (k != 0)
+		enqueue_cop_bulk(cqp, cop, k);
+
+	return k;
+}
+
+/*
+ * helper routine for inline and cpu (synchronous) processing;
+ * this is just to satisfy inbound_sa_check() and get_hop_for_offload_pkt().
+ * Should be removed in future.
+ */
+static inline void
+prep_process_group(void *sa, struct rte_mbuf *mb[], uint32_t cnt)
+{
+	uint32_t j;
+	struct ipsec_mbuf_metadata *priv;
+
+	for (j = 0; j != cnt; j++) {
+		priv = get_priv(mb[j]);
+		priv->sa = sa;
+	}
+}
+
+/*
+ * finish processing of packets successfully decrypted by an inline processor
+ */
+static uint32_t
+ipsec_process_inline_group(struct rte_ipsec_session *ips, void *sa,
+	struct ipsec_traffic *trf, struct rte_mbuf *mb[], uint32_t cnt)
+{
+	uint64_t satp;
+	uint32_t k;
+
+	/* get SA type */
+	satp = rte_ipsec_sa_type(ips->sa);
+	prep_process_group(sa, mb, cnt);
+
+	k = rte_ipsec_pkt_process(ips, mb, cnt);
+	copy_to_trf(trf, satp, mb, k);
+	return k;
+}
+
+/*
+ * process packets synchronously
+ */
+static uint32_t
+ipsec_process_cpu_group(struct rte_ipsec_session *ips, void *sa,
+	struct ipsec_traffic *trf, struct rte_mbuf *mb[], uint32_t cnt)
+{
+	uint64_t satp;
+	uint32_t k;
+
+	/* get SA type */
+	satp = rte_ipsec_sa_type(ips->sa);
+	prep_process_group(sa, mb, cnt);
+
+	k = rte_ipsec_pkt_cpu_prepare(ips, mb, cnt);
+	k = rte_ipsec_pkt_process(ips, mb, k);
+	copy_to_trf(trf, satp, mb, k);
+	return k;
+}
+
 /*
  * Process ipsec packets.
  * If a packet belongs to an SA that is subject to inline-crypto,
@@ -195,14 +274,10 @@ copy_to_trf(struct ipsec_traffic *trf, uint64_t satp, struct rte_mbuf *mb[],
 void
 ipsec_process(struct ipsec_ctx *ctx, struct ipsec_traffic *trf)
 {
-	uint64_t satp;
-	uint32_t i, j, k, n;
+	uint32_t i, k, n;
 	struct ipsec_sa *sa;
-	struct ipsec_mbuf_metadata *priv;
 	struct rte_ipsec_group *pg;
 	struct rte_ipsec_session *ips;
-	struct cdev_qp *cqp;
-	struct rte_crypto_op *cop[RTE_DIM(trf->ipsec.pkts)];
 	struct rte_ipsec_group grp[RTE_DIM(trf->ipsec.pkts)];
 
 	n = sa_group(trf->ipsec.saptr, trf->ipsec.pkts, grp, trf->ipsec.num);
@@ -210,9 +285,15 @@ ipsec_process(struct ipsec_ctx *ctx, struct ipsec_traffic *trf)
 	for (i = 0; i != n; i++) {
 
 		pg = grp + i;
-		sa = pg->id.ptr;
+		sa = ipsec_mask_saptr(pg->id.ptr);
 
-		ips = ipsec_get_session(sa);
+		/* fall back to cryptodev for RX packets that the inline
+		 * processor was unable to process
+		 */
+		if (sa != NULL)
+			ips = (pg->id.val & IPSEC_SA_OFFLOAD_FALLBACK_FLAG) ?
+				ipsec_get_fallback_session(sa) :
+				ipsec_get_primary_session(sa);
 
 		/* no valid HW session for that SA, try to create one */
 		if (sa == NULL || (ips->crypto.ses == NULL &&
@@ -220,46 +301,26 @@ ipsec_process(struct ipsec_ctx *ctx, struct ipsec_traffic *trf)
 			k = 0;
 
 		/* process packets inline */
-		else if (ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
-				ips->type ==
-				RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) {
-
-			satp = rte_ipsec_sa_type(ips->sa);
-
-			/*
-			 * This is just to satisfy inbound_sa_check()
-			 * and get_hop_for_offload_pkt().
-			 * Should be removed in future.
- */ - for (j = 0; j != pg->cnt; j++) { - priv = get_priv(pg->m[j]); - priv->sa = sa; + else { + switch (ips->type) { + /* enqueue packets to crypto dev */ + case RTE_SECURITY_ACTION_TYPE_NONE: + case RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL: + k = ipsec_prepare_crypto_group(ctx, sa, ips, + pg->m, pg->cnt); + break; + case RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO: + case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL: + k = ipsec_process_inline_group(ips, sa, + trf, pg->m, pg->cnt); + break; + case RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO: + k = ipsec_process_cpu_group(ips, sa, + trf, pg->m, pg->cnt); + break; + default: + k = 0; } - - k = rte_ipsec_pkt_process(ips, pg->m, pg->cnt); - copy_to_trf(trf, satp, pg->m, k); - - /* enqueue packets to crypto dev */ - } else { - - cqp = &ctx->tbl[sa->cdev_id_qp]; - - /* for that app each mbuf has it's own crypto op */ - for (j = 0; j != pg->cnt; j++) { - priv = get_priv(pg->m[j]); - cop[j] = &priv->cop; - /* - * this is just to satisfy inbound_sa_check() - * should be removed in future. - */ - priv->sa = sa; - } - - /* prepare and enqueue crypto ops */ - k = rte_ipsec_pkt_crypto_prepare(ips, pg->m, cop, - pg->cnt); - if (k != 0) - enqueue_cop_bulk(cqp, cop, k); } /* drop packets that cannot be enqueued/processed */
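Two details of this patch are easy to miss when reading the diff: sa_group() folds each run of consecutive packets that carry the same SA pointer into one rte_ipsec_group entry, and the fallback path tags that pointer with IPSEC_SA_OFFLOAD_FALLBACK_FLAG in its low bit, which ipsec_mask_saptr() strips again before the SA is dereferenced. The standalone sketch below illustrates both ideas; it assumes SA objects are at least 2-byte aligned, and the names pkt_group, group_by_sa, SA_FALLBACK_FLAG and mask_saptr are hypothetical stand-ins for the patch's structures and helpers, not the DPDK API.

#include <stdint.h>
#include <stdio.h>

/* stand-in for IPSEC_SA_OFFLOAD_FALLBACK_FLAG: SA objects are aligned,
 * so bit 0 of the pointer is free to mark "inline processing failed,
 * retry this packet through the lookaside (cryptodev) session".
 */
#define SA_FALLBACK_FLAG	((uintptr_t)1)

/* counterpart of ipsec_mask_saptr(): recover the real SA pointer */
static inline void *
mask_saptr(void *p)
{
	return (void *)((uintptr_t)p & ~SA_FALLBACK_FLAG);
}

struct pkt_group {	/* simplified rte_ipsec_group */
	void *sa;	/* (possibly tagged) SA pointer shared by the run */
	uint32_t cnt;	/* number of consecutive packets in the run */
};

/* one-pass grouping of consecutive equal SA pointers, as sa_group() does;
 * a tagged and an untagged pointer to the same SA land in different groups,
 * which is exactly what routes fallback packets to the fallback session
 */
static uint32_t
group_by_sa(void *saptr[], uint32_t num, struct pkt_group grp[])
{
	uint32_t i, n;

	for (i = 0, n = 0; i != num; i++) {
		if (n == 0 || grp[n - 1].sa != saptr[i]) {
			grp[n].sa = saptr[i];
			grp[n].cnt = 0;
			n++;
		}
		grp[n - 1].cnt++;
	}
	return n;
}

int
main(void)
{
	uint64_t sa1, sa2;	/* dummy stand-ins for two SAs */
	void *saptr[] = {
		&sa1, &sa1,
		(void *)((uintptr_t)&sa2 | SA_FALLBACK_FLAG), /* inline failed */
		&sa1,
	};
	struct pkt_group grp[4];
	uint32_t i, n;

	/* yields three groups: {sa1, 2}, {sa2 tagged, 1}, {sa1, 1} */
	n = group_by_sa(saptr, 4, grp);
	for (i = 0; i != n; i++)
		printf("group %u: sa=%p cnt=%u fallback=%u\n", i,
			mask_saptr(grp[i].sa), grp[i].cnt,
			(unsigned int)((uintptr_t)grp[i].sa & SA_FALLBACK_FLAG));
	return 0;
}

On the real datapath this grouping is what lets ipsec_process() pick a handler once per run, via the switch on ips->type (cryptodev enqueue, inline, or cpu-crypto), instead of dispatching per packet.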