Changes to make the ipsec-secgw data-path code utilize the librte_ipsec library.
Note that, by default, the current (non-librte_ipsec) code-path is still
used. The user has to run the application with the new command-line option
('-l') to enable the new code-path.
Signed-off-by: Mohammad Abdul Awal <mohammad.abdul.awal@intel.com>
Signed-off-by: Bernard Iremonger <bernard.iremonger@intel.com>
Signed-off-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
Acked-by: Radu Nicolau <radu.nicolau@intel.com>
Acked-by: Akhil Goyal <akhil.goyal@nxp.com>
SRCS-y += sp6.c
SRCS-y += sa.c
SRCS-y += rt.c
+SRCS-y += ipsec_process.c
SRCS-y += ipsec-secgw.c
CFLAGS += -gdwarf-2
static struct socket_ctx socket_ctx[NB_SOCKETS];
-struct traffic_type {
- const uint8_t *data[MAX_PKT_BURST * 2];
- struct rte_mbuf *pkts[MAX_PKT_BURST * 2];
- uint32_t res[MAX_PKT_BURST * 2];
- uint32_t num;
-};
-
-struct ipsec_traffic {
- struct traffic_type ipsec;
- struct traffic_type ip4;
- struct traffic_type ip6;
-};
-
static inline void
prepare_one_packet(struct rte_mbuf *pkt, struct ipsec_traffic *t)
{
t->ip4.data[t->ip4.num] = nlp;
t->ip4.pkts[(t->ip4.num)++] = pkt;
}
+ pkt->l2_len = 0;
+ pkt->l3_len = sizeof(struct ip);
} else if (eth->ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv6)) {
nlp = (uint8_t *)rte_pktmbuf_adj(pkt, ETHER_HDR_LEN);
nlp = RTE_PTR_ADD(nlp, offsetof(struct ip6_hdr, ip6_nxt));
t->ip6.data[t->ip6.num] = nlp;
t->ip6.pkts[(t->ip6.num)++] = pkt;
}
+ pkt->l2_len = 0;
+ pkt->l3_len = sizeof(struct ip6_hdr);
} else {
/* Unknown/Unsupported type, drop the packet */
RTE_LOG(ERR, IPSEC, "Unsupported packet type\n");
n_ip4 = traffic->ip4.num;
n_ip6 = traffic->ip6.num;
- nb_pkts_in = ipsec_inbound(ipsec_ctx, traffic->ipsec.pkts,
- traffic->ipsec.num, MAX_PKT_BURST);
-
- split46_traffic(traffic, traffic->ipsec.pkts, nb_pkts_in);
+ if (app_sa_prm.enable == 0) {
+ nb_pkts_in = ipsec_inbound(ipsec_ctx, traffic->ipsec.pkts,
+ traffic->ipsec.num, MAX_PKT_BURST);
+ split46_traffic(traffic, traffic->ipsec.pkts, nb_pkts_in);
+ } else {
+ inbound_sa_lookup(ipsec_ctx->sa_ctx, traffic->ipsec.pkts,
+ traffic->ipsec.saptr, traffic->ipsec.num);
+ ipsec_process(ipsec_ctx, traffic);
+ }
inbound_sp_sa(ipsec_ctx->sp4_ctx, ipsec_ctx->sa_ctx, &traffic->ip4,
n_ip4);
outbound_sp(ipsec_ctx->sp6_ctx, &traffic->ip6, &traffic->ipsec);
- nb_pkts_out = ipsec_outbound(ipsec_ctx, traffic->ipsec.pkts,
- traffic->ipsec.res, traffic->ipsec.num,
- MAX_PKT_BURST);
-
- for (i = 0; i < nb_pkts_out; i++) {
- m = traffic->ipsec.pkts[i];
- struct ip *ip = rte_pktmbuf_mtod(m, struct ip *);
- if (ip->ip_v == IPVERSION) {
- idx = traffic->ip4.num++;
- traffic->ip4.pkts[idx] = m;
- } else {
- idx = traffic->ip6.num++;
- traffic->ip6.pkts[idx] = m;
+ if (app_sa_prm.enable == 0) {
+
+ nb_pkts_out = ipsec_outbound(ipsec_ctx, traffic->ipsec.pkts,
+ traffic->ipsec.res, traffic->ipsec.num,
+ MAX_PKT_BURST);
+
+ for (i = 0; i < nb_pkts_out; i++) {
+ m = traffic->ipsec.pkts[i];
+ struct ip *ip = rte_pktmbuf_mtod(m, struct ip *);
+ if (ip->ip_v == IPVERSION) {
+ idx = traffic->ip4.num++;
+ traffic->ip4.pkts[idx] = m;
+ } else {
+ idx = traffic->ip6.num++;
+ traffic->ip6.pkts[idx] = m;
+ }
}
+ } else {
+ outbound_sa_lookup(ipsec_ctx->sa_ctx, traffic->ipsec.res,
+ traffic->ipsec.saptr, traffic->ipsec.num);
+ ipsec_process(ipsec_ctx, traffic);
}
}
traffic->ip6.num = 0;
- nb_pkts_in = ipsec_inbound(ipsec_ctx, traffic->ipsec.pkts,
- traffic->ipsec.num, MAX_PKT_BURST);
+ if (app_sa_prm.enable == 0) {
- for (i = 0; i < nb_pkts_in; i++) {
- m = traffic->ipsec.pkts[i];
- struct ip *ip = rte_pktmbuf_mtod(m, struct ip *);
- if (ip->ip_v == IPVERSION) {
- idx = traffic->ip4.num++;
- traffic->ip4.pkts[idx] = m;
- } else {
- idx = traffic->ip6.num++;
- traffic->ip6.pkts[idx] = m;
+ nb_pkts_in = ipsec_inbound(ipsec_ctx, traffic->ipsec.pkts,
+ traffic->ipsec.num, MAX_PKT_BURST);
+
+ for (i = 0; i < nb_pkts_in; i++) {
+ m = traffic->ipsec.pkts[i];
+ struct ip *ip = rte_pktmbuf_mtod(m, struct ip *);
+ if (ip->ip_v == IPVERSION) {
+ idx = traffic->ip4.num++;
+ traffic->ip4.pkts[idx] = m;
+ } else {
+ idx = traffic->ip6.num++;
+ traffic->ip6.pkts[idx] = m;
+ }
}
+ } else {
+ inbound_sa_lookup(ipsec_ctx->sa_ctx, traffic->ipsec.pkts,
+ traffic->ipsec.saptr, traffic->ipsec.num);
+ ipsec_process(ipsec_ctx, traffic);
}
}
traffic->ip6.num = 0;
traffic->ipsec.num = n;
- nb_pkts_out = ipsec_outbound(ipsec_ctx, traffic->ipsec.pkts,
- traffic->ipsec.res, traffic->ipsec.num,
- MAX_PKT_BURST);
+ if (app_sa_prm.enable == 0) {
- /* They all sue the same SA (ip4 or ip6 tunnel) */
- m = traffic->ipsec.pkts[i];
- ip = rte_pktmbuf_mtod(m, struct ip *);
- if (ip->ip_v == IPVERSION) {
- traffic->ip4.num = nb_pkts_out;
- for (i = 0; i < nb_pkts_out; i++)
- traffic->ip4.pkts[i] = traffic->ipsec.pkts[i];
+ nb_pkts_out = ipsec_outbound(ipsec_ctx, traffic->ipsec.pkts,
+ traffic->ipsec.res, traffic->ipsec.num,
+ MAX_PKT_BURST);
+
+	/* They all use the same SA (ip4 or ip6 tunnel) */
+ m = traffic->ipsec.pkts[0];
+ ip = rte_pktmbuf_mtod(m, struct ip *);
+ if (ip->ip_v == IPVERSION) {
+ traffic->ip4.num = nb_pkts_out;
+ for (i = 0; i < nb_pkts_out; i++)
+ traffic->ip4.pkts[i] = traffic->ipsec.pkts[i];
+ } else {
+ traffic->ip6.num = nb_pkts_out;
+ for (i = 0; i < nb_pkts_out; i++)
+ traffic->ip6.pkts[i] = traffic->ipsec.pkts[i];
+ }
} else {
- traffic->ip6.num = nb_pkts_out;
- for (i = 0; i < nb_pkts_out; i++)
- traffic->ip6.pkts[i] = traffic->ipsec.pkts[i];
+ outbound_sa_lookup(ipsec_ctx->sa_ctx, traffic->ipsec.res,
+ traffic->ipsec.saptr, traffic->ipsec.num);
+ ipsec_process(ipsec_ctx, traffic);
}
}
uint32_t n;
struct ipsec_traffic trf;
- /* dequeue packets from crypto-queue */
- n = ipsec_inbound_cqp_dequeue(ctx, trf.ipsec.pkts,
+ if (app_sa_prm.enable == 0) {
+
+ /* dequeue packets from crypto-queue */
+ n = ipsec_inbound_cqp_dequeue(ctx, trf.ipsec.pkts,
RTE_DIM(trf.ipsec.pkts));
- if (n == 0)
- return;
- trf.ip4.num = 0;
- trf.ip6.num = 0;
+ trf.ip4.num = 0;
+ trf.ip6.num = 0;
- /* split traffic by ipv4-ipv6 */
- split46_traffic(&trf, trf.ipsec.pkts, n);
+ /* split traffic by ipv4-ipv6 */
+ split46_traffic(&trf, trf.ipsec.pkts, n);
+ } else
+ ipsec_cqp_process(ctx, &trf);
/* process ipv4 packets */
- inbound_sp_sa(ctx->sp4_ctx, ctx->sa_ctx, &trf.ip4, 0);
- route4_pkts(qconf->rt4_ctx, trf.ip4.pkts, trf.ip4.num);
+ if (trf.ip4.num != 0) {
+ inbound_sp_sa(ctx->sp4_ctx, ctx->sa_ctx, &trf.ip4, 0);
+ route4_pkts(qconf->rt4_ctx, trf.ip4.pkts, trf.ip4.num);
+ }
/* process ipv6 packets */
- inbound_sp_sa(ctx->sp6_ctx, ctx->sa_ctx, &trf.ip6, 0);
- route6_pkts(qconf->rt6_ctx, trf.ip6.pkts, trf.ip6.num);
+ if (trf.ip6.num != 0) {
+ inbound_sp_sa(ctx->sp6_ctx, ctx->sa_ctx, &trf.ip6, 0);
+ route6_pkts(qconf->rt6_ctx, trf.ip6.pkts, trf.ip6.num);
+ }
}
static void
uint32_t n;
struct ipsec_traffic trf;
- /* dequeue packets from crypto-queue */
- n = ipsec_outbound_cqp_dequeue(ctx, trf.ipsec.pkts,
+ if (app_sa_prm.enable == 0) {
+
+ /* dequeue packets from crypto-queue */
+ n = ipsec_outbound_cqp_dequeue(ctx, trf.ipsec.pkts,
RTE_DIM(trf.ipsec.pkts));
- if (n == 0)
- return;
- trf.ip4.num = 0;
- trf.ip6.num = 0;
+ trf.ip4.num = 0;
+ trf.ip6.num = 0;
- /* split traffic by ipv4-ipv6 */
- split46_traffic(&trf, trf.ipsec.pkts, n);
+ /* split traffic by ipv4-ipv6 */
+ split46_traffic(&trf, trf.ipsec.pkts, n);
+ } else
+ ipsec_cqp_process(ctx, &trf);
/* process ipv4 packets */
- route4_pkts(qconf->rt4_ctx, trf.ip4.pkts, trf.ip4.num);
+ if (trf.ip4.num != 0)
+ route4_pkts(qconf->rt4_ctx, trf.ip4.pkts, trf.ip4.num);
/* process ipv6 packets */
- route6_pkts(qconf->rt6_ctx, trf.ip6.pkts, trf.ip6.num);
+ if (trf.ip6.num != 0)
+ route6_pkts(qconf->rt6_ctx, trf.ip6.pkts, trf.ip6.num);
}
/* main processing loop */
ipsec->esn_soft_limit = IPSEC_OFFLOAD_ESN_SOFTLIMIT;
}
-static inline int
+int
create_session(struct ipsec_ctx *ipsec_ctx, struct ipsec_sa *sa)
{
struct rte_cryptodev_info cdev_info;
uint32_t cnt;
} __attribute__((packed));
+/* A burst of packets of one kind, plus per-packet metadata. */
+struct traffic_type {
+	/* per-packet pointer into the header (next-proto field), ACL input */
+	const uint8_t *data[MAX_PKT_BURST * 2];
+	struct rte_mbuf *pkts[MAX_PKT_BURST * 2];
+	/* per-packet SA, filled by inbound/outbound_sa_lookup() */
+	struct ipsec_sa *saptr[MAX_PKT_BURST * 2];
+	/* per-packet SP/SA lookup result — presumably SPI; verify vs outbound_sp() */
+	uint32_t res[MAX_PKT_BURST * 2];
+	/* number of valid entries in the arrays above */
+	uint32_t num;
+};
+
+/* Traffic split by kind: to-be-ipsec-processed, plain ipv4, plain ipv6. */
+struct ipsec_traffic {
+	struct traffic_type ipsec;
+	struct traffic_type ip4;
+	struct traffic_type ip6;
+};
+
uint16_t
ipsec_inbound(struct ipsec_ctx *ctx, struct rte_mbuf *pkts[],
uint16_t nb_pkts, uint16_t len);
ipsec_outbound_cqp_dequeue(struct ipsec_ctx *ctx, struct rte_mbuf *pkts[],
uint16_t len);
+void
+ipsec_process(struct ipsec_ctx *ctx, struct ipsec_traffic *trf);
+
+void
+ipsec_cqp_process(struct ipsec_ctx *ctx, struct ipsec_traffic *trf);
+
static inline uint16_t
ipsec_metadata_size(void)
{
void
enqueue_cop_burst(struct cdev_qp *cqp);
+int
+create_session(struct ipsec_ctx *ipsec_ctx, struct ipsec_sa *sa);
+
#endif /* __IPSEC_H__ */
--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2017 Intel Corporation
+ */
+#include <sys/types.h>
+#include <netinet/in.h>
+#include <netinet/ip.h>
+
+#include <rte_branch_prediction.h>
+#include <rte_log.h>
+#include <rte_cryptodev.h>
+#include <rte_ethdev.h>
+#include <rte_mbuf.h>
+
+#include "ipsec.h"
+
+#define SATP_OUT_IPV4(t) \
+ ((((t) & RTE_IPSEC_SATP_MODE_MASK) == RTE_IPSEC_SATP_MODE_TRANS && \
+ (((t) & RTE_IPSEC_SATP_IPV_MASK) == RTE_IPSEC_SATP_IPV4)) || \
+ ((t) & RTE_IPSEC_SATP_MODE_MASK) == RTE_IPSEC_SATP_MODE_TUNLV4)
+
+
+/* Free a bulk of mbufs back to their mempools. */
+static inline void
+free_pkts(struct rte_mbuf *mb[], uint32_t n)
+{
+	uint32_t k = 0;
+
+	while (k != n)
+		rte_pktmbuf_free(mb[k++]);
+}
+
+/* Drop a bulk of crypto-ops: free the source mbuf attached to each op. */
+static inline void
+free_cops(struct rte_crypto_op *cop[], uint32_t n)
+{
+	uint32_t k = 0;
+
+	while (k != n) {
+		rte_pktmbuf_free(cop[k]->sym->m_src);
+		k++;
+	}
+}
+
+/*
+ * helper routine to enqueue bulk of crypto ops.
+ * Ops are either handed to the PMD straight away, or staged in
+ * cqp->buf and flushed whenever the staging buffer fills up.
+ * Ops the PMD refuses to accept are dropped (their mbufs freed).
+ */
+static inline void
+enqueue_cop_bulk(struct cdev_qp *cqp, struct rte_crypto_op *cop[], uint32_t num)
+{
+	uint32_t i, k, len, n;
+
+	len = cqp->len;
+
+	/*
+	 * if cqp is empty and we have enough ops,
+	 * then queue them to the PMD straightway.
+	 */
+	if (num >= RTE_DIM(cqp->buf) * 3 / 4 && len == 0) {
+		n = rte_cryptodev_enqueue_burst(cqp->id, cqp->qp, cop, num);
+		cqp->in_flight += n;
+		/* drop the tail that the PMD did not accept */
+		free_cops(cop + n, num - n);
+		return;
+	}
+
+	k = 0;
+
+	do {
+		/* how many ops fit into the staging buffer right now */
+		n = RTE_DIM(cqp->buf) - len;
+		n = RTE_MIN(num - k, n);
+
+		/* put packets into cqp */
+		for (i = 0; i != n; i++)
+			cqp->buf[len + i] = cop[k + i];
+
+		len += n;
+		k += n;
+
+		/* if cqp is full then, enqueue crypto-ops to PMD */
+		if (len == RTE_DIM(cqp->buf)) {
+			n = rte_cryptodev_enqueue_burst(cqp->id, cqp->qp,
+					cqp->buf, len);
+			cqp->in_flight += n;
+			free_cops(cqp->buf + n, len - n);
+			len = 0;
+		}
+
+
+	} while (k != num);
+
+	/* remember how many ops are still staged for a later flush */
+	cqp->len = len;
+}
+
+/*
+ * Populate a librte_ipsec session from the application SA, creating the
+ * underlying cryptodev/security session on demand via create_session().
+ * Returns 0 on success; on failure the session is zeroed out.
+ */
+static inline int
+fill_ipsec_session(struct rte_ipsec_session *ss, struct ipsec_ctx *ctx,
+	struct ipsec_sa *sa)
+{
+	int32_t rc;
+
+	rc = 0;
+	if (ss->type != RTE_SECURITY_ACTION_TYPE_NONE) {
+		/* security-offload action type */
+		if (sa->sec_session == NULL)
+			rc = create_session(ctx, sa);
+		if (rc != 0)
+			return rc;
+		ss->security.ses = sa->sec_session;
+		ss->security.ctx = sa->security_ctx;
+		ss->security.ol_flags = sa->ol_flags;
+	} else {
+		/* plain crypto session */
+		if (sa->crypto_session == NULL)
+			rc = create_session(ctx, sa);
+		if (rc != 0)
+			return rc;
+		ss->crypto.ses = sa->crypto_session;
+	}
+
+	rc = rte_ipsec_session_prepare(ss);
+	if (rc != 0)
+		memset(ss, 0, sizeof(*ss));
+
+	return rc;
+}
+
+/*
+ * group input packets by the SA they belong to.
+ * sa_ptr[] holds one SA pointer per packet in pkts[]; runs of consecutive
+ * packets that share the same SA are merged into one rte_ipsec_group.
+ * Returns the number of groups written to grp[].
+ */
+static uint32_t
+sa_group(struct ipsec_sa *sa_ptr[], struct rte_mbuf *pkts[],
+	struct rte_ipsec_group grp[], uint32_t num)
+{
+	uint32_t i, n, spi;
+	void *sa;
+	/* sentinel address that can never match a real SA pointer */
+	void * const nosa = &spi;
+
+	sa = nosa;
+	for (i = 0, n = 0; i != num; i++) {
+
+		if (sa != sa_ptr[i]) {
+			/*
+			 * close the previous group; the very first store
+			 * (while sa == nosa) is discarded, since n is not
+			 * advanced and grp[n] is re-initialized just below.
+			 */
+			grp[n].cnt = pkts + i - grp[n].m;
+			n += (sa != nosa);
+			grp[n].id.ptr = sa_ptr[i];
+			grp[n].m = pkts + i;
+			sa = sa_ptr[i];
+		}
+	}
+
+	/* terminate last group */
+	if (sa != nosa) {
+		grp[n].cnt = pkts + i - grp[n].m;
+		n++;
+	}
+
+	return n;
+}
+
+/*
+ * helper function, splits processed packets into ipv4/ipv6 traffic,
+ * filling both the mbuf array and the ACL-input data pointers.
+ */
+static inline void
+copy_to_trf(struct ipsec_traffic *trf, uint64_t satp, struct rte_mbuf *mb[],
+	uint32_t num)
+{
+	uint32_t i, ofs, pos;
+	int v4;
+	struct traffic_type *out;
+
+	/*
+	 * determine traffic type(ipv4/ipv6) and offset for ACL classify
+	 * based on SA type
+	 */
+	if ((satp & RTE_IPSEC_SATP_DIR_MASK) == RTE_IPSEC_SATP_DIR_IB)
+		v4 = ((satp & RTE_IPSEC_SATP_IPV_MASK) == RTE_IPSEC_SATP_IPV4);
+	else
+		v4 = SATP_OUT_IPV4(satp);
+
+	if (v4) {
+		out = &trf->ip4;
+		ofs = offsetof(struct ip, ip_p);
+	} else {
+		out = &trf->ip6;
+		ofs = offsetof(struct ip6_hdr, ip6_nxt);
+	}
+
+	pos = out->num;
+	for (i = 0; i != num; i++) {
+		out->data[pos + i] = rte_pktmbuf_mtod_offset(mb[i],
+				void *, ofs);
+		out->pkts[pos + i] = mb[i];
+	}
+
+	out->num = pos + num;
+}
+
+/*
+ * Process ipsec packets.
+ * If a packet belongs to an SA that is subject of inline-crypto,
+ * then process it immediately.
+ * Otherwise do necessary preparations and queue it to the related
+ * crypto-dev queue.
+ */
+void
+ipsec_process(struct ipsec_ctx *ctx, struct ipsec_traffic *trf)
+{
+	uint64_t satp;
+	uint32_t i, j, k, n;
+	struct ipsec_sa *sa;
+	struct ipsec_mbuf_metadata *priv;
+	struct rte_ipsec_group *pg;
+	struct rte_ipsec_session *ips;
+	struct cdev_qp *cqp;
+	struct rte_crypto_op *cop[RTE_DIM(trf->ipsec.pkts)];
+	struct rte_ipsec_group grp[RTE_DIM(trf->ipsec.pkts)];
+
+	n = sa_group(trf->ipsec.saptr, trf->ipsec.pkts, grp, trf->ipsec.num);
+
+	for (i = 0; i != n; i++) {
+
+		pg = grp + i;
+		sa = pg->id.ptr;
+
+		/*
+		 * no valid SA found for that group - drop all its packets
+		 * right away (dereferencing sa below would crash otherwise).
+		 */
+		if (sa == NULL) {
+			free_pkts(pg->m, pg->cnt);
+			continue;
+		}
+
+		ips = &sa->ips;
+		satp = rte_ipsec_sa_type(ips->sa);
+
+		/* no valid HW session for that SA, try to create one */
+		if (ips->crypto.ses == NULL &&
+				fill_ipsec_session(ips, ctx, sa) != 0)
+			k = 0;
+
+		/* process packets inline */
+		else if (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
+				sa->type ==
+				RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) {
+
+			/*
+			 * This is just to satisfy inbound_sa_check()
+			 * and get_hop_for_offload_pkt().
+			 * Should be removed in future.
+			 */
+			for (j = 0; j != pg->cnt; j++) {
+				priv = get_priv(pg->m[j]);
+				priv->sa = sa;
+			}
+
+			k = rte_ipsec_pkt_process(ips, pg->m, pg->cnt);
+			copy_to_trf(trf, satp, pg->m, k);
+
+		/* enqueue packets to crypto dev */
+		} else {
+
+			cqp = &ctx->tbl[sa->cdev_id_qp];
+
+			/* for that app each mbuf has its own crypto op */
+			for (j = 0; j != pg->cnt; j++) {
+				priv = get_priv(pg->m[j]);
+				cop[j] = &priv->cop;
+				/*
+				 * this is just to satisfy inbound_sa_check()
+				 * should be removed in future.
+				 */
+				priv->sa = sa;
+			}
+
+			/* prepare and enqueue crypto ops */
+			k = rte_ipsec_pkt_crypto_prepare(ips, pg->m, cop,
+				pg->cnt);
+			if (k != 0)
+				enqueue_cop_bulk(cqp, cop, k);
+		}
+
+		/* drop packets that cannot be enqueued/processed */
+		if (k != pg->cnt)
+			free_pkts(pg->m + k, pg->cnt - k);
+	}
+}
+
+/*
+ * Pull up to 'num' completed crypto-ops from a single queue-pair and
+ * update its in-flight counter. Returns the number of ops dequeued.
+ */
+static inline uint32_t
+cqp_dequeue(struct cdev_qp *cqp, struct rte_crypto_op *cop[], uint32_t num)
+{
+	uint32_t k;
+
+	/* nothing pending on this queue-pair - skip the PMD call */
+	if (cqp->in_flight == 0)
+		return 0;
+
+	k = rte_cryptodev_dequeue_burst(cqp->id, cqp->qp, cop, num);
+	RTE_ASSERT(cqp->in_flight >= k);
+	cqp->in_flight -= k;
+
+	return k;
+}
+
+/*
+ * Dequeue up to 'num' completed crypto-ops across all queue-pairs of the
+ * context, resuming (round-robin) from the queue after the last serviced.
+ */
+static inline uint32_t
+ctx_dequeue(struct ipsec_ctx *ctx, struct rte_crypto_op *cop[], uint32_t num)
+{
+	uint32_t qid, cnt;
+
+	cnt = 0;
+
+	/* first scan [last_qp, nb_qps), then wrap around to [0, last_qp) */
+	for (qid = ctx->last_qp; cnt != num && qid != ctx->nb_qps; qid++)
+		cnt += cqp_dequeue(ctx->tbl + qid, cop + cnt, num - cnt);
+
+	for (qid = 0; cnt != num && qid != ctx->last_qp; qid++)
+		cnt += cqp_dequeue(ctx->tbl + qid, cop + cnt, num - cnt);
+
+	/* remember where to resume on the next call */
+	ctx->last_qp = qid;
+	return cnt;
+}
+
+/*
+ * dequeue packets from crypto-queues and finalize processing.
+ * Completed crypto-ops are grouped by their ipsec session, run through
+ * rte_ipsec_pkt_process(), and the surviving packets are split into
+ * trf->ip4 / trf->ip6 for further routing; failed packets are freed.
+ */
+void
+ipsec_cqp_process(struct ipsec_ctx *ctx, struct ipsec_traffic *trf)
+{
+	uint64_t satp;
+	uint32_t i, k, n, ng;
+	struct rte_ipsec_session *ss;
+	struct traffic_type *out;
+	struct rte_ipsec_group *pg;
+	struct rte_crypto_op *cop[RTE_DIM(trf->ipsec.pkts)];
+	struct rte_ipsec_group grp[RTE_DIM(trf->ipsec.pkts)];
+
+	trf->ip4.num = 0;
+	trf->ip6.num = 0;
+
+	/* grouped mbufs land in the ipsec traffic-type's pkts[] array */
+	out = &trf->ipsec;
+
+	/* dequeue completed crypto-ops */
+	n = ctx_dequeue(ctx, cop, RTE_DIM(cop));
+	if (n == 0)
+		return;
+
+	/* group them by ipsec session */
+	ng = rte_ipsec_pkt_crypto_group((const struct rte_crypto_op **)
+		(uintptr_t)cop, out->pkts, grp, n);
+
+	/* process each group of packets */
+	for (i = 0; i != ng; i++) {
+
+		pg = grp + i;
+		ss = pg->id.ptr;
+		satp = rte_ipsec_sa_type(ss->sa);
+
+		/* k = number of successfully processed packets in the group */
+		k = rte_ipsec_pkt_process(ss, pg->m, pg->cnt);
+		copy_to_trf(trf, satp, pg->m, k);
+
+		/* free bad packets, if any */
+		free_pkts(pg->m + k, pg->cnt - k);
+
+		n -= pg->cnt;
+	}
+
+	/* we should never have packet with unknown SA here */
+	RTE_VERIFY(n == 0);
+}
deps += ['security', 'lpm', 'acl', 'hash', 'ipsec']
allow_experimental_apis = true
sources = files(
- 'esp.c', 'ipsec.c', 'ipsec-secgw.c', 'parser.c',
- 'rt.c', 'sa.c', 'sp4.c', 'sp6.c'
+ 'esp.c', 'ipsec.c', 'ipsec_process.c', 'ipsec-secgw.c',
+ 'parser.c', 'rt.c', 'sa.c', 'sp4.c', 'sp6.c'
)