/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2020 Intel Corporation
 */
#include <sys/types.h>
#include <netinet/in.h>
#include <netinet/ip.h>

#include <rte_branch_prediction.h>
#include <rte_log.h>
#include <rte_cryptodev.h>
#include <rte_ethdev.h>
#include <rte_mbuf.h>

#include "ipsec.h"
#include "ipsec-secgw.h"
#include "ipsec_worker.h"

/* helper routine to free a bulk of crypto-ops and the related packets */
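/*
 * Note: in this app each crypto-op is embedded in its packet's mbuf
 * private area (see ipsec_prepare_crypto_group() below), so freeing
 * the m_src mbuf releases the crypto-op storage as well.
 */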
static inline void
free_cops(struct rte_crypto_op *cop[], uint32_t n)
{
	uint32_t i;

	for (i = 0; i != n; i++)
		rte_pktmbuf_free(cop[i]->sym->m_src);
}

/* helper routine to enqueue a bulk of crypto ops */
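/*
 * Ops are buffered per queue-pair in cqp->buf and flushed to the PMD
 * only when the buffer fills up; a large burst (3/4 of the buffer or
 * more) arriving while the buffer is empty bypasses the buffering and
 * is submitted directly. Ops the PMD does not accept are dropped and
 * their packets freed.
 */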
static inline void
enqueue_cop_bulk(struct cdev_qp *cqp, struct rte_crypto_op *cop[], uint32_t num)
{
	uint32_t i, k, len, n;

	len = cqp->len;

	/*
	 * if cqp is empty and we have enough ops,
	 * then queue them to the PMD right away.
	 */
	if (num >= RTE_DIM(cqp->buf) * 3 / 4 && len == 0) {
		n = rte_cryptodev_enqueue_burst(cqp->id, cqp->qp, cop, num);
		cqp->in_flight += n;
		free_cops(cop + n, num - n);
		return;
	}

	k = 0;

	do {
		n = RTE_DIM(cqp->buf) - len;
		n = RTE_MIN(num - k, n);

		/* put packets into cqp */
		for (i = 0; i != n; i++)
			cqp->buf[len + i] = cop[k + i];

		k += n;
		len += n;

		/* if cqp is full, then enqueue crypto-ops to PMD */
		if (len == RTE_DIM(cqp->buf)) {
			n = rte_cryptodev_enqueue_burst(cqp->id, cqp->qp,
					cqp->buf, len);
			cqp->in_flight += n;
			free_cops(cqp->buf + n, len - n);
			len = 0;
		}

	} while (k != num);

	cqp->len = len;
}
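
/*
 * Helper to verify that the selected session actually has a device
 * session attached: a cryptodev session for the NONE/CPU_CRYPTO action
 * types, a security session for lookaside-protocol. For inline action
 * types there is nothing to check here.
 */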
static inline int
check_ipsec_session(const struct rte_ipsec_session *ss)
{
	if (ss->type == RTE_SECURITY_ACTION_TYPE_NONE ||
		ss->type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO) {
		if (ss->crypto.ses == NULL)
			return -ENOENT;
	} else if (ss->type == RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL) {
		if (ss->security.ses == NULL)
			return -ENOENT;
	}

	return 0;
}

/*
 * helper routine:
 * group input packets by the SA they belong to.
 */
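/*
 * A new group starts whenever the SA pointer changes, so each group is
 * one run of consecutive packets that share the same SA. A NULL SA
 * pointer (no matching SA was found) forms a group of its own and is
 * dropped later in ipsec_process().
 */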
static uint32_t
sa_group(void *sa_ptr[], struct rte_mbuf *pkts[],
	struct rte_ipsec_group grp[], uint32_t num)
{
	uint32_t i, n, spi;
	void *sa;
	void * const nosa = &spi;

	sa = nosa;
	grp[0].m = pkts;
	for (i = 0, n = 0; i != num; i++) {

		if (sa != sa_ptr[i]) {
			grp[n].cnt = pkts + i - grp[n].m;
			n += (sa != nosa);
			grp[n].id.ptr = sa_ptr[i];
			grp[n].m = pkts + i;
			sa = sa_ptr[i];
		}
	}

	/* terminate last group */
	if (sa != nosa) {
		grp[n].cnt = pkts + i - grp[n].m;
		n++;
	}

	return n;
}

/*
 * helper function, splits processed packets into ipv4/ipv6 traffic.
 */
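/*
 * Besides appending the mbufs to the proper (v4/v6) traffic list, it
 * also stores, per packet, a pointer to the L4-protocol field of the
 * IP header; that pointer is what the ACL classify stage expects.
 */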
static inline void
copy_to_trf(struct ipsec_traffic *trf, uint64_t satp, struct rte_mbuf *mb[],
	uint32_t num)
{
	uint32_t j, ofs, s;
	struct traffic_type *out;

	/*
	 * determine traffic type (ipv4/ipv6) and offset for ACL classify
	 */
	if ((satp & RTE_IPSEC_SATP_DIR_MASK) == RTE_IPSEC_SATP_DIR_IB) {
		if ((satp & RTE_IPSEC_SATP_IPV_MASK) == RTE_IPSEC_SATP_IPV4) {
			out = &trf->ip4;
			ofs = offsetof(struct ip, ip_p);
		} else {
			out = &trf->ip6;
			ofs = offsetof(struct ip6_hdr, ip6_nxt);
		}
	} else if (SATP_OUT_IPV4(satp)) {
		out = &trf->ip4;
		ofs = offsetof(struct ip, ip_p);
	} else {
		out = &trf->ip6;
		ofs = offsetof(struct ip6_hdr, ip6_nxt);
	}

	for (j = 0, s = out->num; j != num; j++) {
		out->data[s + j] = rte_pktmbuf_mtod_offset(mb[j],
				void *, ofs);
		out->pkts[s + j] = mb[j];
	}

	out->num += num;
}
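
/*
 * Prepare crypto-ops for one group of packets that share the same SA
 * and enqueue them to that SA's crypto queue-pair for this lcore.
 * Returns the number of packets successfully prepared.
 */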
static inline uint32_t
ipsec_prepare_crypto_group(struct ipsec_ctx *ctx, struct ipsec_sa *sa,
	struct rte_ipsec_session *ips, struct rte_mbuf **m,
	uint16_t cnt)
{
	struct cdev_qp *cqp;
	struct rte_crypto_op *cop[cnt];
	uint32_t j, k;
	struct ipsec_mbuf_metadata *priv;

	cqp = sa->cqp[ctx->lcore_id];

	/* in this app each mbuf has its own crypto op */
	for (j = 0; j != cnt; j++) {
		priv = get_priv(m[j]);
		cop[j] = &priv->cop;
		/*
		 * this is just to satisfy inbound_sa_check(),
		 * should be removed in future.
		 */
		priv->sa = sa;
	}

	/* prepare and enqueue crypto ops */
	k = rte_ipsec_pkt_crypto_prepare(ips, m, cop, cnt);
	if (k != 0)
		enqueue_cop_bulk(cqp, cop, k);

	return k;
}

/*
 * finish processing of packets successfully decrypted by an inline processor
 */
static uint32_t
ipsec_process_inline_group(struct rte_ipsec_session *ips, void *sa,
	struct ipsec_traffic *trf, struct rte_mbuf *mb[], uint32_t cnt)
{
	uint64_t satp;
	uint32_t k;

	/* get SA type */
	satp = rte_ipsec_sa_type(ips->sa);
	prep_process_group(sa, mb, cnt);

	k = rte_ipsec_pkt_process(ips, mb, cnt);
	copy_to_trf(trf, satp, mb, k);
	return k;
}

/*
 * process packets synchronously (CPU crypto)
 */
static uint32_t
ipsec_process_cpu_group(struct rte_ipsec_session *ips, void *sa,
	struct ipsec_traffic *trf, struct rte_mbuf *mb[], uint32_t cnt)
{
	uint64_t satp;
	uint32_t k;

	/* get SA type */
	satp = rte_ipsec_sa_type(ips->sa);
	prep_process_group(sa, mb, cnt);

	k = rte_ipsec_pkt_cpu_prepare(ips, mb, cnt);
	k = rte_ipsec_pkt_process(ips, mb, k);
	copy_to_trf(trf, satp, mb, k);
	return k;
}

/*
 * Process ipsec packets.
 * If a packet belongs to an SA that is subject to inline-crypto,
 * then process it immediately.
 * Otherwise do the necessary preparations and enqueue it to the
 * related crypto-dev queue.
 */
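/*
 * Packets arrive here already annotated with their SA pointer
 * (trf->ipsec.saptr); sa_group() turns that into per-SA groups so each
 * group can be dispatched in one call. Groups with no usable SA or
 * session are dropped wholesale via the k != pg->cnt path below.
 */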
void
ipsec_process(struct ipsec_ctx *ctx, struct ipsec_traffic *trf)
{
	uint32_t i, k, n;
	struct ipsec_sa *sa;
	struct rte_ipsec_group *pg;
	struct rte_ipsec_session *ips;
	struct rte_ipsec_group grp[RTE_DIM(trf->ipsec.pkts)];

	n = sa_group(trf->ipsec.saptr, trf->ipsec.pkts, grp, trf->ipsec.num);

	for (i = 0; i != n; i++) {

		pg = grp + i;
		sa = ipsec_mask_saptr(pg->id.ptr);

		/* fallback to cryptodev with RX packets which inline
		 * processor was unable to process
		 */
		if (sa != NULL)
			ips = (pg->id.val & IPSEC_SA_OFFLOAD_FALLBACK_FLAG) ?
				ipsec_get_fallback_session(sa) :
				ipsec_get_primary_session(sa);

		/* no valid HW session for that SA */
		if (sa == NULL || unlikely(check_ipsec_session(ips) != 0))
			k = 0;

		/* process packets inline */
		else {
			switch (ips->type) {
			/* enqueue packets to crypto dev */
			case RTE_SECURITY_ACTION_TYPE_NONE:
			case RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL:
				k = ipsec_prepare_crypto_group(ctx, sa, ips,
					pg->m, pg->cnt);
				break;
			case RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO:
			case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
				k = ipsec_process_inline_group(ips, sa,
					trf, pg->m, pg->cnt);
				break;
			case RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO:
				k = ipsec_process_cpu_group(ips, sa,
					trf, pg->m, pg->cnt);
				break;
			default:
				k = 0;
			}
		}

		/* drop packets that cannot be enqueued/processed */
		if (k != pg->cnt)
			free_pkts(pg->m + k, pg->cnt - k);
	}
}

static inline uint32_t
cqp_dequeue(struct cdev_qp *cqp, struct rte_crypto_op *cop[], uint32_t num)
{
	uint32_t n;

	if (cqp->in_flight == 0)
		return 0;

	n = rte_cryptodev_dequeue_burst(cqp->id, cqp->qp, cop, num);
	RTE_ASSERT(cqp->in_flight >= n);
	cqp->in_flight -= n;

	return n;
}
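
/*
 * Dequeue ops from this context's crypto queue-pairs, resuming from
 * where the previous call left off (ctx->last_qp) so that all
 * queue-pairs get polled fairly over time.
 */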
311 static inline uint32_t
312 ctx_dequeue(struct ipsec_ctx *ctx, struct rte_crypto_op *cop[], uint32_t num)
318 for (i = ctx->last_qp; n != num && i != ctx->nb_qps; i++)
319 n += cqp_dequeue(ctx->tbl + i, cop + n, num - n);
321 for (i = 0; n != num && i != ctx->last_qp; i++)
322 n += cqp_dequeue(ctx->tbl + i, cop + n, num - n);

/*
 * dequeue packets from crypto-queues and finalize processing.
 */
void
ipsec_cqp_process(struct ipsec_ctx *ctx, struct ipsec_traffic *trf)
{
	uint64_t satp;
	uint32_t i, k, n, ng;
	struct rte_ipsec_session *ss;
	struct traffic_type *out;
	struct rte_ipsec_group *pg;
	struct rte_crypto_op *cop[RTE_DIM(trf->ipsec.pkts)];
	struct rte_ipsec_group grp[RTE_DIM(trf->ipsec.pkts)];

	trf->ip4.num = 0;
	trf->ip6.num = 0;
	out = &trf->ipsec;
	out->num = 0;

	/* dequeue completed crypto-ops */
	n = ctx_dequeue(ctx, cop, RTE_DIM(cop));
	if (n == 0)
		return;

	/* group them by ipsec session */
	ng = rte_ipsec_pkt_crypto_group((const struct rte_crypto_op **)
		(uintptr_t)cop, out->pkts, grp, n);
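
	/*
	 * Each group now holds ops that completed for the same ipsec
	 * session (pg->id.ptr); the underlying mbufs have already been
	 * collected into out->pkts by the grouping call.
	 */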

	/* process each group of packets */
	for (i = 0; i != ng; i++) {

		pg = grp + i;
		ss = pg->id.ptr;
		satp = rte_ipsec_sa_type(ss->sa);

		k = rte_ipsec_pkt_process(ss, pg->m, pg->cnt);
		copy_to_trf(trf, satp, pg->m, k);

		/* free bad packets, if any */
		free_pkts(pg->m + k, pg->cnt - k);

		n -= pg->cnt;
	}

	/* we should never have a packet with an unknown SA here */
	RTE_VERIFY(n == 0);
}