/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2020 Intel Corporation
 */
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>

#include <rte_branch_prediction.h>
#include <rte_cryptodev.h>
#include <rte_ethdev.h>
#include <rte_mbuf.h>

#include "ipsec.h"
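
/*
 * SATP_OUT_IPV4(t) is non-zero when SA type 't' produces IPv4 output:
 * either transport mode over IPv4 or tunnel mode with an IPv4 outer header.
 */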
#define SATP_OUT_IPV4(t)	\
	((((t) & RTE_IPSEC_SATP_MODE_MASK) == RTE_IPSEC_SATP_MODE_TRANS && \
	(((t) & RTE_IPSEC_SATP_IPV_MASK) == RTE_IPSEC_SATP_IPV4)) || \
	((t) & RTE_IPSEC_SATP_MODE_MASK) == RTE_IPSEC_SATP_MODE_TUNLV4)
/* helper routine to free bulk of packets */
static inline void
free_pkts(struct rte_mbuf *mb[], uint32_t n)
{
	uint32_t i;

	for (i = 0; i != n; i++)
		rte_pktmbuf_free(mb[i]);
}
/* helper routine to free bulk of crypto-ops and related packets */
static inline void
free_cops(struct rte_crypto_op *cop[], uint32_t n)
{
	uint32_t i;

	for (i = 0; i != n; i++)
		rte_pktmbuf_free(cop[i]->sym->m_src);
}
/* helper routine to enqueue bulk of crypto ops */
static inline void
enqueue_cop_bulk(struct cdev_qp *cqp, struct rte_crypto_op *cop[], uint32_t num)
{
	uint32_t i, k, len, n;

	len = cqp->len;

	/*
	 * if cqp is empty and we have enough ops,
	 * then queue them to the PMD straight away.
	 */
	if (num >= RTE_DIM(cqp->buf) * 3 / 4 && len == 0) {
		n = rte_cryptodev_enqueue_burst(cqp->id, cqp->qp, cop, num);
		cqp->in_flight += n;
		free_cops(cop + n, num - n);
		return;
	}

	k = 0;
	do {
		n = RTE_DIM(cqp->buf) - len;
		n = RTE_MIN(num - k, n);

		/* put packets into cqp */
		for (i = 0; i != n; i++)
			cqp->buf[len + i] = cop[k + i];

		k += n;
		len += n;

		/* if cqp is full, then enqueue crypto-ops to the PMD */
		if (len == RTE_DIM(cqp->buf)) {
			n = rte_cryptodev_enqueue_burst(cqp->id, cqp->qp,
					cqp->buf, len);
			cqp->in_flight += n;
			free_cops(cqp->buf + n, len - n);
			len = 0;
		}
	} while (k != num);

	cqp->len = len;
}
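
/*
 * Create a lookaside (crypto or security) session for the given SA and
 * prepare the librte_ipsec session for use in the data-path.
 */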
static int
fill_ipsec_session(struct rte_ipsec_session *ss, struct ipsec_ctx *ctx,
	struct ipsec_sa *sa)
{
	int32_t rc;

	/* setup crypto section */
	if (ss->type == RTE_SECURITY_ACTION_TYPE_NONE ||
			ss->type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO) {
		RTE_ASSERT(ss->crypto.ses == NULL);
		rc = create_lookaside_session(ctx, sa, ss);
		if (rc != 0)
			return rc;
	/* setup session action type */
	} else if (ss->type == RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL) {
		RTE_ASSERT(ss->security.ses == NULL);
		rc = create_lookaside_session(ctx, sa, ss);
		if (rc != 0)
			return rc;
	} else
		RTE_ASSERT(0);

	rc = rte_ipsec_session_prepare(ss);
	if (rc != 0)
		memset(ss, 0, sizeof(*ss));

	return rc;
}
/*
 * group input packets by the SA they belong to.
 */
static uint32_t
sa_group(void *sa_ptr[], struct rte_mbuf *pkts[],
	struct rte_ipsec_group grp[], uint32_t num)
{
	uint32_t i, n, spi;
	void *sa;
	void * const nosa = &spi;

	sa = nosa;
	grp[0].m = pkts;

	for (i = 0, n = 0; i != num; i++) {

		if (sa != sa_ptr[i]) {
			grp[n].cnt = pkts + i - grp[n].m;
			n += (sa != nosa);
			grp[n].id.ptr = sa_ptr[i];
			grp[n].m = pkts + i;
			sa = sa_ptr[i];
		}
	}

	/* terminate last group */
	grp[n].cnt = pkts + i - grp[n].m;
	n += (sa != nosa);

	return n;
}
/*
 * helper function, splits processed packets into ipv4/ipv6 traffic.
 */
static inline void
copy_to_trf(struct ipsec_traffic *trf, uint64_t satp, struct rte_mbuf *mb[],
	uint32_t num)
{
	uint32_t j, ofs, s;
	struct traffic_type *out;

	/*
	 * determine traffic type (ipv4/ipv6) and offset for ACL classify
	 */
	if ((satp & RTE_IPSEC_SATP_DIR_MASK) == RTE_IPSEC_SATP_DIR_IB) {
		if ((satp & RTE_IPSEC_SATP_IPV_MASK) == RTE_IPSEC_SATP_IPV4) {
			out = &trf->ip4;
			ofs = offsetof(struct ip, ip_p);
		} else {
			out = &trf->ip6;
			ofs = offsetof(struct ip6_hdr, ip6_nxt);
		}
	} else if (SATP_OUT_IPV4(satp)) {
		out = &trf->ip4;
		ofs = offsetof(struct ip, ip_p);
	} else {
		out = &trf->ip6;
		ofs = offsetof(struct ip6_hdr, ip6_nxt);
	}

	for (j = 0, s = out->num; j != num; j++) {
		out->data[s + j] = rte_pktmbuf_mtod_offset(mb[j],
				void *, ofs);
		out->pkts[s + j] = mb[j];
	}

	out->num += num;
}
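
/*
 * Prepare one crypto op per packet in the group and enqueue the ops to the
 * crypto-device queue-pair that serves this SA.
 * Returns the number of packets successfully prepared.
 */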
static uint32_t
ipsec_prepare_crypto_group(struct ipsec_ctx *ctx, struct ipsec_sa *sa,
	struct rte_ipsec_session *ips, struct rte_mbuf **m,
	unsigned int cnt)
{
	struct cdev_qp *cqp;
	struct rte_crypto_op *cop[cnt];
	uint32_t j, k;
	struct ipsec_mbuf_metadata *priv;

	cqp = &ctx->tbl[sa->cdev_id_qp];

	/* in this app each mbuf has its own crypto op */
	for (j = 0; j != cnt; j++) {
		priv = get_priv(m[j]);
		cop[j] = &priv->cop;
		/*
		 * this is just to satisfy inbound_sa_check();
		 * should be removed in future.
		 */
		priv->sa = sa;
	}

	/* prepare and enqueue crypto ops */
	k = rte_ipsec_pkt_crypto_prepare(ips, m, cop, cnt);
	if (k != 0)
		enqueue_cop_bulk(cqp, cop, k);

	return k;
}
/*
 * helper routine for inline and cpu (synchronous) processing
 * this is just to satisfy inbound_sa_check() and get_hop_for_offload_pkt().
 * Should be removed in future.
 */
static inline void
prep_process_group(void *sa, struct rte_mbuf *mb[], uint32_t cnt)
{
	uint32_t j;
	struct ipsec_mbuf_metadata *priv;

	for (j = 0; j != cnt; j++) {
		priv = get_priv(mb[j]);
		priv->sa = sa;
	}
}
/*
 * finish processing of packets successfully decrypted by an inline processor
 */
static uint32_t
ipsec_process_inline_group(struct rte_ipsec_session *ips, void *sa,
	struct ipsec_traffic *trf, struct rte_mbuf *mb[], uint32_t cnt)
{
	uint64_t satp;
	uint32_t k;

	/* get SA type */
	satp = rte_ipsec_sa_type(ips->sa);
	prep_process_group(sa, mb, cnt);

	k = rte_ipsec_pkt_process(ips, mb, cnt);
	copy_to_trf(trf, satp, mb, k);
	return k;
}
/*
 * process packets synchronously using the cpu-crypto path
 */
static uint32_t
ipsec_process_cpu_group(struct rte_ipsec_session *ips, void *sa,
	struct ipsec_traffic *trf, struct rte_mbuf *mb[], uint32_t cnt)
{
	uint64_t satp;
	uint32_t k;

	/* get SA type */
	satp = rte_ipsec_sa_type(ips->sa);
	prep_process_group(sa, mb, cnt);

	k = rte_ipsec_pkt_cpu_prepare(ips, mb, cnt);
	k = rte_ipsec_pkt_process(ips, mb, k);
	copy_to_trf(trf, satp, mb, k);
	return k;
}
/*
 * Process ipsec packets.
 * If a packet belongs to an SA that is handled by inline-crypto,
 * then process it immediately.
 * Otherwise do the necessary preparations and queue it to the related
 * crypto-dev queue.
 */
void
ipsec_process(struct ipsec_ctx *ctx, struct ipsec_traffic *trf)
{
	uint32_t i, k, n;
	struct ipsec_sa *sa;
	struct rte_ipsec_group *pg;
	struct rte_ipsec_session *ips;
	struct rte_ipsec_group grp[RTE_DIM(trf->ipsec.pkts)];

	n = sa_group(trf->ipsec.saptr, trf->ipsec.pkts, grp, trf->ipsec.num);

	for (i = 0; i != n; i++) {

		pg = grp + i;
		sa = ipsec_mask_saptr(pg->id.ptr);

		/* fallback to cryptodev with RX packets which inline
		 * processor was unable to process
		 */
		if (sa != NULL)
			ips = (pg->id.val & IPSEC_SA_OFFLOAD_FALLBACK_FLAG) ?
				ipsec_get_fallback_session(sa) :
				ipsec_get_primary_session(sa);

		/* no valid HW session for that SA, try to create one */
		if (sa == NULL || (ips->crypto.ses == NULL &&
				fill_ipsec_session(ips, ctx, sa) != 0))
			k = 0;

		/* process packets inline */
		else {
			switch (ips->type) {
			/* enqueue packets to crypto dev */
			case RTE_SECURITY_ACTION_TYPE_NONE:
			case RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL:
				k = ipsec_prepare_crypto_group(ctx, sa, ips,
						pg->m, pg->cnt);
				break;
			case RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO:
			case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
				k = ipsec_process_inline_group(ips, sa,
						trf, pg->m, pg->cnt);
				break;
			case RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO:
				k = ipsec_process_cpu_group(ips, sa,
						trf, pg->m, pg->cnt);
				break;
			default:
				k = 0;
			}
		}

		/* drop packets that cannot be enqueued/processed */
		if (k != pg->cnt)
			free_pkts(pg->m + k, pg->cnt - k);
	}
}
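
/*
 * Dequeue up to 'num' completed crypto ops from one crypto-device
 * queue-pair and update its in-flight counter.
 */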
static inline uint32_t
cqp_dequeue(struct cdev_qp *cqp, struct rte_crypto_op *cop[], uint32_t num)
{
	uint32_t n;

	if (cqp->in_flight == 0)
		return 0;

	n = rte_cryptodev_dequeue_burst(cqp->id, cqp->qp, cop, num);
	RTE_ASSERT(cqp->in_flight >= n);
	cqp->in_flight -= n;

	return n;
}
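
/*
 * Poll all crypto-device queue-pairs of the context for completed ops,
 * starting from the queue-pair where the previous poll stopped so that
 * polling stays fair across queue-pairs.
 */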
static inline uint32_t
ctx_dequeue(struct ipsec_ctx *ctx, struct rte_crypto_op *cop[], uint32_t num)
{
	uint32_t i, n;

	n = 0;

	for (i = ctx->last_qp; n != num && i != ctx->nb_qps; i++)
		n += cqp_dequeue(ctx->tbl + i, cop + n, num - n);

	for (i = 0; n != num && i != ctx->last_qp; i++)
		n += cqp_dequeue(ctx->tbl + i, cop + n, num - n);

	ctx->last_qp = i;
	return n;
}
/*
 * dequeue packets from crypto-queues and finalize processing.
 */
void
ipsec_cqp_process(struct ipsec_ctx *ctx, struct ipsec_traffic *trf)
{
	uint64_t satp;
	uint32_t i, k, n, ng;
	struct rte_ipsec_session *ss;
	struct traffic_type *out;
	struct rte_ipsec_group *pg;
	struct rte_crypto_op *cop[RTE_DIM(trf->ipsec.pkts)];
	struct rte_ipsec_group grp[RTE_DIM(trf->ipsec.pkts)];

	trf->ip4.num = 0;
	trf->ip6.num = 0;

	out = &trf->ipsec;

	/* dequeue completed crypto-ops */
	n = ctx_dequeue(ctx, cop, RTE_DIM(cop));
	if (n == 0)
		return;

	/* group them by ipsec session */
	ng = rte_ipsec_pkt_crypto_group((const struct rte_crypto_op **)
		(uintptr_t)cop, out->pkts, grp, n);

	/* process each group of packets */
	for (i = 0; i != ng; i++) {

		pg = grp + i;
		ss = pg->id.ptr;
		satp = rte_ipsec_sa_type(ss->sa);

		k = rte_ipsec_pkt_process(ss, pg->m, pg->cnt);
		copy_to_trf(trf, satp, pg->m, k);

		/* free bad packets, if any */
		free_pkts(pg->m + k, pg->cnt - k);

		n -= pg->cnt;
	}

	/* we should never have a packet with an unknown SA here */
	RTE_VERIFY(n == 0);
}
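
/*
 * Note: ipsec_process() and ipsec_cqp_process() above are the lookaside
 * data-path entry points; in ipsec-secgw they are expected to be invoked
 * once per burst from the worker lcore main loop when the librte_ipsec
 * ("library") code path is enabled.
 */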