/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2017 Intel Corporation
 */
#include <sys/types.h>
#include <netinet/in.h>
#include <netinet/ip.h>

#include <rte_branch_prediction.h>
#include <rte_log.h>
#include <rte_cryptodev.h>
#include <rte_ethdev.h>
#include <rte_mbuf.h>

#include "ipsec.h"

#define SATP_OUT_IPV4(t)	\
	((((t) & RTE_IPSEC_SATP_MODE_MASK) == RTE_IPSEC_SATP_MODE_TRANS && \
	(((t) & RTE_IPSEC_SATP_IPV_MASK) == RTE_IPSEC_SATP_IPV4)) || \
	((t) & RTE_IPSEC_SATP_MODE_MASK) == RTE_IPSEC_SATP_MODE_TUNLV4)
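
/*
 * Illustrative self-check (not part of the original file): how the macro
 * classifies SA type flags from <rte_ipsec_sa.h>. Transport-over-IPv4 and
 * tunnel-to-IPv4 both count as IPv4 output; an IPv6 tunnel does not.
 */
static inline void
satp_out_ipv4_example(void)
{
	RTE_ASSERT(SATP_OUT_IPV4(RTE_IPSEC_SATP_MODE_TUNLV4));
	RTE_ASSERT(SATP_OUT_IPV4(RTE_IPSEC_SATP_MODE_TRANS |
		RTE_IPSEC_SATP_IPV4));
	RTE_ASSERT(!SATP_OUT_IPV4(RTE_IPSEC_SATP_MODE_TUNLV6));
}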

/* helper routine to free bulk of packets */
static inline void
free_pkts(struct rte_mbuf *mb[], uint32_t n)
{
	uint32_t i;

	for (i = 0; i != n; i++)
		rte_pktmbuf_free(mb[i]);
}

/* helper routine to free bulk of crypto-ops and related packets */
static inline void
free_cops(struct rte_crypto_op *cop[], uint32_t n)
{
	uint32_t i;

	for (i = 0; i != n; i++)
		rte_pktmbuf_free(cop[i]->sym->m_src);
}
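
/*
 * Note (illustrative, added for clarity): free_cops() can free just the
 * source mbuf because in this app every crypto-op lives in its packet's
 * private area (see ipsec_prepare_crypto_group() below); releasing m_src
 * therefore releases the op as well - no separate crypto-op mempool is
 * involved on this path.
 */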

/* helper routine to enqueue bulk of crypto ops */
static inline void
enqueue_cop_bulk(struct cdev_qp *cqp, struct rte_crypto_op *cop[], uint32_t num)
{
	uint32_t i, k, len, n;

	len = cqp->len;

	/*
	 * if cqp is empty and we have enough ops,
	 * then queue them to the PMD straight away.
	 */
	if (num >= RTE_DIM(cqp->buf) * 3 / 4 && len == 0) {
		n = rte_cryptodev_enqueue_burst(cqp->id, cqp->qp, cop, num);
		cqp->in_flight += n;
		free_cops(cop + n, num - n);
		return;
	}

	k = 0;

	do {
		n = RTE_DIM(cqp->buf) - len;
		n = RTE_MIN(num - k, n);

		/* put packets into cqp */
		for (i = 0; i != n; i++)
			cqp->buf[len + i] = cop[k + i];

		k += n;
		len += n;

		/* if cqp is full, then enqueue crypto-ops to the PMD */
		if (len == RTE_DIM(cqp->buf)) {
			n = rte_cryptodev_enqueue_burst(cqp->id, cqp->qp,
					cqp->buf, len);
			cqp->in_flight += n;
			free_cops(cqp->buf + n, len - n);
			len = 0;
		}

	} while (k != num);

	cqp->len = len;
}
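
/*
 * Illustrative sketch (hypothetical helper, not part of the original file):
 * drain whatever is still buffered in a cdev queue-pair, e.g. at teardown.
 * Mirrors the flush step of enqueue_cop_bulk() above.
 */
static inline void
cqp_flush_example(struct cdev_qp *cqp)
{
	uint32_t n;

	if (cqp->len == 0)
		return;

	/* hand buffered ops to the PMD; drop whatever it does not accept */
	n = rte_cryptodev_enqueue_burst(cqp->id, cqp->qp, cqp->buf, cqp->len);
	cqp->in_flight += n;
	free_cops(cqp->buf + n, cqp->len - n);
	cqp->len = 0;
}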

static inline int
fill_ipsec_session(struct rte_ipsec_session *ss, struct ipsec_ctx *ctx,
	struct ipsec_sa *sa)
{
	int32_t rc;

	/* setup crypto section */
	if (ss->type == RTE_SECURITY_ACTION_TYPE_NONE) {
		RTE_ASSERT(ss->crypto.ses == NULL);
		rc = create_lookaside_session(ctx, sa, ss);
		if (rc != 0)
			return rc;
	/* setup session action type */
	} else if (ss->type == RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL) {
		RTE_ASSERT(ss->security.ses == NULL);
		rc = create_lookaside_session(ctx, sa, ss);
		if (rc != 0)
			return rc;
	} else
		RTE_ASSERT(0);

	rc = rte_ipsec_session_prepare(ss);
	if (rc != 0)
		memset(ss, 0, sizeof(*ss));

	return rc;
}
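
/*
 * Illustrative sketch (hypothetical helper, not part of the original file):
 * the lazy-init pattern ipsec_process() below applies to both the primary
 * and the fallback session - the session pointer doubles as the
 * "already created" marker.
 */
static inline int
session_ready_example(struct rte_ipsec_session *ss, struct ipsec_ctx *ctx,
	struct ipsec_sa *sa)
{
	if (ss->crypto.ses != NULL)
		return 0;
	return fill_ipsec_session(ss, ctx, sa);
}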

/*
 * group input packets by the SA they belong to.
 */
static uint32_t
sa_group(void *sa_ptr[], struct rte_mbuf *pkts[],
	struct rte_ipsec_group grp[], uint32_t num)
{
	uint32_t i, n, spi;
	void *sa;
	void * const nosa = &spi;

	sa = nosa;
	for (i = 0, n = 0; i != num; i++) {

		if (sa != sa_ptr[i]) {
			grp[n].cnt = pkts + i - grp[n].m;
			n += (sa != nosa);
			grp[n].id.ptr = sa_ptr[i];
			grp[n].m = pkts + i;
			sa = sa_ptr[i];
		}
	}

	/* terminate last group */
	if (sa != nosa) {
		grp[n].cnt = pkts + i - grp[n].m;
		n++;
	}

	return n;
}
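
/*
 * Worked example (illustrative): sa_group() splits on consecutive runs
 * only. For saptr[] = {A, A, B, NULL, B, B} it emits four groups:
 * {A, cnt 2}, {B, cnt 1}, {NULL, cnt 1}, {B, cnt 2}. Packets of the same
 * SA that are not adjacent land in separate groups, so callers benefit
 * from SA-sorted input but do not require it.
 */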

/*
 * helper function, splits processed packets into ipv4/ipv6 traffic.
 */
static inline void
copy_to_trf(struct ipsec_traffic *trf, uint64_t satp, struct rte_mbuf *mb[],
	uint32_t num)
{
	uint32_t j, ofs, s;
	struct traffic_type *out;

	/*
	 * determine traffic type (ipv4/ipv6) and offset for ACL classify
	 */
	if ((satp & RTE_IPSEC_SATP_DIR_MASK) == RTE_IPSEC_SATP_DIR_IB) {
		if ((satp & RTE_IPSEC_SATP_IPV_MASK) == RTE_IPSEC_SATP_IPV4) {
			out = &trf->ip4;
			ofs = offsetof(struct ip, ip_p);
		} else {
			out = &trf->ip6;
			ofs = offsetof(struct ip6_hdr, ip6_nxt);
		}
	} else if (SATP_OUT_IPV4(satp)) {
		out = &trf->ip4;
		ofs = offsetof(struct ip, ip_p);
	} else {
		out = &trf->ip6;
		ofs = offsetof(struct ip6_hdr, ip6_nxt);
	}

	for (j = 0, s = out->num; j != num; j++) {
		out->data[s + j] = rte_pktmbuf_mtod_offset(mb[j],
				void *, ofs);
		out->pkts[s + j] = mb[j];
	}

	out->num += num;
}
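
/*
 * Illustrative sketch (hypothetical helper, not part of the original file):
 * out->data[] filled above points straight at the next-protocol field
 * (ip_p for IPv4, ip6_nxt for IPv6), so a later stage such as ACL
 * classification can read it without re-parsing the header.
 */
static inline uint8_t
trf_proto_example(const struct traffic_type *tt, uint32_t idx)
{
	return *(const uint8_t *)tt->data[idx];
}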

static inline uint32_t
ipsec_prepare_crypto_group(struct ipsec_ctx *ctx, struct ipsec_sa *sa,
	struct rte_ipsec_session *ips, struct rte_mbuf **m,
	unsigned int cnt)
{
	struct cdev_qp *cqp;
	struct rte_crypto_op *cop[cnt];
	uint32_t j, k;
	struct ipsec_mbuf_metadata *priv;

	cqp = &ctx->tbl[sa->cdev_id_qp];

	/* in this app each mbuf has its own crypto op */
	for (j = 0; j != cnt; j++) {
		priv = get_priv(m[j]);
		cop[j] = &priv->cop;
		/*
		 * this is just to satisfy inbound_sa_check(),
		 * should be removed in future.
		 */
		priv->sa = sa;
	}

	/* prepare and enqueue crypto ops */
	k = rte_ipsec_pkt_crypto_prepare(ips, m, cop, cnt);
	if (k != 0)
		enqueue_cop_bulk(cqp, cop, k);

	return k;
}
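
/*
 * Note (illustrative, added for clarity): rte_ipsec_pkt_crypto_prepare()
 * returns how many ops it managed to set up; the tail m[k..cnt-1] failed
 * preparation and is left for the caller - ipsec_process() below drops
 * those packets via free_pkts(). The cop[cnt] VLA is safe here since cnt
 * is bounded by the trf->ipsec.pkts burst size.
 */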

/*
 * Process ipsec packets.
 * If a packet belongs to an SA that is subject to inline-crypto,
 * then process it immediately.
 * Otherwise do the necessary preparations and queue it to the related
 * crypto-dev queue.
 */
void
ipsec_process(struct ipsec_ctx *ctx, struct ipsec_traffic *trf)
{
	uint64_t satp;
	uint32_t i, j, k, n;
	struct ipsec_sa *sa;
	struct ipsec_mbuf_metadata *priv;
	struct rte_ipsec_group *pg;
	struct rte_ipsec_session *ips;
	struct rte_ipsec_group grp[RTE_DIM(trf->ipsec.pkts)];

	n = sa_group(trf->ipsec.saptr, trf->ipsec.pkts, grp, trf->ipsec.num);

	for (i = 0; i != n; i++) {

		pg = grp + i;
		sa = ipsec_mask_saptr(pg->id.ptr);

		ips = ipsec_get_primary_session(sa);

		/* no valid HW session for that SA, try to create one */
		if (sa == NULL || (ips->crypto.ses == NULL &&
				fill_ipsec_session(ips, ctx, sa) != 0))
			k = 0;

		/* process packets inline */
		else if (ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
				ips->type ==
				RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) {

			/* get SA type */
			satp = rte_ipsec_sa_type(ips->sa);

			/*
			 * This is just to satisfy inbound_sa_check()
			 * and get_hop_for_offload_pkt().
			 * Should be removed in future.
			 */
			for (j = 0; j != pg->cnt; j++) {
				priv = get_priv(pg->m[j]);
				priv->sa = sa;
			}

			/* fallback to cryptodev with RX packets which inline
			 * processor was unable to process
			 */
			if (pg->id.val & IPSEC_SA_OFFLOAD_FALLBACK_FLAG) {
				/* offload packets to cryptodev */
				struct rte_ipsec_session *fallback;

				fallback = ipsec_get_fallback_session(sa);
				if (fallback->crypto.ses == NULL &&
						fill_ipsec_session(fallback,
						ctx, sa) != 0)
					k = 0;
				else
					k = ipsec_prepare_crypto_group(ctx, sa,
						fallback, pg->m, pg->cnt);
			} else {
				/* finish processing of packets successfully
				 * decrypted by an inline processor
				 */
				k = rte_ipsec_pkt_process(ips, pg->m, pg->cnt);
				copy_to_trf(trf, satp, pg->m, k);
			}

		/* enqueue packets to crypto dev */
		} else {

			k = ipsec_prepare_crypto_group(ctx, sa, ips, pg->m,
				pg->cnt);
		}

		/* drop packets that cannot be enqueued/processed */
		if (k != pg->cnt)
			free_pkts(pg->m + k, pg->cnt - k);
	}
}
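
/*
 * Illustrative call flow (hypothetical, not part of the original file):
 * the caller classifies RX packets into trf->ipsec and fills
 * trf->ipsec.saptr[] from an SAD lookup before invoking:
 *
 *	ipsec_process(ipsec_ctx, &trf);
 *
 * Inline-processed packets appear in trf->ip4/trf->ip6 right away;
 * lookaside packets sit in the crypto queues until ipsec_cqp_process()
 * below reaps them.
 */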

static inline uint32_t
cqp_dequeue(struct cdev_qp *cqp, struct rte_crypto_op *cop[], uint32_t num)
{
	uint32_t n;

	if (cqp->in_flight == 0)
		return 0;

	n = rte_cryptodev_dequeue_burst(cqp->id, cqp->qp, cop, num);
	RTE_ASSERT(cqp->in_flight >= n);
	cqp->in_flight -= n;

	return n;
}

static inline uint32_t
ctx_dequeue(struct ipsec_ctx *ctx, struct rte_crypto_op *cop[], uint32_t num)
{
	uint32_t i, n;

	n = 0;

	/* round-robin over the context's queue-pairs, resuming at last_qp */
	for (i = ctx->last_qp; n != num && i != ctx->nb_qps; i++)
		n += cqp_dequeue(ctx->tbl + i, cop + n, num - n);

	for (i = 0; n != num && i != ctx->last_qp; i++)
		n += cqp_dequeue(ctx->tbl + i, cop + n, num - n);

	ctx->last_qp = i;
	return n;
}
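
/*
 * Note (illustrative): with nb_qps == 4 and last_qp == 2 the queue-pairs
 * are polled in order 2, 3, 0, 1, and last_qp then records where polling
 * stopped, so the next call resumes from there and no queue-pair is
 * starved when num is reached early.
 */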

/*
 * dequeue packets from crypto-queues and finalize processing.
 */
void
ipsec_cqp_process(struct ipsec_ctx *ctx, struct ipsec_traffic *trf)
{
	uint64_t satp;
	uint32_t i, k, n, ng;
	struct rte_ipsec_session *ss;
	struct traffic_type *out;
	struct rte_ipsec_group *pg;
	struct rte_crypto_op *cop[RTE_DIM(trf->ipsec.pkts)];
	struct rte_ipsec_group grp[RTE_DIM(trf->ipsec.pkts)];

	trf->ip4.num = 0;
	trf->ip6.num = 0;

	out = &trf->ipsec;

	/* dequeue completed crypto-ops */
	n = ctx_dequeue(ctx, cop, RTE_DIM(cop));
	if (n == 0)
		return;

	/* group them by ipsec session */
	ng = rte_ipsec_pkt_crypto_group((const struct rte_crypto_op **)
		(uintptr_t)cop, out->pkts, grp, n);

	/* process each group of packets */
	for (i = 0; i != ng; i++) {

		pg = grp + i;
		ss = pg->id.ptr;
		satp = rte_ipsec_sa_type(ss->sa);

		k = rte_ipsec_pkt_process(ss, pg->m, pg->cnt);
		copy_to_trf(trf, satp, pg->m, k);

		/* free bad packets, if any */
		free_pkts(pg->m + k, pg->cnt - k);

		n -= pg->cnt;
	}

	/* we should never have a packet with an unknown SA here */
	RTE_VERIFY(n == 0);
}
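
/*
 * Illustrative main-loop excerpt (hypothetical, not part of the original
 * file): lookaside processing is asynchronous, so each lcore iteration
 * pairs the two entry points, with routing/ACL stages consuming
 * trf.ip4/trf.ip6 in between:
 *
 *	ipsec_process(ctx, &trf);	enqueue new work to crypto-devs
 *	route_packets(&trf);		hypothetical forwarding stage
 *	ipsec_cqp_process(ctx, &trf);	reap completed crypto-ops
 */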