1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2016-2017 Intel Corporation
5 #include <netinet/in.h>
6 #include <netinet/ip.h>
8 #include <rte_branch_prediction.h>
10 #include <rte_cryptodev.h>
11 #include <rte_ethdev.h>
/*
 * Evaluates to non-zero when SA type flags 't' describe IPv4 output:
 * either transport mode over an IPv4 SA, or an IPv4 tunnel.
 */
16 #define SATP_OUT_IPV4(t) \
17 ((((t) & RTE_IPSEC_SATP_MODE_MASK) == RTE_IPSEC_SATP_MODE_TRANS && \
18 (((t) & RTE_IPSEC_SATP_IPV_MASK) == RTE_IPSEC_SATP_IPV4)) || \
19 ((t) & RTE_IPSEC_SATP_MODE_MASK) == RTE_IPSEC_SATP_MODE_TUNLV4)
22 /* helper routine to free bulk of packets */
24 free_pkts(struct rte_mbuf *mb[], uint32_t n)
/* return each mbuf in mb[0..n-1] to its mempool */
28 for (i = 0; i != n; i++)
29 rte_pktmbuf_free(mb[i]);
32 /* helper routine to free bulk of crypto-ops and related packets */
34 free_cops(struct rte_crypto_op *cop[], uint32_t n)
/*
 * Only the source mbuf (m_src) attached to each sym crypto-op is freed
 * here; the ops themselves are presumably returned to their mempool
 * elsewhere — NOTE(review): confirm op ownership with the caller.
 */
38 for (i = 0; i != n; i++)
39 rte_pktmbuf_free(cop[i]->sym->m_src);
42 /* helper routine to enqueue bulk of crypto ops */
44 enqueue_cop_bulk(struct cdev_qp *cqp, struct rte_crypto_op *cop[], uint32_t num)
46 uint32_t i, k, len, n;
51 * if cqp is empty and we have enough ops,
52 * then queue them to the PMD straightway.
53 */
/* fast path: batch is >= 3/4 of buffer capacity and nothing is buffered */
54 if (num >= RTE_DIM(cqp->buf) * 3 / 4 && len == 0) {
55 n = rte_cryptodev_enqueue_burst(cqp->id, cqp->qp, cop, num);
/* ops the PMD did not accept are dropped (their mbufs freed) */
57 free_cops(cop + n, num - n);
/* slow path: copy ops into the software buffer, n = free slots left */
64 n = RTE_DIM(cqp->buf) - len;
65 n = RTE_MIN(num - k, n);
67 /* put packets into cqp */
68 for (i = 0; i != n; i++)
69 cqp->buf[len + i] = cop[k + i];
74 /* if cqp is full then, enqueue crypto-ops to PMD */
75 if (len == RTE_DIM(cqp->buf)) {
76 n = rte_cryptodev_enqueue_burst(cqp->id, cqp->qp,
79 free_cops(cqp->buf + n, len - n);
/*
 * Bind an rte_ipsec_session to the SA's device-level session, lazily
 * creating the lookaside session on first use, then prepare the session
 * via librte_ipsec.  On failure the session struct is zeroed —
 * NOTE(review): the guard between prepare and memset is not visible in
 * this view; confirm memset runs only on error.
 */
90 fill_ipsec_session(struct rte_ipsec_session *ss, struct ipsec_ctx *ctx,
95 /* setup crypto section */
96 if (ss->type == RTE_SECURITY_ACTION_TYPE_NONE) {
97 if (sa->crypto_session == NULL) {
/* lazily create the lookaside crypto session */
98 rc = create_lookaside_session(ctx, sa);
102 ss->crypto.ses = sa->crypto_session;
103 /* setup session action type */
104 } else if (sa->type == RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL) {
105 if (sa->sec_session == NULL) {
106 rc = create_lookaside_session(ctx, sa);
/* hand the security session, context and offload flags to librte_ipsec */
110 ss->security.ses = sa->sec_session;
111 ss->security.ctx = sa->security_ctx;
112 ss->security.ol_flags = sa->ol_flags;
116 rc = rte_ipsec_session_prepare(ss);
118 memset(ss, 0, sizeof(*ss));
124 * group input packets by the SA they belong to.
127 sa_group(struct ipsec_sa *sa_ptr[], struct rte_mbuf *pkts[],
128 struct rte_ipsec_group grp[], uint32_t num)
/* sentinel pointer that can never match a real SA entry */
132 void * const nosa = &spi;
/* one pass: consecutive packets with the same SA form one group */
135 for (i = 0, n = 0; i != num; i++) {
137 if (sa != sa_ptr[i]) {
/* SA changed: close the previous group, start a new one */
138 grp[n].cnt = pkts + i - grp[n].m;
140 grp[n].id.ptr = sa_ptr[i];
146 /* terminate last group */
148 grp[n].cnt = pkts + i - grp[n].m;
156 * helper function, splits processed packets into ipv4/ipv6 traffic.
159 copy_to_trf(struct ipsec_traffic *trf, uint64_t satp, struct rte_mbuf *mb[],
163 struct traffic_type *out;
166 * determine traffic type(ipv4/ipv6) and offset for ACL classify
167 */
/* inbound: IP version comes from the SA's IPV flag */
169 if ((satp & RTE_IPSEC_SATP_DIR_MASK) == RTE_IPSEC_SATP_DIR_IB) {
170 if ((satp & RTE_IPSEC_SATP_IPV_MASK) == RTE_IPSEC_SATP_IPV4) {
172 ofs = offsetof(struct ip, ip_p);
175 ofs = offsetof(struct ip6_hdr, ip6_nxt);
/* outbound: transport-over-v4 or v4 tunnel counts as ipv4 */
177 } else if (SATP_OUT_IPV4(satp)) {
179 ofs = offsetof(struct ip, ip_p);
182 ofs = offsetof(struct ip6_hdr, ip6_nxt);
/* append packets and their next-proto field pointers to 'out' */
185 for (j = 0, s = out->num; j != num; j++) {
186 out->data[s + j] = rte_pktmbuf_mtod_offset(mb[j],
188 out->pkts[s + j] = mb[j];
195 * Process ipsec packets.
196 * If packet belong to SA that is subject of inline-crypto,
197 * then process it immediately.
198 * Otherwise do necessary preparations and queue it to related
202 ipsec_process(struct ipsec_ctx *ctx, struct ipsec_traffic *trf)
207 struct ipsec_mbuf_metadata *priv;
208 struct rte_ipsec_group *pg;
209 struct rte_ipsec_session *ips;
/* per-burst scratch arrays sized to the maximum traffic burst */
211 struct rte_crypto_op *cop[RTE_DIM(trf->ipsec.pkts)];
212 struct rte_ipsec_group grp[RTE_DIM(trf->ipsec.pkts)];
/* split the burst into groups of consecutive packets sharing one SA */
214 n = sa_group(trf->ipsec.saptr, trf->ipsec.pkts, grp, trf->ipsec.num);
216 for (i = 0; i != n; i++) {
223 /* no valid HW session for that SA, try to create one */
224 if (sa == NULL || (ips->crypto.ses == NULL &&
225 fill_ipsec_session(ips, ctx, sa) != 0))
228 /* process packets inline */
229 else if (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
231 RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) {
233 satp = rte_ipsec_sa_type(ips->sa);
236 * This is just to satisfy inbound_sa_check()
237 * and get_hop_for_offload_pkt().
238 * Should be removed in future.
240 for (j = 0; j != pg->cnt; j++) {
241 priv = get_priv(pg->m[j]);
/* inline path: k = number of packets processed successfully */
245 k = rte_ipsec_pkt_process(ips, pg->m, pg->cnt);
246 copy_to_trf(trf, satp, pg->m, k);
248 /* enqueue packets to crypto dev */
/* lookaside path: pick the crypto queue pair assigned to this SA */
251 cqp = &ctx->tbl[sa->cdev_id_qp];
253 /* for that app each mbuf has it's own crypto op */
254 for (j = 0; j != pg->cnt; j++) {
255 priv = get_priv(pg->m[j]);
258 * this is just to satisfy inbound_sa_check()
259 * should be removed in future.
264 /* prepare and enqueue crypto ops */
265 k = rte_ipsec_pkt_crypto_prepare(ips, pg->m, cop,
268 enqueue_cop_bulk(cqp, cop, k);
271 /* drop packets that cannot be enqueued/processed */
273 free_pkts(pg->m + k, pg->cnt - k);
/*
 * Dequeue up to 'num' completed crypto-ops from one crypto queue pair.
 * Returns the number of ops dequeued; short-circuits to zero when
 * nothing is currently in flight on this qp.
 */
277 static inline uint32_t
278 cqp_dequeue(struct cdev_qp *cqp, struct rte_crypto_op *cop[], uint32_t num)
282 if (cqp->in_flight == 0)
285 n = rte_cryptodev_dequeue_burst(cqp->id, cqp->qp, cop, num);
/* the PMD must never return more ops than were submitted */
286 RTE_ASSERT(cqp->in_flight >= n);
/*
 * Dequeue completed crypto-ops across all of the context's queue pairs,
 * starting from ctx->last_qp and wrapping around, until 'num' ops are
 * collected or every qp has been visited once.
 */
292 static inline uint32_t
293 ctx_dequeue(struct ipsec_ctx *ctx, struct rte_crypto_op *cop[], uint32_t num)
/* first scan [last_qp, nb_qps) ... */
299 for (i = ctx->last_qp; n != num && i != ctx->nb_qps; i++)
300 n += cqp_dequeue(ctx->tbl + i, cop + n, num - n);
/* ... then wrap to [0, last_qp) */
302 for (i = 0; n != num && i != ctx->last_qp; i++)
303 n += cqp_dequeue(ctx->tbl + i, cop + n, num - n);
310 * dequeue packets from crypto-queues and finalize processing.
313 ipsec_cqp_process(struct ipsec_ctx *ctx, struct ipsec_traffic *trf)
316 uint32_t i, k, n, ng;
317 struct rte_ipsec_session *ss;
318 struct traffic_type *out;
319 struct rte_ipsec_group *pg;
/* scratch arrays sized to the maximum traffic burst */
320 struct rte_crypto_op *cop[RTE_DIM(trf->ipsec.pkts)];
321 struct rte_ipsec_group grp[RTE_DIM(trf->ipsec.pkts)];
328 /* dequeue completed crypto-ops */
329 n = ctx_dequeue(ctx, cop, RTE_DIM(cop));
333 /* group them by ipsec session */
334 ng = rte_ipsec_pkt_crypto_group((const struct rte_crypto_op **)
335 (uintptr_t)cop, out->pkts, grp, n);
337 /* process each group of packets */
338 for (i = 0; i != ng; i++) {
342 satp = rte_ipsec_sa_type(ss->sa);
/* finalize IPsec processing; k = number processed successfully */
344 k = rte_ipsec_pkt_process(ss, pg->m, pg->cnt);
345 copy_to_trf(trf, satp, pg->m, k);
347 /* free bad packets, if any */
348 free_pkts(pg->m + k, pg->cnt - k);
353 /* we should never have packet with unknown SA here */