/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2016 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/types.h>
#include <netinet/in.h>
#include <netinet/ip.h>

#include <rte_branch_prediction.h>
#include <rte_log.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>
#include <rte_mbuf.h>
#include <rte_hash.h>

#include "ipsec.h"
#include "esp.h"
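
/*
 * Resolve the cryptodev queue pair for an SA and create its symmetric
 * session. The (lcore_id, cipher_algo, auth_algo) key is looked up in
 * cdev_map to find a queue pair whose device supports the SA's algorithms;
 * the resulting table index is cached in sa->cdev_id_qp for the fast path.
 */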
static inline int32_t
create_session(struct ipsec_ctx *ipsec_ctx, struct ipsec_sa *sa)
{
	unsigned long cdev_id_qp = 0;
	int32_t ret;
	struct cdev_key key = { 0 };

	key.lcore_id = (uint8_t)rte_lcore_id();
	key.cipher_algo = (uint8_t)sa->cipher_algo;
	key.auth_algo = (uint8_t)sa->auth_algo;

	ret = rte_hash_lookup_data(ipsec_ctx->cdev_map, &key,
			(void **)&cdev_id_qp);
	if (ret < 0) {
		RTE_LOG(ERR, IPSEC, "No cryptodev: core %u, cipher_algo %u, "
				"auth_algo %u\n", key.lcore_id, key.cipher_algo,
				key.auth_algo);
		return -1;
	}

	RTE_LOG(DEBUG, IPSEC, "Create session for SA spi %u on cryptodev "
			"%u qp %u\n", sa->spi, ipsec_ctx->tbl[cdev_id_qp].id,
			ipsec_ctx->tbl[cdev_id_qp].qp);

	sa->crypto_session = rte_cryptodev_sym_session_create(
			ipsec_ctx->tbl[cdev_id_qp].id, sa->xforms);

	sa->cdev_id_qp = cdev_id_qp;

	return 0;
}
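
/*
 * Buffer one crypto op on a queue pair and, once MAX_PKT_BURST ops have
 * accumulated, submit the whole burst to the cryptodev. Ops the device
 * does not accept are dropped and their source mbufs freed.
 */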
static inline void
enqueue_cop(struct cdev_qp *cqp, struct rte_crypto_op *cop)
{
	int32_t ret, i;

	cqp->buf[cqp->len++] = cop;

	if (cqp->len == MAX_PKT_BURST) {
		ret = rte_cryptodev_enqueue_burst(cqp->id, cqp->qp,
				cqp->buf, cqp->len);
		if (ret < cqp->len) {
			RTE_LOG(DEBUG, IPSEC, "Cryptodev %u queue %u:"
					" enqueued %u crypto ops out of %u\n",
					cqp->id, cqp->qp, ret, cqp->len);
			/* drop the ops the device did not accept */
			for (i = ret; i < cqp->len; i++)
				rte_pktmbuf_free(cqp->buf[i]->sym->m_src);
		}
		cqp->in_flight += ret;
		cqp->len = 0;
	}
}
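
/*
 * Turn each packet into a symmetric crypto op: attach the SA's session
 * (creating it on first use), let the ESP transform callback fill in the
 * op, then push the op toward the SA's cryptodev queue pair.
 */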
static inline void
ipsec_enqueue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
		struct rte_mbuf *pkts[], struct ipsec_sa *sas[],
		uint16_t nb_pkts)
{
	int32_t ret = 0, i;
	struct ipsec_mbuf_metadata *priv;
	struct ipsec_sa *sa;

	for (i = 0; i < nb_pkts; i++) {
		if (unlikely(sas[i] == NULL)) {
			rte_pktmbuf_free(pkts[i]);
			continue;
		}

		rte_prefetch0(sas[i]);
		rte_prefetch0(pkts[i]);

		priv = get_priv(pkts[i]);
		sa = sas[i];
		priv->sa = sa;

		priv->cop.type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
		priv->cop.status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;

		rte_prefetch0(&priv->sym_cop);
		priv->cop.sym = &priv->sym_cop;

		if ((unlikely(sa->crypto_session == NULL)) &&
				create_session(ipsec_ctx, sa)) {
			rte_pktmbuf_free(pkts[i]);
			continue;
		}

		rte_crypto_op_attach_sym_session(&priv->cop,
				sa->crypto_session);

		ret = xform_func(pkts[i], sa, &priv->cop);
		if (unlikely(ret)) {
			rte_pktmbuf_free(pkts[i]);
			continue;
		}

		RTE_ASSERT(sa->cdev_id_qp < ipsec_ctx->nb_qps);
		enqueue_cop(&ipsec_ctx->tbl[sa->cdev_id_qp], &priv->cop);
	}
}
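
/*
 * Poll the context's queue pairs round-robin, resuming after the last one
 * serviced, and run the post-crypto ESP callback on every completed op.
 * Returns up to max_pkts finished packets in pkts[].
 */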
static inline int
ipsec_dequeue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
		struct rte_mbuf *pkts[], uint16_t max_pkts)
{
	int nb_pkts = 0, ret = 0, i, j, nb_cops;
	struct ipsec_mbuf_metadata *priv;
	struct rte_crypto_op *cops[max_pkts];
	struct ipsec_sa *sa;
	struct rte_mbuf *pkt;

	for (i = 0; i < ipsec_ctx->nb_qps && nb_pkts < max_pkts; i++) {
		struct cdev_qp *cqp;

		cqp = &ipsec_ctx->tbl[ipsec_ctx->last_qp++];
		if (ipsec_ctx->last_qp == ipsec_ctx->nb_qps)
			ipsec_ctx->last_qp %= ipsec_ctx->nb_qps;

		if (cqp->in_flight == 0)
			continue;

		nb_cops = rte_cryptodev_dequeue_burst(cqp->id, cqp->qp,
				cops, max_pkts - nb_pkts);

		cqp->in_flight -= nb_cops;

		for (j = 0; j < nb_cops; j++) {
			pkt = cops[j]->sym->m_src;
			rte_prefetch0(pkt);

			priv = get_priv(pkt);
			sa = priv->sa;

			RTE_ASSERT(sa != NULL);

			ret = xform_func(pkt, sa, cops[j]);
			if (unlikely(ret))
				rte_pktmbuf_free(pkt);
			else
				pkts[nb_pkts++] = pkt;
		}
	}

	return nb_pkts;
}
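
/*
 * Inbound path: match received ESP packets to their SAs, run them through
 * the crypto pipeline and collect up to len decrypted packets in pkts[].
 */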
uint16_t
ipsec_inbound(struct ipsec_ctx *ctx, struct rte_mbuf *pkts[],
		uint16_t nb_pkts, uint16_t len)
{
	struct ipsec_sa *sas[nb_pkts];

	inbound_sa_lookup(ctx->sa_ctx, pkts, sas, nb_pkts);

	ipsec_enqueue(esp_inbound, ctx, pkts, sas, nb_pkts);

	return ipsec_dequeue(esp_inbound_post, ctx, pkts, len);
}
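
/*
 * Outbound path: resolve each packet's SA from sa_idx[], run the packets
 * through the crypto pipeline and collect up to len encrypted packets.
 */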
uint16_t
ipsec_outbound(struct ipsec_ctx *ctx, struct rte_mbuf *pkts[],
		uint32_t sa_idx[], uint16_t nb_pkts, uint16_t len)
{
	struct ipsec_sa *sas[nb_pkts];

	outbound_sa_lookup(ctx->sa_ctx, sa_idx, sas, nb_pkts);

	ipsec_enqueue(esp_outbound, ctx, pkts, sas, nb_pkts);

	return ipsec_dequeue(esp_outbound_post, ctx, pkts, len);
}
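
/*
 * Usage sketch (illustrative only, not part of the original file): how a
 * per-lcore loop might drive the inbound path. "portid", "queueid" and the
 * ipsec_ctx pointer "ctx" are hypothetical names; the real RX loop and
 * context setup live elsewhere in the ipsec-secgw example.
 *
 *	struct rte_mbuf *pkts[MAX_PKT_BURST];
 *	uint16_t nb_rx, nb_done;
 *
 *	nb_rx = rte_eth_rx_burst(portid, queueid, pkts, MAX_PKT_BURST);
 *	if (nb_rx > 0)
 *		nb_done = ipsec_inbound(ctx, pkts, nb_rx, MAX_PKT_BURST);
 *	// the first nb_done entries of pkts[] are decrypted packets
 */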