/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/types.h>
#include <netinet/in.h>
#include <netinet/ip.h>

#include <rte_branch_prediction.h>
#include <rte_log.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>
#include <rte_mbuf.h>
#include <rte_hash.h>

#include "ipsec.h"
#include "esp.h"
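
/*
 * Create and initialise the crypto session for an SA: look up the
 * cryptodev queue pair that serves this lcore and the SA's cipher/auth/
 * AEAD algorithms, allocate a symmetric session from the session pool,
 * initialise it with the SA's transforms and, if the device limits
 * sessions per queue pair, attach it to that queue pair.
 */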
static inline int
create_session(struct ipsec_ctx *ipsec_ctx, struct ipsec_sa *sa)
{
        struct rte_cryptodev_info cdev_info;
        unsigned long cdev_id_qp = 0;
        int32_t ret;
        struct cdev_key key = { 0 };

        key.lcore_id = (uint8_t)rte_lcore_id();

        key.cipher_algo = (uint8_t)sa->cipher_algo;
        key.auth_algo = (uint8_t)sa->auth_algo;
        key.aead_algo = (uint8_t)sa->aead_algo;

        ret = rte_hash_lookup_data(ipsec_ctx->cdev_map, &key,
                        (void **)&cdev_id_qp);
        if (ret < 0) {
                RTE_LOG(ERR, IPSEC, "No cryptodev: core %u, cipher_algo %u, "
                        "auth_algo %u, aead_algo %u\n",
                        key.lcore_id,
                        key.cipher_algo,
                        key.auth_algo,
                        key.aead_algo);
                return -1;
        }

        RTE_LOG_DP(DEBUG, IPSEC, "Create session for SA spi %u on cryptodev "
                        "%u qp %u\n", sa->spi,
                        ipsec_ctx->tbl[cdev_id_qp].id,
                        ipsec_ctx->tbl[cdev_id_qp].qp);

        sa->crypto_session = rte_cryptodev_sym_session_create(
                        ipsec_ctx->session_pool);
        if (sa->crypto_session == NULL) {
                RTE_LOG(ERR, IPSEC, "Failed to allocate crypto session\n");
                return -1;
        }

        ret = rte_cryptodev_sym_session_init(ipsec_ctx->tbl[cdev_id_qp].id,
                        sa->crypto_session, sa->xforms,
                        ipsec_ctx->session_pool);
        if (ret < 0) {
                RTE_LOG(ERR, IPSEC, "Failed to initialise session on "
                        "cryptodev %u\n", ipsec_ctx->tbl[cdev_id_qp].id);
                return -1;
        }

        rte_cryptodev_info_get(ipsec_ctx->tbl[cdev_id_qp].id, &cdev_info);
        if (cdev_info.sym.max_nb_sessions_per_qp > 0) {
                ret = rte_cryptodev_queue_pair_attach_sym_session(
                                ipsec_ctx->tbl[cdev_id_qp].id,
                                ipsec_ctx->tbl[cdev_id_qp].qp,
                                sa->crypto_session);
                if (ret < 0) {
                        RTE_LOG(ERR, IPSEC,
                                "Session cannot be attached to qp %u\n",
                                ipsec_ctx->tbl[cdev_id_qp].qp);
                        return -1;
                }
        }
        sa->cdev_id_qp = cdev_id_qp;

        return 0;
}
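
/*
 * Queue a crypto operation on the given cryptodev queue pair and flush
 * the accumulated operations as one burst once MAX_PKT_BURST of them
 * are pending; source mbufs of operations that could not be enqueued
 * are dropped.
 */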
static inline void
enqueue_cop(struct cdev_qp *cqp, struct rte_crypto_op *cop)
{
        int32_t ret, i;

        cqp->buf[cqp->len++] = cop;

        if (cqp->len == MAX_PKT_BURST) {
                ret = rte_cryptodev_enqueue_burst(cqp->id, cqp->qp,
                                cqp->buf, cqp->len);
                if (ret < cqp->len) {
                        RTE_LOG_DP(DEBUG, IPSEC, "Cryptodev %u queue %u:"
                                        " enqueued %u crypto ops out of %u\n",
                                         cqp->id, cqp->qp,
                                         ret, cqp->len);
                        for (i = ret; i < cqp->len; i++)
                                rte_pktmbuf_free(cqp->buf[i]->sym->m_src);
                }
                cqp->in_flight += ret;
                cqp->len = 0;
        }
}
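
/*
 * Build one crypto operation per packet: bind the packet to its SA,
 * create the SA's crypto session on first use, attach the session,
 * run the ESP preparation callback (xform_func) and enqueue the
 * operation on the SA's cryptodev queue pair. Packets with no SA or a
 * failed transform are dropped.
 */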
static inline void
ipsec_enqueue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
                struct rte_mbuf *pkts[], struct ipsec_sa *sas[],
                uint16_t nb_pkts)
{
        int32_t ret = 0, i;
        struct ipsec_mbuf_metadata *priv;
        struct ipsec_sa *sa;

        for (i = 0; i < nb_pkts; i++) {
                if (unlikely(sas[i] == NULL)) {
                        rte_pktmbuf_free(pkts[i]);
                        continue;
                }

                rte_prefetch0(sas[i]);
                rte_prefetch0(pkts[i]);

                priv = get_priv(pkts[i]);
                sa = sas[i];
                priv->sa = sa;

                priv->cop.type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
                priv->cop.status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;

                rte_prefetch0(&priv->sym_cop);

                if ((unlikely(sa->crypto_session == NULL)) &&
                                create_session(ipsec_ctx, sa)) {
                        rte_pktmbuf_free(pkts[i]);
                        continue;
                }

                rte_crypto_op_attach_sym_session(&priv->cop,
                                sa->crypto_session);

                ret = xform_func(pkts[i], sa, &priv->cop);
                if (unlikely(ret)) {
                        rte_pktmbuf_free(pkts[i]);
                        continue;
                }

                RTE_ASSERT(sa->cdev_id_qp < ipsec_ctx->nb_qps);
                enqueue_cop(&ipsec_ctx->tbl[sa->cdev_id_qp], &priv->cop);
        }
}
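
/*
 * Poll this context's cryptodev queue pairs in round-robin order,
 * dequeue completed crypto operations, run the ESP post-processing
 * callback (xform_func) on each packet and collect up to max_pkts
 * successfully processed packets into pkts[].
 */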
static inline int
ipsec_dequeue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
                struct rte_mbuf *pkts[], uint16_t max_pkts)
{
        int32_t nb_pkts = 0, ret = 0, i, j, nb_cops;
        struct ipsec_mbuf_metadata *priv;
        struct rte_crypto_op *cops[max_pkts];
        struct ipsec_sa *sa;
        struct rte_mbuf *pkt;

        for (i = 0; i < ipsec_ctx->nb_qps && nb_pkts < max_pkts; i++) {
                struct cdev_qp *cqp;

                cqp = &ipsec_ctx->tbl[ipsec_ctx->last_qp++];
                if (ipsec_ctx->last_qp == ipsec_ctx->nb_qps)
                        ipsec_ctx->last_qp %= ipsec_ctx->nb_qps;

                if (cqp->in_flight == 0)
                        continue;

                nb_cops = rte_cryptodev_dequeue_burst(cqp->id, cqp->qp,
                                cops, max_pkts - nb_pkts);

                cqp->in_flight -= nb_cops;

                for (j = 0; j < nb_cops; j++) {
                        pkt = cops[j]->sym->m_src;
                        rte_prefetch0(pkt);

                        priv = get_priv(pkt);
                        sa = priv->sa;

                        RTE_ASSERT(sa != NULL);

                        ret = xform_func(pkt, sa, cops[j]);
                        if (unlikely(ret))
                                rte_pktmbuf_free(pkt);
                        else
                                pkts[nb_pkts++] = pkt;
                }
        }

        /* return packets */
        return nb_pkts;
}
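
/*
 * Inbound data path: look up the SA for each received ESP packet,
 * enqueue the packets for crypto processing and return the packets
 * whose crypto operations have completed.
 */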
uint16_t
ipsec_inbound(struct ipsec_ctx *ctx, struct rte_mbuf *pkts[],
                uint16_t nb_pkts, uint16_t len)
{
        struct ipsec_sa *sas[nb_pkts];

        inbound_sa_lookup(ctx->sa_ctx, pkts, sas, nb_pkts);

        ipsec_enqueue(esp_inbound, ctx, pkts, sas, nb_pkts);

        return ipsec_dequeue(esp_inbound_post, ctx, pkts, len);
}
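
/*
 * Outbound data path: look up the SAs selected by the SP lookup
 * (sa_idx), enqueue the packets for crypto processing and return the
 * packets whose crypto operations have completed.
 */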
uint16_t
ipsec_outbound(struct ipsec_ctx *ctx, struct rte_mbuf *pkts[],
                uint32_t sa_idx[], uint16_t nb_pkts, uint16_t len)
{
        struct ipsec_sa *sas[nb_pkts];

        outbound_sa_lookup(ctx->sa_ctx, sa_idx, sas, nb_pkts);

        ipsec_enqueue(esp_outbound, ctx, pkts, sas, nb_pkts);

        return ipsec_dequeue(esp_outbound_post, ctx, pkts, len);
}