4 * Copyright(c) 2016 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35 * Security Associations
37 #include <sys/types.h>
38 #include <netinet/in.h>
39 #include <netinet/ip.h>
41 #include <rte_memzone.h>
42 #include <rte_crypto.h>
43 #include <rte_cryptodev.h>
44 #include <rte_byteorder.h>
45 #include <rte_errno.h>
/*
 * SA table: endpoint 0, outbound direction.
 * Visible fields per entry: SPI, a second numeric field (presumably a
 * sequence counter — TODO confirm against struct ipsec_sa), tunnel
 * source/destination IPv4 addresses, ESP4 tunnel pre/post-crypto
 * handlers, and the cipher/auth algorithm pair. The SPI-9 entry uses
 * NULL cipher and NULL auth (no encryption/authentication).
 * NOTE(review): this extract is missing lines (entry terminators,
 * closing brace) — verify against the complete file before editing.
 */
50 /* SAs EP0 Outbound */
51 const struct ipsec_sa sa_ep0_out[] = {
52 { 5, 0, IPv4(172, 16, 1, 5), IPv4(172, 16, 2, 5),
54 esp4_tunnel_outbound_pre_crypto,
55 esp4_tunnel_outbound_post_crypto,
56 RTE_CRYPTO_CIPHER_AES_CBC, RTE_CRYPTO_AUTH_SHA1_HMAC,
59 { 6, 0, IPv4(172, 16, 1, 6), IPv4(172, 16, 2, 6),
61 esp4_tunnel_outbound_pre_crypto,
62 esp4_tunnel_outbound_post_crypto,
63 RTE_CRYPTO_CIPHER_AES_CBC, RTE_CRYPTO_AUTH_SHA1_HMAC,
66 { 7, 0, IPv4(172, 16, 1, 7), IPv4(172, 16, 2, 7),
68 esp4_tunnel_outbound_pre_crypto,
69 esp4_tunnel_outbound_post_crypto,
70 RTE_CRYPTO_CIPHER_AES_CBC, RTE_CRYPTO_AUTH_SHA1_HMAC,
73 { 8, 0, IPv4(172, 16, 1, 8), IPv4(172, 16, 2, 8),
75 esp4_tunnel_outbound_pre_crypto,
76 esp4_tunnel_outbound_post_crypto,
77 RTE_CRYPTO_CIPHER_AES_CBC, RTE_CRYPTO_AUTH_SHA1_HMAC,
80 { 9, 0, IPv4(172, 16, 1, 5), IPv4(172, 16, 2, 5),
82 esp4_tunnel_outbound_pre_crypto,
83 esp4_tunnel_outbound_post_crypto,
84 RTE_CRYPTO_CIPHER_NULL, RTE_CRYPTO_AUTH_NULL,
/*
 * SA table: endpoint 0, inbound direction.
 * Same SPIs as the EP0 outbound set with src/dst swapped (traffic
 * arriving from the 172.16.2.x tunnel peers), using the ESP4 tunnel
 * inbound pre/post-crypto handlers. SPI 9 is the NULL-crypto entry.
 * NOTE(review): this extract is missing lines (entry terminators,
 * closing brace) — verify against the complete file before editing.
 */
90 const struct ipsec_sa sa_ep0_in[] = {
91 { 5, 0, IPv4(172, 16, 2, 5), IPv4(172, 16, 1, 5),
93 esp4_tunnel_inbound_pre_crypto,
94 esp4_tunnel_inbound_post_crypto,
95 RTE_CRYPTO_CIPHER_AES_CBC, RTE_CRYPTO_AUTH_SHA1_HMAC,
98 { 6, 0, IPv4(172, 16, 2, 6), IPv4(172, 16, 1, 6),
100 esp4_tunnel_inbound_pre_crypto,
101 esp4_tunnel_inbound_post_crypto,
102 RTE_CRYPTO_CIPHER_AES_CBC, RTE_CRYPTO_AUTH_SHA1_HMAC,
105 { 7, 0, IPv4(172, 16, 2, 7), IPv4(172, 16, 1, 7),
107 esp4_tunnel_inbound_pre_crypto,
108 esp4_tunnel_inbound_post_crypto,
109 RTE_CRYPTO_CIPHER_AES_CBC, RTE_CRYPTO_AUTH_SHA1_HMAC,
112 { 8, 0, IPv4(172, 16, 2, 8), IPv4(172, 16, 1, 8),
114 esp4_tunnel_inbound_pre_crypto,
115 esp4_tunnel_inbound_post_crypto,
116 RTE_CRYPTO_CIPHER_AES_CBC, RTE_CRYPTO_AUTH_SHA1_HMAC,
119 { 9, 0, IPv4(172, 16, 2, 5), IPv4(172, 16, 1, 5),
121 esp4_tunnel_inbound_pre_crypto,
122 esp4_tunnel_inbound_post_crypto,
123 RTE_CRYPTO_CIPHER_NULL, RTE_CRYPTO_AUTH_NULL,
/*
 * SA table: endpoint 1, outbound direction.
 * EP1 is the mirror peer of EP0: it encrypts toward 172.16.1.x using
 * the same SPIs and algorithms (AES-CBC + SHA1-HMAC; SPI 9 NULL/NULL),
 * with the ESP4 tunnel outbound pre/post-crypto handlers.
 * NOTE(review): this extract is missing lines (entry terminators,
 * closing brace) — verify against the complete file before editing.
 */
128 /* SAs EP1 Outbound */
129 const struct ipsec_sa sa_ep1_out[] = {
130 { 5, 0, IPv4(172, 16, 2, 5), IPv4(172, 16, 1, 5),
132 esp4_tunnel_outbound_pre_crypto,
133 esp4_tunnel_outbound_post_crypto,
134 RTE_CRYPTO_CIPHER_AES_CBC, RTE_CRYPTO_AUTH_SHA1_HMAC,
137 { 6, 0, IPv4(172, 16, 2, 6), IPv4(172, 16, 1, 6),
139 esp4_tunnel_outbound_pre_crypto,
140 esp4_tunnel_outbound_post_crypto,
141 RTE_CRYPTO_CIPHER_AES_CBC, RTE_CRYPTO_AUTH_SHA1_HMAC,
144 { 7, 0, IPv4(172, 16, 2, 7), IPv4(172, 16, 1, 7),
146 esp4_tunnel_outbound_pre_crypto,
147 esp4_tunnel_outbound_post_crypto,
148 RTE_CRYPTO_CIPHER_AES_CBC, RTE_CRYPTO_AUTH_SHA1_HMAC,
151 { 8, 0, IPv4(172, 16, 2, 8), IPv4(172, 16, 1, 8),
153 esp4_tunnel_outbound_pre_crypto,
154 esp4_tunnel_outbound_post_crypto,
155 RTE_CRYPTO_CIPHER_AES_CBC, RTE_CRYPTO_AUTH_SHA1_HMAC,
158 { 9, 0, IPv4(172, 16, 2, 5), IPv4(172, 16, 1, 5),
160 esp4_tunnel_outbound_pre_crypto,
161 esp4_tunnel_outbound_post_crypto,
162 RTE_CRYPTO_CIPHER_NULL, RTE_CRYPTO_AUTH_NULL,
/*
 * SA table: endpoint 1, inbound direction.
 * Accepts traffic originating from the 172.16.1.x tunnel peers (the
 * mirror of EP1 outbound), same SPI/algorithm assignments (AES-CBC +
 * SHA1-HMAC; SPI 9 NULL/NULL) with the ESP4 tunnel inbound handlers.
 * NOTE(review): this extract is missing lines (entry terminators,
 * closing brace) — verify against the complete file before editing.
 */
167 /* SAs EP1 Inbound */
168 const struct ipsec_sa sa_ep1_in[] = {
169 { 5, 0, IPv4(172, 16, 1, 5), IPv4(172, 16, 2, 5),
171 esp4_tunnel_inbound_pre_crypto,
172 esp4_tunnel_inbound_post_crypto,
173 RTE_CRYPTO_CIPHER_AES_CBC, RTE_CRYPTO_AUTH_SHA1_HMAC,
176 { 6, 0, IPv4(172, 16, 1, 6), IPv4(172, 16, 2, 6),
178 esp4_tunnel_inbound_pre_crypto,
179 esp4_tunnel_inbound_post_crypto,
180 RTE_CRYPTO_CIPHER_AES_CBC, RTE_CRYPTO_AUTH_SHA1_HMAC,
183 { 7, 0, IPv4(172, 16, 1, 7), IPv4(172, 16, 2, 7),
185 esp4_tunnel_inbound_pre_crypto,
186 esp4_tunnel_inbound_post_crypto,
187 RTE_CRYPTO_CIPHER_AES_CBC, RTE_CRYPTO_AUTH_SHA1_HMAC,
190 { 8, 0, IPv4(172, 16, 1, 8), IPv4(172, 16, 2, 8),
192 esp4_tunnel_inbound_pre_crypto,
193 esp4_tunnel_inbound_post_crypto,
194 RTE_CRYPTO_CIPHER_AES_CBC, RTE_CRYPTO_AUTH_SHA1_HMAC,
197 { 9, 0, IPv4(172, 16, 1, 5), IPv4(172, 16, 2, 5),
199 esp4_tunnel_inbound_pre_crypto,
200 esp4_tunnel_inbound_post_crypto,
201 RTE_CRYPTO_CIPHER_NULL, RTE_CRYPTO_AUTH_NULL,
/*
 * AES-CBC cipher key and symmetric-crypto xform templates.
 * The key literal "sixteenbytes key" supplies the 16 bytes referenced
 * by the xforms' { cipher_key, 16 } key descriptors; the buffer is
 * oversized at 256 bytes.
 * WARNING: hard-coded key — acceptable only for a sample application,
 * never for production use.
 * NOTE(review): the xform initializers appear truncated in this extract
 * (closing braces/fields missing) — verify against the complete file.
 */
206 static uint8_t cipher_key[256] = "sixteenbytes key";
/* Encrypt-direction AES-CBC xform (used for outbound SAs). */
209 const struct rte_crypto_sym_xform aescbc_enc_xf = {
211 RTE_CRYPTO_SYM_XFORM_CIPHER,
212 {.cipher = { RTE_CRYPTO_CIPHER_OP_ENCRYPT, RTE_CRYPTO_CIPHER_AES_CBC,
213 .key = { cipher_key, 16 } }
/* Decrypt-direction AES-CBC xform (used for inbound SAs). */
217 const struct rte_crypto_sym_xform aescbc_dec_xf = {
219 RTE_CRYPTO_SYM_XFORM_CIPHER,
220 {.cipher = { RTE_CRYPTO_CIPHER_OP_DECRYPT, RTE_CRYPTO_CIPHER_AES_CBC,
221 .key = { cipher_key, 16 } }
/*
 * SHA1-HMAC authentication key and xform templates, plus NULL
 * cipher/auth xforms for the pass-through SA entries.
 * The key literal "twentybytes hash key" supplies the 20 bytes
 * referenced by the xforms' { auth_key, 20 } descriptors; the trailing
 * "12, 0" presumably sets a 12-byte digest length — TODO confirm
 * against struct rte_crypto_auth_xform field order.
 * WARNING: hard-coded key — sample-application use only.
 * NOTE(review): initializers appear truncated in this extract — verify
 * against the complete file.
 */
225 static uint8_t auth_key[256] = "twentybytes hash key";
227 /* SHA1 HMAC xform */
/* Generate-direction HMAC xform (outbound: compute the ICV). */
228 const struct rte_crypto_sym_xform sha1hmac_gen_xf = {
230 RTE_CRYPTO_SYM_XFORM_AUTH,
231 {.auth = { RTE_CRYPTO_AUTH_OP_GENERATE, RTE_CRYPTO_AUTH_SHA1_HMAC,
232 .key = { auth_key, 20 }, 12, 0 }
/* Verify-direction HMAC xform (inbound: check the ICV). */
236 const struct rte_crypto_sym_xform sha1hmac_verify_xf = {
238 RTE_CRYPTO_SYM_XFORM_AUTH,
239 {.auth = { RTE_CRYPTO_AUTH_OP_VERIFY, RTE_CRYPTO_AUTH_SHA1_HMAC,
240 .key = { auth_key, 20 }, 12, 0 }
/* NULL cipher xform: no encryption (SPI-9 pass-through entries). */
245 const struct rte_crypto_sym_xform null_cipher_xf = {
247 RTE_CRYPTO_SYM_XFORM_CIPHER,
248 {.cipher = { .algo = RTE_CRYPTO_CIPHER_NULL }
/* NULL auth xform: no integrity check (SPI-9 pass-through entries). */
252 const struct rte_crypto_sym_xform null_auth_xf = {
254 RTE_CRYPTO_SYM_XFORM_AUTH,
255 {.auth = { .algo = RTE_CRYPTO_AUTH_NULL }
/*
 * Per-socket SA context storage: a flat SA array (indexed via SPI2IDX
 * in sa_add_rules) plus a pair of crypto xforms per entry — "a" is
 * chained to "b" when a rule is installed.
 * NOTE(review): the enclosing struct declaration lines (presumably
 * "struct sa_ctx { ... struct { ... }") are missing from this extract —
 * this fragment is not self-contained; confirm against the full file.
 */
260 struct ipsec_sa sa[IPSEC_SA_MAX_ENTRIES];
262 struct rte_crypto_sym_xform a;
263 struct rte_crypto_sym_xform b;
264 } xf[IPSEC_SA_MAX_ENTRIES];
/*
 * Allocate a struct sa_ctx in a hugepage memzone named "<name>_<socket>"
 * on the given NUMA socket and return a pointer to it.
 * Returns NULL on allocation failure (rte_errno set by the memzone
 * layer) — presumably; the failure-return line is missing from this
 * extract, as are the declarations of "s"/"mz_size", the NULL check
 * condition and the final return. Verify against the complete file.
 */
267 static struct sa_ctx *
268 sa_ipv4_create(const char *name, int socket_id)
271 struct sa_ctx *sa_ctx;
273 const struct rte_memzone *mz;
/* Memzone name is "<name>_<socket_id>" so each socket gets its own DB. */
275 snprintf(s, sizeof(s), "%s_%u", name, socket_id);
277 /* Create SA array table */
278 printf("Creating SA context with %u maximum entries\n",
279 IPSEC_SA_MAX_ENTRIES);
/* Reserve the whole context in one zone; 1GB pages preferred, size hint
 * only so smaller pages are acceptable. */
281 mz_size = sizeof(struct sa_ctx);
282 mz = rte_memzone_reserve(s, mz_size, socket_id,
283 RTE_MEMZONE_1GB | RTE_MEMZONE_SIZE_HINT_ONLY);
285 printf("Failed to allocate SA DB memory\n");
290 sa_ctx = (struct sa_ctx *)mz->addr;
/*
 * Install nb_entries SA rules into sa_ctx.
 * Each entry is placed at SPI2IDX(spi); a collision with an occupied
 * slot is reported (the copy of entries[i] into the slot and the
 * error-return path are on lines missing from this extract).
 * src/dst are converted to network byte order in place. The crypto
 * xform chain (xf[idx].a -> xf[idx].b) is selected by direction:
 *   inbound:  verify-auth then decrypt (auth first, cipher second),
 *   outbound: encrypt then generate-auth (cipher first, auth second),
 * with NULL xforms substituted when the SA's cipher is
 * RTE_CRYPTO_CIPHER_NULL.
 * NOTE(review): loop close, error handling and return statement are
 * missing from this extract — verify against the complete file.
 */
296 sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
297 unsigned nb_entries, unsigned inbound)
302 for (i = 0; i < nb_entries; i++) {
303 idx = SPI2IDX(entries[i].spi);
304 sa = &sa_ctx->sa[idx];
/* Slot already holds a different SA mapped to the same index. */
306 printf("Index %u already in use by SPI %u\n",
/* Addresses stored big-endian so lookups can compare raw wire words. */
311 sa->src = rte_cpu_to_be_32(sa->src);
312 sa->dst = rte_cpu_to_be_32(sa->dst);
314 if (sa->cipher_algo == RTE_CRYPTO_CIPHER_NULL) {
315 sa_ctx->xf[idx].a = null_auth_xf;
316 sa_ctx->xf[idx].b = null_cipher_xf;
318 sa_ctx->xf[idx].a = sha1hmac_verify_xf;
319 sa_ctx->xf[idx].b = aescbc_dec_xf;
321 } else { /* outbound */
322 if (sa->cipher_algo == RTE_CRYPTO_CIPHER_NULL) {
323 sa_ctx->xf[idx].a = null_cipher_xf;
324 sa_ctx->xf[idx].b = null_auth_xf;
326 sa_ctx->xf[idx].a = aescbc_enc_xf;
327 sa_ctx->xf[idx].b = sha1hmac_gen_xf;
/* Link the two xforms into a chain and hang it off the SA. */
330 sa_ctx->xf[idx].a.next = &sa_ctx->xf[idx].b;
331 sa_ctx->xf[idx].b.next = NULL;
332 sa->xforms = &sa_ctx->xf[idx].a;
/* Install outbound SA rules: thin wrapper over sa_add_rules with
 * inbound = 0. */
339 sa_out_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
342 return sa_add_rules(sa_ctx, entries, nb_entries, 0);
/* Install inbound SA rules: thin wrapper over sa_add_rules with
 * inbound = 1. */
346 sa_in_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
349 return sa_add_rules(sa_ctx, entries, nb_entries, 1);
/*
 * Initialize the inbound and outbound IPv4 SA databases for one socket.
 * Selects the EP0 or EP1 rule tables by "ep" (only 0/1 accepted),
 * creates both SA contexts in memzones, then installs the rules.
 * Exits the application (rte_exit) on NULL ctx, already-initialized
 * DBs, invalid ep, or context-creation failure.
 * NOTE(review): several lines are missing from this extract — the
 * declaration of "name", the NULL-ctx and "ep == 0" condition lines,
 * and the assignment name = "sa_ipv4_in" before the first create call.
 * Verify against the complete file.
 */
353 sa_init(struct socket_ctx *ctx, int socket_id, unsigned ep)
355 const struct ipsec_sa *sa_out_entries, *sa_in_entries;
356 unsigned nb_out_entries, nb_in_entries;
360 rte_exit(EXIT_FAILURE, "NULL context.\n");
/* Refuse double initialization of either direction. */
362 if (ctx->sa_ipv4_in != NULL)
363 rte_exit(EXIT_FAILURE, "Inbound SA DB for socket %u already "
364 "initialized\n", socket_id);
366 if (ctx->sa_ipv4_out != NULL)
367 rte_exit(EXIT_FAILURE, "Outbound SA DB for socket %u already "
368 "initialized\n", socket_id);
/* Pick the rule tables for this endpoint role. */
371 sa_out_entries = sa_ep0_out;
372 nb_out_entries = RTE_DIM(sa_ep0_out);
373 sa_in_entries = sa_ep0_in;
374 nb_in_entries = RTE_DIM(sa_ep0_in);
375 } else if (ep == 1) {
376 sa_out_entries = sa_ep1_out;
377 nb_out_entries = RTE_DIM(sa_ep1_out);
378 sa_in_entries = sa_ep1_in;
379 nb_in_entries = RTE_DIM(sa_ep1_in);
381 rte_exit(EXIT_FAILURE, "Invalid EP value %u. "
382 "Only 0 or 1 supported.\n", ep);
/* Create both per-socket SA contexts, then install the rules. */
385 ctx->sa_ipv4_in = sa_ipv4_create(name, socket_id);
386 if (ctx->sa_ipv4_in == NULL)
387 rte_exit(EXIT_FAILURE, "Error [%d] creating SA context %s "
388 "in socket %d\n", rte_errno, name, socket_id);
390 name = "sa_ipv4_out";
391 ctx->sa_ipv4_out = sa_ipv4_create(name, socket_id);
392 if (ctx->sa_ipv4_out == NULL)
393 rte_exit(EXIT_FAILURE, "Error [%d] creating SA context %s "
394 "in socket %d\n", rte_errno, name, socket_id);
396 sa_in_add_rules(ctx->sa_ipv4_in, sa_in_entries, nb_in_entries);
398 sa_out_add_rules(ctx->sa_ipv4_out, sa_out_entries, nb_out_entries);
/*
 * Check that the SA recorded in the mbuf's private metadata matches the
 * SA at sa_idx — i.e. the packet really belongs to the SA it claims.
 * The metadata is assumed to live immediately after the rte_mbuf
 * header (private area) — TODO confirm this layout against the app's
 * mbuf pool configuration.
 */
402 inbound_sa_check(struct sa_ctx *sa_ctx, struct rte_mbuf *m, uint32_t sa_idx)
404 struct ipsec_mbuf_metadata *priv;
406 priv = RTE_PTR_ADD(m, sizeof(struct rte_mbuf));
408 return (sa_ctx->sa[sa_idx].spi == priv->sa->spi);
/*
 * Resolve each inbound packet's SA: read the ESP SPI found right after
 * the IPv4 header, map it through SPI2IDX, and validate that the found
 * SA's SPI and (big-endian) tunnel src/dst match the packet.
 * The SPI appears to be compared without byte-order conversion —
 * presumably the SA's SPI is stored in network order too; TODO confirm.
 * NOTE(review): the mismatch-handling lines (likely "sa[i] = NULL;
 * continue;") are missing from this extract — verify against the
 * complete file before editing.
 */
412 inbound_sa_lookup(struct sa_ctx *sa_ctx, struct rte_mbuf *pkts[],
413 struct ipsec_sa *sa[], uint16_t nb_pkts)
418 for (i = 0; i < nb_pkts; i++) {
/* ESP header starts immediately after the IPv4 header. */
419 spi = rte_pktmbuf_mtod_offset(pkts[i], struct esp_hdr *,
420 sizeof(struct ip))->spi;
422 if (spi == INVALID_SPI)
425 sa[i] = &sa_ctx->sa[SPI2IDX(spi)];
426 if (spi != sa[i]->spi) {
/* Cross-check tunnel endpoints: ip_src and ip_dst are adjacent
 * 32-bit words, read as src at *src and dst at *(src + 1). */
431 src = rte_pktmbuf_mtod_offset(pkts[i], uint32_t *,
432 offsetof(struct ip, ip_src));
433 if ((sa[i]->src != *src) || (sa[i]->dst != *(src + 1)))
/*
 * Resolve outbound SAs by direct index: sa_idx[i] (produced by the SP
 * classification stage, presumably — confirm against caller) selects
 * the SA for packet i. No validation is performed here.
 */
439 outbound_sa_lookup(struct sa_ctx *sa_ctx, uint32_t sa_idx[],
440 struct ipsec_sa *sa[], uint16_t nb_pkts)
444 for (i = 0; i < nb_pkts; i++)
445 sa[i] = &sa_ctx->sa[sa_idx[i]];