/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2017 Intel Corporation
 */

/*
 * Security Associations
 */
#include <sys/types.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>

#include <rte_memzone.h>
#include <rte_crypto.h>
#include <rte_security.h>
#include <rte_cryptodev.h>
#include <rte_byteorder.h>
#include <rte_errno.h>
#include <rte_ip.h>
#include <rte_random.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>

#include "ipsec.h"
#include "esp.h"
#include "parser.h"
struct supported_cipher_algo {
	const char *keyword;
	enum rte_crypto_cipher_algorithm algo;
	uint16_t iv_len;
	uint16_t block_size;
	uint16_t key_len;
};

struct supported_auth_algo {
	const char *keyword;
	enum rte_crypto_auth_algorithm algo;
	uint16_t digest_len;
	uint16_t key_len;
	uint8_t key_not_req;
};

struct supported_aead_algo {
	const char *keyword;
	enum rte_crypto_aead_algorithm algo;
	uint16_t iv_len;
	uint16_t block_size;
	uint16_t digest_len;
	uint8_t aad_len;
	uint16_t key_len;
};
const struct supported_cipher_algo cipher_algos[] = {
	{
		.keyword = "null",
		.algo = RTE_CRYPTO_CIPHER_NULL,
		.iv_len = 0,
		.block_size = 4,
		.key_len = 0
	},
	{
		.keyword = "aes-128-cbc",
		.algo = RTE_CRYPTO_CIPHER_AES_CBC,
		.iv_len = 16,
		.block_size = 16,
		.key_len = 16
	},
	{
		.keyword = "aes-256-cbc",
		.algo = RTE_CRYPTO_CIPHER_AES_CBC,
		.iv_len = 16,
		.block_size = 16,
		.key_len = 32
	},
	{
		.keyword = "aes-128-ctr",
		.algo = RTE_CRYPTO_CIPHER_AES_CTR,
		.iv_len = 8,
		.block_size = 16, /* XXX AESNI MB limitation, should be 4 */
		.key_len = 20
	},
	{
		.keyword = "3des-cbc",
		.algo = RTE_CRYPTO_CIPHER_3DES_CBC,
		.iv_len = 8,
		.block_size = 8,
		.key_len = 24
	}
};
const struct supported_auth_algo auth_algos[] = {
	{
		.keyword = "null",
		.algo = RTE_CRYPTO_AUTH_NULL,
		.digest_len = 0,
		.key_len = 0,
		.key_not_req = 1
	},
	{
		.keyword = "sha1-hmac",
		.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
		.digest_len = 12,
		.key_len = 20
	},
	{
		.keyword = "sha256-hmac",
		.algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
		.digest_len = 12,
		.key_len = 32
	}
};
const struct supported_aead_algo aead_algos[] = {
	{
		.keyword = "aes-128-gcm",
		.algo = RTE_CRYPTO_AEAD_AES_GCM,
		.iv_len = 8,
		.block_size = 4,
		.key_len = 20,
		.digest_len = 16,
		.aad_len = 8
	}
};
struct ipsec_sa sa_out[IPSEC_SA_MAX_ENTRIES];
uint32_t nb_sa_out;

struct ipsec_sa sa_in[IPSEC_SA_MAX_ENTRIES];
uint32_t nb_sa_in;
static const struct supported_cipher_algo *
find_match_cipher_algo(const char *cipher_keyword)
{
	uint32_t i;

	for (i = 0; i < RTE_DIM(cipher_algos); i++) {
		const struct supported_cipher_algo *algo =
				&cipher_algos[i];

		if (strcmp(cipher_keyword, algo->keyword) == 0)
			return algo;
	}

	return NULL;
}
static const struct supported_auth_algo *
find_match_auth_algo(const char *auth_keyword)
{
	uint32_t i;

	for (i = 0; i < RTE_DIM(auth_algos); i++) {
		const struct supported_auth_algo *algo =
				&auth_algos[i];

		if (strcmp(auth_keyword, algo->keyword) == 0)
			return algo;
	}

	return NULL;
}
static const struct supported_aead_algo *
find_match_aead_algo(const char *aead_keyword)
{
	uint32_t i;

	for (i = 0; i < RTE_DIM(aead_algos); i++) {
		const struct supported_aead_algo *algo =
				&aead_algos[i];

		if (strcmp(aead_keyword, algo->keyword) == 0)
			return algo;
	}

	return NULL;
}
/*
 * parse x:x:x:x.... hex number key string into uint8_t *key
 * return:
 * > 0: number of bytes parsed
 * 0:   failed
 */
static uint32_t
parse_key_string(const char *key_str, uint8_t *key)
{
	const char *pt_start = key_str, *pt_end = key_str;
	uint32_t nb_bytes = 0;

	while (pt_end != NULL) {
		char sub_str[3] = {0};

		pt_end = strchr(pt_start, ':');

		if (pt_end == NULL) {
			if (strlen(pt_start) > 2)
				return 0;
			strncpy(sub_str, pt_start, 2);
		} else {
			if (pt_end - pt_start > 2)
				return 0;

			strncpy(sub_str, pt_start, pt_end - pt_start);
			pt_start = pt_end + 1;
		}

		key[nb_bytes++] = strtol(sub_str, NULL, 16);
	}

	return nb_bytes;
}
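
/*
 * Usage sketch (illustrative, not part of the original sources):
 * parse_key_string("de:ad:be:ef", key) writes {0xde, 0xad, 0xbe, 0xef}
 * into key and returns 4; any segment longer than two hex digits makes
 * the whole parse fail with 0.
 */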
void
parse_sa_tokens(char **tokens, uint32_t n_tokens,
	struct parse_status *status)
{
	struct ipsec_sa *rule = NULL;
	uint32_t ti; /* token index */
	uint32_t *ri; /* rule index */
	uint32_t cipher_algo_p = 0;
	uint32_t auth_algo_p = 0;
	uint32_t aead_algo_p = 0;
	uint32_t src_p = 0;
	uint32_t dst_p = 0;
	uint32_t mode_p = 0;
	uint32_t type_p = 0;
	uint32_t portid_p = 0;
	if (strcmp(tokens[0], "in") == 0) {
		ri = &nb_sa_in;

		APP_CHECK(*ri <= IPSEC_SA_MAX_ENTRIES - 1, status,
			"too many sa rules, abort insertion\n");
		if (status->status < 0)
			return;

		rule = &sa_in[*ri];
	} else {
		ri = &nb_sa_out;

		APP_CHECK(*ri <= IPSEC_SA_MAX_ENTRIES - 1, status,
			"too many sa rules, abort insertion\n");
		if (status->status < 0)
			return;

		rule = &sa_out[*ri];
	}

	APP_CHECK_TOKEN_IS_NUM(tokens, 1, status);
	if (status->status < 0)
		return;
	if (atoi(tokens[1]) == INVALID_SPI)
		return;
	rule->spi = atoi(tokens[1]);
	for (ti = 2; ti < n_tokens; ti++) {
		if (strcmp(tokens[ti], "mode") == 0) {
			APP_CHECK_PRESENCE(mode_p, tokens[ti], status);
			if (status->status < 0)
				return;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			if (strcmp(tokens[ti], "ipv4-tunnel") == 0)
				rule->flags = IP4_TUNNEL;
			else if (strcmp(tokens[ti], "ipv6-tunnel") == 0)
				rule->flags = IP6_TUNNEL;
			else if (strcmp(tokens[ti], "transport") == 0)
				rule->flags = TRANSPORT;
			else {
				APP_CHECK(0, status, "unrecognized "
					"input \"%s\"", tokens[ti]);
				return;
			}

			mode_p = 1;
			continue;
		}
		if (strcmp(tokens[ti], "cipher_algo") == 0) {
			const struct supported_cipher_algo *algo;
			uint32_t key_len;

			APP_CHECK_PRESENCE(cipher_algo_p, tokens[ti],
				status);
			if (status->status < 0)
				return;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			algo = find_match_cipher_algo(tokens[ti]);

			APP_CHECK(algo != NULL, status, "unrecognized "
				"input \"%s\"", tokens[ti]);

			rule->cipher_algo = algo->algo;
			rule->block_size = algo->block_size;
			rule->iv_len = algo->iv_len;
			rule->cipher_key_len = algo->key_len;

			/* for NULL algorithm, no cipher key required */
			if (rule->cipher_algo == RTE_CRYPTO_CIPHER_NULL) {
				cipher_algo_p = 1;
				continue;
			}

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			APP_CHECK(strcmp(tokens[ti], "cipher_key") == 0,
				status, "unrecognized input \"%s\", "
				"expect \"cipher_key\"", tokens[ti]);
			if (status->status < 0)
				return;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			key_len = parse_key_string(tokens[ti],
				rule->cipher_key);
			APP_CHECK(key_len == rule->cipher_key_len, status,
				"unrecognized input \"%s\"", tokens[ti]);
			if (status->status < 0)
				return;

			if (algo->algo == RTE_CRYPTO_CIPHER_AES_CBC ||
				algo->algo == RTE_CRYPTO_CIPHER_3DES_CBC)
				rule->salt = (uint32_t)rte_rand();

			if (algo->algo == RTE_CRYPTO_CIPHER_AES_CTR) {
				key_len -= 4;
				rule->cipher_key_len = key_len;
				memcpy(&rule->salt,
					&rule->cipher_key[key_len], 4);
			}

			cipher_algo_p = 1;
			continue;
		}
		if (strcmp(tokens[ti], "auth_algo") == 0) {
			const struct supported_auth_algo *algo;
			uint32_t key_len;

			APP_CHECK_PRESENCE(auth_algo_p, tokens[ti],
				status);
			if (status->status < 0)
				return;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			algo = find_match_auth_algo(tokens[ti]);
			APP_CHECK(algo != NULL, status, "unrecognized "
				"input \"%s\"", tokens[ti]);

			rule->auth_algo = algo->algo;
			rule->auth_key_len = algo->key_len;
			rule->digest_len = algo->digest_len;

			/* NULL algorithm and combined algos do not
			 * require auth key.
			 */
			if (algo->key_not_req) {
				auth_algo_p = 1;
				continue;
			}

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			APP_CHECK(strcmp(tokens[ti], "auth_key") == 0,
				status, "unrecognized input \"%s\", "
				"expect \"auth_key\"", tokens[ti]);
			if (status->status < 0)
				return;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			key_len = parse_key_string(tokens[ti],
				rule->auth_key);
			APP_CHECK(key_len == rule->auth_key_len, status,
				"unrecognized input \"%s\"", tokens[ti]);
			if (status->status < 0)
				return;

			auth_algo_p = 1;
			continue;
		}
		if (strcmp(tokens[ti], "aead_algo") == 0) {
			const struct supported_aead_algo *algo;
			uint32_t key_len;

			APP_CHECK_PRESENCE(aead_algo_p, tokens[ti],
				status);
			if (status->status < 0)
				return;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			algo = find_match_aead_algo(tokens[ti]);

			APP_CHECK(algo != NULL, status, "unrecognized "
				"input \"%s\"", tokens[ti]);

			rule->aead_algo = algo->algo;
			rule->cipher_key_len = algo->key_len;
			rule->digest_len = algo->digest_len;
			rule->aad_len = algo->aad_len;
			rule->block_size = algo->block_size;
			rule->iv_len = algo->iv_len;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			APP_CHECK(strcmp(tokens[ti], "aead_key") == 0,
				status, "unrecognized input \"%s\", "
				"expect \"aead_key\"", tokens[ti]);
			if (status->status < 0)
				return;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			key_len = parse_key_string(tokens[ti],
				rule->cipher_key);
			APP_CHECK(key_len == rule->cipher_key_len, status,
				"unrecognized input \"%s\"", tokens[ti]);
			if (status->status < 0)
				return;

			key_len -= 4;
			rule->cipher_key_len = key_len;
			memcpy(&rule->salt,
				&rule->cipher_key[key_len], 4);

			aead_algo_p = 1;
			continue;
		}
		if (strcmp(tokens[ti], "src") == 0) {
			APP_CHECK_PRESENCE(src_p, tokens[ti], status);
			if (status->status < 0)
				return;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			if (rule->flags == IP4_TUNNEL) {
				struct in_addr ip;

				APP_CHECK(parse_ipv4_addr(tokens[ti],
					&ip, NULL) == 0, status,
					"unrecognized input \"%s\", "
					"expect valid ipv4 addr",
					tokens[ti]);
				if (status->status < 0)
					return;
				rule->src.ip.ip4 = rte_bswap32(
					(uint32_t)ip.s_addr);
			} else if (rule->flags == IP6_TUNNEL) {
				struct in6_addr ip;

				APP_CHECK(parse_ipv6_addr(tokens[ti], &ip,
					NULL) == 0, status,
					"unrecognized input \"%s\", "
					"expect valid ipv6 addr",
					tokens[ti]);
				if (status->status < 0)
					return;
				memcpy(rule->src.ip.ip6.ip6_b,
					ip.s6_addr, 16);
			} else if (rule->flags == TRANSPORT) {
				APP_CHECK(0, status, "unrecognized input "
					"\"%s\"", tokens[ti]);
				return;
			}

			src_p = 1;
			continue;
		}
		if (strcmp(tokens[ti], "dst") == 0) {
			APP_CHECK_PRESENCE(dst_p, tokens[ti], status);
			if (status->status < 0)
				return;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			if (rule->flags == IP4_TUNNEL) {
				struct in_addr ip;

				APP_CHECK(parse_ipv4_addr(tokens[ti],
					&ip, NULL) == 0, status,
					"unrecognized input \"%s\", "
					"expect valid ipv4 addr",
					tokens[ti]);
				if (status->status < 0)
					return;
				rule->dst.ip.ip4 = rte_bswap32(
					(uint32_t)ip.s_addr);
			} else if (rule->flags == IP6_TUNNEL) {
				struct in6_addr ip;

				APP_CHECK(parse_ipv6_addr(tokens[ti], &ip,
					NULL) == 0, status,
					"unrecognized input \"%s\", "
					"expect valid ipv6 addr",
					tokens[ti]);
				if (status->status < 0)
					return;
				memcpy(rule->dst.ip.ip6.ip6_b, ip.s6_addr, 16);
			} else if (rule->flags == TRANSPORT) {
				APP_CHECK(0, status, "unrecognized "
					"input \"%s\"", tokens[ti]);
				return;
			}

			dst_p = 1;
			continue;
		}
		if (strcmp(tokens[ti], "type") == 0) {
			APP_CHECK_PRESENCE(type_p, tokens[ti], status);
			if (status->status < 0)
				return;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			if (strcmp(tokens[ti], "inline-crypto-offload") == 0)
				rule->type =
					RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO;
			else if (strcmp(tokens[ti],
					"inline-protocol-offload") == 0)
				rule->type =
				RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL;
			else if (strcmp(tokens[ti],
					"lookaside-protocol-offload") == 0)
				rule->type =
				RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL;
			else if (strcmp(tokens[ti], "no-offload") == 0)
				rule->type = RTE_SECURITY_ACTION_TYPE_NONE;
			else {
				APP_CHECK(0, status, "Invalid input \"%s\"",
					tokens[ti]);
				return;
			}

			type_p = 1;
			continue;
		}
		if (strcmp(tokens[ti], "port_id") == 0) {
			APP_CHECK_PRESENCE(portid_p, tokens[ti], status);
			if (status->status < 0)
				return;
			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;
			rule->portid = atoi(tokens[ti]);
			if (status->status < 0)
				return;
			portid_p = 1;
			continue;
		}
		/* unrecognizable input */
		APP_CHECK(0, status, "unrecognized input \"%s\"",
			tokens[ti]);
		return;
	}

	if (aead_algo_p) {
		APP_CHECK(cipher_algo_p == 0, status,
			"AEAD used, no need for cipher options");
		if (status->status < 0)
			return;

		APP_CHECK(auth_algo_p == 0, status,
			"AEAD used, no need for auth options");
		if (status->status < 0)
			return;
	} else {
		APP_CHECK(cipher_algo_p == 1, status, "missing cipher or AEAD options");
		if (status->status < 0)
			return;

		APP_CHECK(auth_algo_p == 1, status, "missing auth or AEAD options");
		if (status->status < 0)
			return;
	}

	APP_CHECK(mode_p == 1, status, "missing mode option");
	if (status->status < 0)
		return;

	if ((rule->type != RTE_SECURITY_ACTION_TYPE_NONE) && (portid_p == 0))
		printf("Missing portid option, falling back to non-offload\n");

	if (!type_p || !portid_p) {
		rule->type = RTE_SECURITY_ACTION_TYPE_NONE;
		rule->portid = -1;
	}

	*ri = *ri + 1;
}
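
/*
 * Example of a cfg line this parser accepts (illustrative values):
 *
 * sa out 5 cipher_algo aes-128-cbc \
 *    cipher_key a0:a1:a2:a3:a4:a5:a6:a7:a8:a9:aa:ab:ac:ad:ae:af \
 *    auth_algo sha1-hmac \
 *    auth_key a0:a1:a2:a3:a4:a5:a6:a7:a8:a9:aa:ab:ac:ad:ae:af:b0:b1:b2:b3 \
 *    mode ipv4-tunnel src 172.16.1.5 dst 172.16.2.5
 */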
static void
print_one_sa_rule(const struct ipsec_sa *sa, int inbound)
{
	uint32_t i;
	uint8_t a, b, c, d;

	printf("\tspi_%s(%3u):", inbound?"in":"out", sa->spi);

	for (i = 0; i < RTE_DIM(cipher_algos); i++) {
		if (cipher_algos[i].algo == sa->cipher_algo &&
				cipher_algos[i].key_len == sa->cipher_key_len) {
			printf("%s ", cipher_algos[i].keyword);
			break;
		}
	}

	for (i = 0; i < RTE_DIM(auth_algos); i++) {
		if (auth_algos[i].algo == sa->auth_algo) {
			printf("%s ", auth_algos[i].keyword);
			break;
		}
	}

	for (i = 0; i < RTE_DIM(aead_algos); i++) {
		if (aead_algos[i].algo == sa->aead_algo) {
			printf("%s ", aead_algos[i].keyword);
			break;
		}
	}

	printf("mode:");

	switch (sa->flags) {
	case IP4_TUNNEL:
		printf("IP4Tunnel ");
		uint32_t_to_char(sa->src.ip.ip4, &a, &b, &c, &d);
		printf("%hhu.%hhu.%hhu.%hhu ", d, c, b, a);
		uint32_t_to_char(sa->dst.ip.ip4, &a, &b, &c, &d);
		printf("%hhu.%hhu.%hhu.%hhu", d, c, b, a);
		break;
	case IP6_TUNNEL:
		printf("IP6Tunnel ");
		for (i = 0; i < 16; i++) {
			if (i % 2 && i != 15)
				printf("%.2x:", sa->src.ip.ip6.ip6_b[i]);
			else
				printf("%.2x", sa->src.ip.ip6.ip6_b[i]);
		}
		printf(" ");
		for (i = 0; i < 16; i++) {
			if (i % 2 && i != 15)
				printf("%.2x:", sa->dst.ip.ip6.ip6_b[i]);
			else
				printf("%.2x", sa->dst.ip.ip6.ip6_b[i]);
		}
		break;
	case TRANSPORT:
		printf("Transport");
		break;
	}
	printf("\n");
}
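
/*
 * For the example rule above this would print roughly (illustrative):
 *	spi_out(  5):aes-128-cbc sha1-hmac mode:IP4Tunnel 172.16.1.5 172.16.2.5
 */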
struct sa_ctx {
	void *satbl; /* pointer to array of rte_ipsec_sa objects */
	struct ipsec_sa sa[IPSEC_SA_MAX_ENTRIES];
	struct {
		struct rte_crypto_sym_xform a;
		struct rte_crypto_sym_xform b;
	} xf[IPSEC_SA_MAX_ENTRIES];
};
static struct sa_ctx *
sa_create(const char *name, int32_t socket_id)
{
	char s[PATH_MAX];
	struct sa_ctx *sa_ctx;
	uint32_t mz_size;
	const struct rte_memzone *mz;

	snprintf(s, sizeof(s), "%s_%u", name, socket_id);

	/* Create SA array table */
	printf("Creating SA context with %u maximum entries\n",
			IPSEC_SA_MAX_ENTRIES);

	mz_size = sizeof(struct sa_ctx);
	mz = rte_memzone_reserve(s, mz_size, socket_id,
			RTE_MEMZONE_1GB | RTE_MEMZONE_SIZE_HINT_ONLY);
	if (mz == NULL) {
		printf("Failed to allocate SA DB memory\n");
		rte_errno = ENOMEM;
		return NULL;
	}

	sa_ctx = (struct sa_ctx *)mz->addr;

	return sa_ctx;
}
static int
check_eth_dev_caps(uint16_t portid, uint32_t inbound)
{
	struct rte_eth_dev_info dev_info;

	rte_eth_dev_info_get(portid, &dev_info);

	if (inbound) {
		if ((dev_info.rx_offload_capa &
				DEV_RX_OFFLOAD_SECURITY) == 0) {
			RTE_LOG(WARNING, PORT,
				"hardware RX IPSec offload is not supported\n");
			return -EINVAL;
		}
	} else { /* outbound */
		if ((dev_info.tx_offload_capa &
				DEV_TX_OFFLOAD_SECURITY) == 0) {
			RTE_LOG(WARNING, PORT,
				"hardware TX IPSec offload is not supported\n");
			return -EINVAL;
		}
	}
	return 0;
}
static int
sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
		uint32_t nb_entries, uint32_t inbound)
{
	struct ipsec_sa *sa;
	uint32_t i, idx;
	uint16_t iv_length, aad_length;

	/* for ESN upper 32 bits of SQN also need to be part of AAD */
	aad_length = (app_sa_prm.enable_esn != 0) ? sizeof(uint32_t) : 0;
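	/*
	 * E.g. for aes-128-gcm (aad_len = 8) with ESN enabled, the AEAD
	 * xform below ends up with aad_length = 8 + 4 = 12.
	 */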
	for (i = 0; i < nb_entries; i++) {
		idx = SPI2IDX(entries[i].spi);
		sa = &sa_ctx->sa[idx];
		if (sa->spi != 0) {
			printf("Index %u already in use by SPI %u\n",
					idx, sa->spi);
			return -EINVAL;
		}
		*sa = entries[i];
		sa->seq = 0;

		if (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL ||
			sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
			if (check_eth_dev_caps(sa->portid, inbound))
				return -EINVAL;
		}

		sa->direction = (inbound == 1) ?
				RTE_SECURITY_IPSEC_SA_DIR_INGRESS :
				RTE_SECURITY_IPSEC_SA_DIR_EGRESS;

		switch (sa->flags) {
		case IP4_TUNNEL:
			sa->src.ip.ip4 = rte_cpu_to_be_32(sa->src.ip.ip4);
			sa->dst.ip.ip4 = rte_cpu_to_be_32(sa->dst.ip.ip4);
		}
		if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_GCM) {
			iv_length = 16;

			sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_AEAD;
			sa_ctx->xf[idx].a.aead.algo = sa->aead_algo;
			sa_ctx->xf[idx].a.aead.key.data = sa->cipher_key;
			sa_ctx->xf[idx].a.aead.key.length =
					sa->cipher_key_len;
			sa_ctx->xf[idx].a.aead.op = (inbound == 1) ?
					RTE_CRYPTO_AEAD_OP_DECRYPT :
					RTE_CRYPTO_AEAD_OP_ENCRYPT;
			sa_ctx->xf[idx].a.next = NULL;
			sa_ctx->xf[idx].a.aead.iv.offset = IV_OFFSET;
			sa_ctx->xf[idx].a.aead.iv.length = iv_length;
			sa_ctx->xf[idx].a.aead.aad_length =
					sa->aad_len + aad_length;
			sa_ctx->xf[idx].a.aead.digest_length =
					sa->digest_len;

			sa->xforms = &sa_ctx->xf[idx].a;

			print_one_sa_rule(sa, inbound);
		} else {
			switch (sa->cipher_algo) {
			case RTE_CRYPTO_CIPHER_NULL:
			case RTE_CRYPTO_CIPHER_3DES_CBC:
			case RTE_CRYPTO_CIPHER_AES_CBC:
				iv_length = sa->iv_len;
				break;
			case RTE_CRYPTO_CIPHER_AES_CTR:
				iv_length = 16;
				break;
			default:
				RTE_LOG(ERR, IPSEC_ESP,
						"unsupported cipher algorithm %u\n",
						sa->cipher_algo);
				return -EINVAL;
			}

			if (inbound) {
				sa_ctx->xf[idx].b.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
				sa_ctx->xf[idx].b.cipher.algo = sa->cipher_algo;
				sa_ctx->xf[idx].b.cipher.key.data = sa->cipher_key;
				sa_ctx->xf[idx].b.cipher.key.length =
					sa->cipher_key_len;
				sa_ctx->xf[idx].b.cipher.op =
					RTE_CRYPTO_CIPHER_OP_DECRYPT;
				sa_ctx->xf[idx].b.next = NULL;
				sa_ctx->xf[idx].b.cipher.iv.offset = IV_OFFSET;
				sa_ctx->xf[idx].b.cipher.iv.length = iv_length;

				sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_AUTH;
				sa_ctx->xf[idx].a.auth.algo = sa->auth_algo;
				sa_ctx->xf[idx].a.auth.key.data = sa->auth_key;
				sa_ctx->xf[idx].a.auth.key.length =
					sa->auth_key_len;
				sa_ctx->xf[idx].a.auth.digest_length =
					sa->digest_len;
				sa_ctx->xf[idx].a.auth.op =
					RTE_CRYPTO_AUTH_OP_VERIFY;
			} else { /* outbound */
				sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
				sa_ctx->xf[idx].a.cipher.algo = sa->cipher_algo;
				sa_ctx->xf[idx].a.cipher.key.data = sa->cipher_key;
				sa_ctx->xf[idx].a.cipher.key.length =
					sa->cipher_key_len;
				sa_ctx->xf[idx].a.cipher.op =
					RTE_CRYPTO_CIPHER_OP_ENCRYPT;
				sa_ctx->xf[idx].a.next = NULL;
				sa_ctx->xf[idx].a.cipher.iv.offset = IV_OFFSET;
				sa_ctx->xf[idx].a.cipher.iv.length = iv_length;

				sa_ctx->xf[idx].b.type = RTE_CRYPTO_SYM_XFORM_AUTH;
				sa_ctx->xf[idx].b.auth.algo = sa->auth_algo;
				sa_ctx->xf[idx].b.auth.key.data = sa->auth_key;
				sa_ctx->xf[idx].b.auth.key.length =
					sa->auth_key_len;
				sa_ctx->xf[idx].b.auth.digest_length =
					sa->digest_len;
				sa_ctx->xf[idx].b.auth.op =
					RTE_CRYPTO_AUTH_OP_GENERATE;
			}

			sa_ctx->xf[idx].a.next = &sa_ctx->xf[idx].b;
			sa_ctx->xf[idx].b.next = NULL;
			sa->xforms = &sa_ctx->xf[idx].a;

			print_one_sa_rule(sa, inbound);
		}
	}

	return 0;
}
static inline int
sa_out_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
		uint32_t nb_entries)
{
	return sa_add_rules(sa_ctx, entries, nb_entries, 0);
}

static inline int
sa_in_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
		uint32_t nb_entries)
{
	return sa_add_rules(sa_ctx, entries, nb_entries, 1);
}
/*
 * Helper function, fills parameters that are identical for all SAs.
 */
static void
fill_ipsec_app_sa_prm(struct rte_ipsec_sa_prm *prm,
	const struct app_sa_prm *app_prm)
{
	memset(prm, 0, sizeof(*prm));

	prm->flags = app_prm->flags;
	prm->ipsec_xform.options.esn = app_prm->enable_esn;
	prm->replay_win_sz = app_prm->window_size;
}
/*
 * Helper function, tries to determine next_proto for SPI
 * by searching through SP rules.
 */
static int
get_spi_proto(uint32_t spi, enum rte_security_ipsec_sa_direction dir)
{
	int32_t rc4, rc6;

	rc4 = sp4_spi_present(spi, dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS);
	rc6 = sp6_spi_present(spi, dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS);

	if (rc4 >= 0) {
		if (rc6 >= 0) {
			RTE_LOG(ERR, IPSEC,
				"%s: SPI %u used simultaneously by "
				"IPv4(%d) and IPv6 (%d) SP rules\n",
				__func__, spi, rc4, rc6);
			return -EINVAL;
		} else
			return IPPROTO_IPIP;
	} else if (rc6 < 0) {
		RTE_LOG(ERR, IPSEC,
			"%s: SPI %u is not used by any SP rule\n",
			__func__, spi);
		return -EINVAL;
	} else
		return IPPROTO_IPV6;
}
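
/*
 * In other words: an SPI found only in the IPv4 SP table maps to
 * IPPROTO_IPIP, only in the IPv6 table to IPPROTO_IPV6; an SPI found
 * in both, or in neither, is reported as -EINVAL.
 */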
static int
fill_ipsec_sa_prm(struct rte_ipsec_sa_prm *prm, const struct ipsec_sa *ss,
	const struct ipv4_hdr *v4, struct ipv6_hdr *v6)
{
	int32_t rc;

	/*
	 * Try to get SPI next proto by searching that SPI in SPD.
	 * probably not the optimal way, but there seems nothing
	 * better right now.
	 */
	rc = get_spi_proto(ss->spi, ss->direction);
	if (rc < 0)
		return rc;

	fill_ipsec_app_sa_prm(prm, &app_sa_prm);
	prm->userdata = (uintptr_t)ss;

	/* setup ipsec xform */
	prm->ipsec_xform.spi = ss->spi;
	prm->ipsec_xform.salt = ss->salt;
	prm->ipsec_xform.direction = ss->direction;
	prm->ipsec_xform.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP;
	prm->ipsec_xform.mode = (ss->flags == TRANSPORT) ?
		RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT :
		RTE_SECURITY_IPSEC_SA_MODE_TUNNEL;

	if (ss->flags == IP4_TUNNEL) {
		prm->ipsec_xform.tunnel.type = RTE_SECURITY_IPSEC_TUNNEL_IPV4;
		prm->tun.hdr_len = sizeof(*v4);
		prm->tun.next_proto = rc;
		prm->tun.hdr = v4;
	} else if (ss->flags == IP6_TUNNEL) {
		prm->ipsec_xform.tunnel.type = RTE_SECURITY_IPSEC_TUNNEL_IPV6;
		prm->tun.hdr_len = sizeof(*v6);
		prm->tun.next_proto = rc;
		prm->tun.hdr = v6;
	}

	/* setup crypto section */
	prm->crypto_xform = ss->xforms;
	return 0;
}
static void
fill_ipsec_session(struct rte_ipsec_session *ss, struct rte_ipsec_sa *sa,
	const struct ipsec_sa *lsa)
{
	ss->sa = sa;
	ss->type = lsa->type;

	/* setup crypto section */
	if (ss->type == RTE_SECURITY_ACTION_TYPE_NONE) {
		ss->crypto.ses = lsa->crypto_session;
	/* setup session action type */
	} else {
		ss->security.ses = lsa->sec_session;
		ss->security.ctx = lsa->security_ctx;
		ss->security.ol_flags = lsa->ol_flags;
	}
}
/*
 * Initialise related rte_ipsec_sa object.
 */
static int
ipsec_sa_init(struct ipsec_sa *lsa, struct rte_ipsec_sa *sa, uint32_t sa_size)
{
	int rc;
	struct rte_ipsec_sa_prm prm;
	struct ipv4_hdr v4 = {
		.version_ihl = IPVERSION << 4 |
			sizeof(v4) / IPV4_IHL_MULTIPLIER,
		.time_to_live = IPDEFTTL,
		.next_proto_id = IPPROTO_ESP,
		.src_addr = lsa->src.ip.ip4,
		.dst_addr = lsa->dst.ip.ip4,
	};
	struct ipv6_hdr v6 = {
		.vtc_flow = htonl(IP6_VERSION << 28),
		.proto = IPPROTO_ESP,
	};

	if (lsa->flags == IP6_TUNNEL) {
		memcpy(v6.src_addr, lsa->src.ip.ip6.ip6_b, sizeof(v6.src_addr));
		memcpy(v6.dst_addr, lsa->dst.ip.ip6.ip6_b, sizeof(v6.dst_addr));
	}

	rc = fill_ipsec_sa_prm(&prm, lsa, &v4, &v6);
	if (rc == 0)
		rc = rte_ipsec_sa_init(sa, &prm, sa_size);
	if (rc < 0)
		return rc;

	fill_ipsec_session(&lsa->ips, sa, lsa);
	return 0;
}
/*
 * Allocate space and init rte_ipsec_sa structures,
 * one per session.
 */
static int
ipsec_satbl_init(struct sa_ctx *ctx, const struct ipsec_sa *ent,
	uint32_t nb_ent, int32_t socket)
{
	int32_t rc, sz;
	uint32_t i, idx;
	size_t tsz;
	struct rte_ipsec_sa *sa;
	struct ipsec_sa *lsa;
	struct rte_ipsec_sa_prm prm;

	/* determine SA size */
	idx = SPI2IDX(ent[0].spi);
	fill_ipsec_sa_prm(&prm, ctx->sa + idx, NULL, NULL);
	sz = rte_ipsec_sa_size(&prm);
	if (sz < 0) {
		RTE_LOG(ERR, IPSEC, "%s(%p, %u, %d): "
			"failed to determine SA size, error code: %d\n",
			__func__, ctx, nb_ent, socket, sz);
		return sz;
	}

	tsz = sz * nb_ent;

	ctx->satbl = rte_zmalloc_socket(NULL, tsz, RTE_CACHE_LINE_SIZE, socket);
	if (ctx->satbl == NULL) {
		RTE_LOG(ERR, IPSEC,
			"%s(%p, %u, %d): failed to allocate %zu bytes\n",
			__func__, ctx, nb_ent, socket, tsz);
		return -ENOMEM;
	}

	rc = 0;
	for (i = 0; i != nb_ent && rc == 0; i++) {
		idx = SPI2IDX(ent[i].spi);

		sa = (struct rte_ipsec_sa *)((uintptr_t)ctx->satbl + sz * i);
		lsa = ctx->sa + idx;

		rc = ipsec_sa_init(lsa, sa, sz);
	}

	return rc;
}
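
/*
 * Layout note: satbl is one flat allocation of nb_ent elements, each
 * rte_ipsec_sa_size(&prm) bytes long, so entry i starts at byte offset
 * sz * i, as computed in the loop above.
 */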
void
sa_init(struct socket_ctx *ctx, int32_t socket_id)
{
	int32_t rc;
	const char *name;

	if (ctx == NULL)
		rte_exit(EXIT_FAILURE, "NULL context.\n");

	if (ctx->sa_in != NULL)
		rte_exit(EXIT_FAILURE, "Inbound SA DB for socket %u already "
				"initialized\n", socket_id);

	if (ctx->sa_out != NULL)
		rte_exit(EXIT_FAILURE, "Outbound SA DB for socket %u already "
				"initialized\n", socket_id);

	if (nb_sa_in > 0) {
		name = "sa_in";
		ctx->sa_in = sa_create(name, socket_id);
		if (ctx->sa_in == NULL)
			rte_exit(EXIT_FAILURE, "Error [%d] creating SA "
				"context %s in socket %d\n", rte_errno,
				name, socket_id);

		sa_in_add_rules(ctx->sa_in, sa_in, nb_sa_in);

		if (app_sa_prm.enable != 0) {
			rc = ipsec_satbl_init(ctx->sa_in, sa_in, nb_sa_in,
				socket_id);
			if (rc != 0)
				rte_exit(EXIT_FAILURE,
					"failed to init inbound SAs\n");
		}
	} else
		RTE_LOG(WARNING, IPSEC, "No SA Inbound rule specified\n");

	if (nb_sa_out > 0) {
		name = "sa_out";
		ctx->sa_out = sa_create(name, socket_id);
		if (ctx->sa_out == NULL)
			rte_exit(EXIT_FAILURE, "Error [%d] creating SA "
				"context %s in socket %d\n", rte_errno,
				name, socket_id);

		sa_out_add_rules(ctx->sa_out, sa_out, nb_sa_out);

		if (app_sa_prm.enable != 0) {
			rc = ipsec_satbl_init(ctx->sa_out, sa_out, nb_sa_out,
				socket_id);
			if (rc != 0)
				rte_exit(EXIT_FAILURE,
					"failed to init outbound SAs\n");
		}
	} else
		RTE_LOG(WARNING, IPSEC, "No SA Outbound rule "
			"specified\n");
}
int
inbound_sa_check(struct sa_ctx *sa_ctx, struct rte_mbuf *m, uint32_t sa_idx)
{
	struct ipsec_mbuf_metadata *priv;
	struct ipsec_sa *sa;

	priv = get_priv(m);
	sa = priv->sa;
	if (sa != NULL)
		return (sa_ctx->sa[sa_idx].spi == sa->spi);

	RTE_LOG(ERR, IPSEC, "SA not saved in private data\n");
	return 0;
}
static inline void
single_inbound_lookup(struct ipsec_sa *sadb, struct rte_mbuf *pkt,
		struct ipsec_sa **sa_ret)
{
	struct esp_hdr *esp;
	struct ip *ip;
	uint32_t *src4_addr;
	uint8_t *src6_addr;
	struct ipsec_sa *sa;

	*sa_ret = NULL;

	ip = rte_pktmbuf_mtod(pkt, struct ip *);
	if (ip->ip_v == IPVERSION)
		esp = (struct esp_hdr *)(ip + 1);
	else
		esp = (struct esp_hdr *)(((struct ip6_hdr *)ip) + 1);

	if (esp->spi == INVALID_SPI)
		return;

	sa = &sadb[SPI2IDX(rte_be_to_cpu_32(esp->spi))];
	if (rte_be_to_cpu_32(esp->spi) != sa->spi)
		return;

	switch (sa->flags) {
	case IP4_TUNNEL:
		src4_addr = RTE_PTR_ADD(ip, offsetof(struct ip, ip_src));
		if ((ip->ip_v == IPVERSION) &&
				(sa->src.ip.ip4 == *src4_addr) &&
				(sa->dst.ip.ip4 == *(src4_addr + 1)))
			*sa_ret = sa;
		break;
	case IP6_TUNNEL:
		src6_addr = RTE_PTR_ADD(ip, offsetof(struct ip6_hdr, ip6_src));
		if ((ip->ip_v == IP6_VERSION) &&
				!memcmp(&sa->src.ip.ip6.ip6, src6_addr, 16) &&
				!memcmp(&sa->dst.ip.ip6.ip6, src6_addr + 16, 16))
			*sa_ret = sa;
		break;
	case TRANSPORT:
		*sa_ret = sa;
	}
}
void
inbound_sa_lookup(struct sa_ctx *sa_ctx, struct rte_mbuf *pkts[],
		struct ipsec_sa *sa[], uint16_t nb_pkts)
{
	uint32_t i;

	for (i = 0; i < nb_pkts; i++)
		single_inbound_lookup(sa_ctx->sa, pkts[i], &sa[i]);
}

void
outbound_sa_lookup(struct sa_ctx *sa_ctx, uint32_t sa_idx[],
		struct ipsec_sa *sa[], uint16_t nb_pkts)
{
	uint32_t i;

	for (i = 0; i < nb_pkts; i++)
		sa[i] = &sa_ctx->sa[sa_idx[i]];
}
/*
 * Select HW offloads to be used.
 */
int
sa_check_offloads(uint16_t port_id, uint64_t *rx_offloads,
		uint64_t *tx_offloads)
{
	struct ipsec_sa *rule;
	uint32_t idx_sa;

	*rx_offloads = 0;
	*tx_offloads = 0;

	/* Check for inbound rules that use offloads and use this port */
	for (idx_sa = 0; idx_sa < nb_sa_in; idx_sa++) {
		rule = &sa_in[idx_sa];
		if ((rule->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
				rule->type ==
				RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
				&& rule->portid == port_id)
			*rx_offloads |= DEV_RX_OFFLOAD_SECURITY;
	}

	/* Check for outbound rules that use offloads and use this port */
	for (idx_sa = 0; idx_sa < nb_sa_out; idx_sa++) {
		rule = &sa_out[idx_sa];
		if ((rule->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
				rule->type ==
				RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
				&& rule->portid == port_id)
			*tx_offloads |= DEV_TX_OFFLOAD_SECURITY;
	}
	return 0;
}
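
/*
 * Usage sketch (hypothetical caller, not part of this file): the flags
 * gathered above are meant to be OR-ed into the port configuration
 * before rte_eth_dev_configure(), e.g.:
 *
 *	struct rte_eth_conf conf = {0};
 *	uint64_t rx_offloads, tx_offloads;
 *
 *	sa_check_offloads(port_id, &rx_offloads, &tx_offloads);
 *	conf.rxmode.offloads |= rx_offloads;
 *	conf.txmode.offloads |= tx_offloads;
 */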