/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2020 Intel Corporation
 * Security Associations
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <rte_memzone.h>
#include <rte_crypto.h>
#include <rte_security.h>
#include <rte_cryptodev.h>
#include <rte_byteorder.h>
#include <rte_errno.h>
#include <rte_random.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#define IP4_FULL_MASK (sizeof(((struct ip_addr *)NULL)->ip.ip4) * CHAR_BIT)
#define IP6_FULL_MASK (sizeof(((struct ip_addr *)NULL)->ip.ip6.ip6) * CHAR_BIT)
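/*
 * IP4_FULL_MASK evaluates to 32 and IP6_FULL_MASK to 128: the prefix
 * lengths that denote an exact-match address in an SP rule.
 */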
#define MBUF_NO_SEC_OFFLOAD(m) ((m->ol_flags & PKT_RX_SEC_OFFLOAD) == 0)
struct supported_cipher_algo {
    enum rte_crypto_cipher_algorithm algo;
struct supported_auth_algo {
    enum rte_crypto_auth_algorithm algo;
struct supported_aead_algo {
    enum rte_crypto_aead_algorithm algo;
const struct supported_cipher_algo cipher_algos[] = {
        .algo = RTE_CRYPTO_CIPHER_NULL,
        .keyword = "aes-128-cbc",
        .algo = RTE_CRYPTO_CIPHER_AES_CBC,
        .keyword = "aes-192-cbc",
        .algo = RTE_CRYPTO_CIPHER_AES_CBC,
        .keyword = "aes-256-cbc",
        .algo = RTE_CRYPTO_CIPHER_AES_CBC,
        .keyword = "aes-128-ctr",
        .algo = RTE_CRYPTO_CIPHER_AES_CTR,
        .keyword = "3des-cbc",
        .algo = RTE_CRYPTO_CIPHER_3DES_CBC,
const struct supported_auth_algo auth_algos[] = {
        .algo = RTE_CRYPTO_AUTH_NULL,
        .keyword = "sha1-hmac",
        .algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
        .keyword = "sha256-hmac",
        .algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
const struct supported_aead_algo aead_algos[] = {
        .keyword = "aes-128-gcm",
        .algo = RTE_CRYPTO_AEAD_AES_GCM,
        .keyword = "aes-192-gcm",
        .algo = RTE_CRYPTO_AEAD_AES_GCM,
        .keyword = "aes-256-gcm",
        .algo = RTE_CRYPTO_AEAD_AES_GCM,
#define SA_INIT_NB 128
struct ipsec_sa *sa_out;
static uint32_t sa_out_sz;
static struct ipsec_sa_cnt sa_out_cnt;
struct ipsec_sa *sa_in;
static uint32_t sa_in_sz;
static struct ipsec_sa_cnt sa_in_cnt;
static const struct supported_cipher_algo *
find_match_cipher_algo(const char *cipher_keyword)
    for (i = 0; i < RTE_DIM(cipher_algos); i++) {
        const struct supported_cipher_algo *algo =
        if (strcmp(cipher_keyword, algo->keyword) == 0)
static const struct supported_auth_algo *
find_match_auth_algo(const char *auth_keyword)
    for (i = 0; i < RTE_DIM(auth_algos); i++) {
        const struct supported_auth_algo *algo =
        if (strcmp(auth_keyword, algo->keyword) == 0)
static const struct supported_aead_algo *
find_match_aead_algo(const char *aead_keyword)
    for (i = 0; i < RTE_DIM(aead_algos); i++) {
        const struct supported_aead_algo *algo =
        if (strcmp(aead_keyword, algo->keyword) == 0)
 * parse x:x:x:x.... hex number key string into uint8_t *key
 * > 0: number of bytes parsed
parse_key_string(const char *key_str, uint8_t *key)
    const char *pt_start = key_str, *pt_end = key_str;
    uint32_t nb_bytes = 0;
    while (pt_end != NULL) {
        char sub_str[3] = {0};
        pt_end = strchr(pt_start, ':');
        if (pt_end == NULL) {
            if (strlen(pt_start) > 2)
            strncpy(sub_str, pt_start, 2);
            if (pt_end - pt_start > 2)
            strncpy(sub_str, pt_start, pt_end - pt_start);
            pt_start = pt_end + 1;
        key[nb_bytes++] = strtol(sub_str, NULL, 16);
extend_sa_arr(struct ipsec_sa **sa_tbl, uint32_t cur_cnt, uint32_t *cur_sz)
    if (*sa_tbl == NULL) {
        *sa_tbl = calloc(SA_INIT_NB, sizeof(struct ipsec_sa));
        *cur_sz = SA_INIT_NB;
    if (cur_cnt >= *cur_sz) {
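        /*
         * Table is full: double its size (128, 256, 512, ...) so that
         * repeated rule insertions stay amortized O(1).
         */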
        *sa_tbl = realloc(*sa_tbl,
                *cur_sz * sizeof(struct ipsec_sa) * 2);
        /* clean reallocated extra space */
        memset(&(*sa_tbl)[*cur_sz], 0,
                *cur_sz * sizeof(struct ipsec_sa));
parse_sa_tokens(char **tokens, uint32_t n_tokens,
        struct parse_status *status)
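    /*
     * Consumes one cfg-file SA rule, e.g. (cf. the ipsec-secgw guide,
     * key bytes shortened here):
     *
     *   sa out 5 cipher_algo aes-128-cbc cipher_key 0:0:...:0 \
     *      auth_algo sha1-hmac auth_key 0:0:...:0 \
     *      mode ipv4-tunnel src 172.16.1.5 dst 172.16.2.5
     */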
    struct ipsec_sa *rule = NULL;
    struct rte_ipsec_session *ips;
    uint32_t ti; /* token index */
    uint32_t *ri; /* rule index */
    struct ipsec_sa_cnt *sa_cnt;
    uint32_t cipher_algo_p = 0;
    uint32_t auth_algo_p = 0;
    uint32_t aead_algo_p = 0;
    uint32_t portid_p = 0;
    uint32_t fallback_p = 0;
    if (strcmp(tokens[0], "in") == 0) {
        if (extend_sa_arr(&sa_in, nb_sa_in, &sa_in_sz) < 0)
        rule->direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS;
        sa_cnt = &sa_out_cnt;
        if (extend_sa_arr(&sa_out, nb_sa_out, &sa_out_sz) < 0)
        rule->direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS;
    APP_CHECK_TOKEN_IS_NUM(tokens, 1, status);
    if (status->status < 0)
    if (atoi(tokens[1]) == INVALID_SPI)
    rule->spi = atoi(tokens[1]);
    ips = ipsec_get_primary_session(rule);
    for (ti = 2; ti < n_tokens; ti++) {
        if (strcmp(tokens[ti], "mode") == 0) {
            APP_CHECK_PRESENCE(mode_p, tokens[ti], status);
            if (status->status < 0)
            INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
            if (status->status < 0)
            if (strcmp(tokens[ti], "ipv4-tunnel") == 0) {
                rule->flags = IP4_TUNNEL;
            } else if (strcmp(tokens[ti], "ipv6-tunnel") == 0) {
                rule->flags = IP6_TUNNEL;
            } else if (strcmp(tokens[ti], "transport") == 0) {
                rule->flags = TRANSPORT;
                APP_CHECK(0, status, "unrecognized "
                    "input \"%s\"", tokens[ti]);
        if (strcmp(tokens[ti], "cipher_algo") == 0) {
            const struct supported_cipher_algo *algo;
            APP_CHECK_PRESENCE(cipher_algo_p, tokens[ti],
            if (status->status < 0)
            INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
            if (status->status < 0)
            algo = find_match_cipher_algo(tokens[ti]);
            APP_CHECK(algo != NULL, status, "unrecognized "
                "input \"%s\"", tokens[ti]);
            if (status->status < 0)
            rule->cipher_algo = algo->algo;
            rule->block_size = algo->block_size;
            rule->iv_len = algo->iv_len;
            rule->cipher_key_len = algo->key_len;
            /* for NULL algorithm, no cipher key required */
            if (rule->cipher_algo == RTE_CRYPTO_CIPHER_NULL) {
            INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
            if (status->status < 0)
            APP_CHECK(strcmp(tokens[ti], "cipher_key") == 0,
                status, "unrecognized input \"%s\", "
                "expect \"cipher_key\"", tokens[ti]);
            if (status->status < 0)
            INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
            if (status->status < 0)
            key_len = parse_key_string(tokens[ti],
            APP_CHECK(key_len == rule->cipher_key_len, status,
                "unrecognized input \"%s\"", tokens[ti]);
            if (status->status < 0)
            if (algo->algo == RTE_CRYPTO_CIPHER_AES_CBC ||
                    algo->algo == RTE_CRYPTO_CIPHER_3DES_CBC)
                rule->salt = (uint32_t)rte_rand();
            if (algo->algo == RTE_CRYPTO_CIPHER_AES_CTR) {
                rule->cipher_key_len = key_len;
                    &rule->cipher_key[key_len], 4);
        if (strcmp(tokens[ti], "auth_algo") == 0) {
            const struct supported_auth_algo *algo;
            APP_CHECK_PRESENCE(auth_algo_p, tokens[ti],
            if (status->status < 0)
            INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
            if (status->status < 0)
            algo = find_match_auth_algo(tokens[ti]);
            APP_CHECK(algo != NULL, status, "unrecognized "
                "input \"%s\"", tokens[ti]);
            if (status->status < 0)
            rule->auth_algo = algo->algo;
            rule->auth_key_len = algo->key_len;
            rule->digest_len = algo->digest_len;
            /* NULL algorithm and combined algos do not
            if (algo->key_not_req) {
            INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
            if (status->status < 0)
            APP_CHECK(strcmp(tokens[ti], "auth_key") == 0,
                status, "unrecognized input \"%s\", "
                "expect \"auth_key\"", tokens[ti]);
            if (status->status < 0)
            INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
            if (status->status < 0)
            key_len = parse_key_string(tokens[ti],
            APP_CHECK(key_len == rule->auth_key_len, status,
                "unrecognized input \"%s\"", tokens[ti]);
            if (status->status < 0)
        if (strcmp(tokens[ti], "aead_algo") == 0) {
            const struct supported_aead_algo *algo;
            APP_CHECK_PRESENCE(aead_algo_p, tokens[ti],
            if (status->status < 0)
            INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
            if (status->status < 0)
            algo = find_match_aead_algo(tokens[ti]);
            APP_CHECK(algo != NULL, status, "unrecognized "
                "input \"%s\"", tokens[ti]);
            if (status->status < 0)
            rule->aead_algo = algo->algo;
            rule->cipher_key_len = algo->key_len;
            rule->digest_len = algo->digest_len;
            rule->aad_len = algo->aad_len;
            rule->block_size = algo->block_size;
            rule->iv_len = algo->iv_len;
            INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
            if (status->status < 0)
            APP_CHECK(strcmp(tokens[ti], "aead_key") == 0,
                status, "unrecognized input \"%s\", "
                "expect \"aead_key\"", tokens[ti]);
            if (status->status < 0)
            INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
            if (status->status < 0)
            key_len = parse_key_string(tokens[ti],
            APP_CHECK(key_len == rule->cipher_key_len, status,
                "unrecognized input \"%s\"", tokens[ti]);
            if (status->status < 0)
            rule->cipher_key_len = key_len;
                &rule->cipher_key[key_len], 4);
        if (strcmp(tokens[ti], "src") == 0) {
            APP_CHECK_PRESENCE(src_p, tokens[ti], status);
            if (status->status < 0)
            INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
            if (status->status < 0)
            if (IS_IP4_TUNNEL(rule->flags)) {
                APP_CHECK(parse_ipv4_addr(tokens[ti],
                    &ip, NULL) == 0, status,
                    "unrecognized input \"%s\", "
                    "expect valid ipv4 addr",
                if (status->status < 0)
                rule->src.ip.ip4 = rte_bswap32(
                    (uint32_t)ip.s_addr);
            } else if (IS_IP6_TUNNEL(rule->flags)) {
                APP_CHECK(parse_ipv6_addr(tokens[ti], &ip,
                    "unrecognized input \"%s\", "
                    "expect valid ipv6 addr",
                if (status->status < 0)
                memcpy(rule->src.ip.ip6.ip6_b,
            } else if (IS_TRANSPORT(rule->flags)) {
                APP_CHECK(0, status, "unrecognized input "
                    "\"%s\"", tokens[ti]);
        if (strcmp(tokens[ti], "dst") == 0) {
            APP_CHECK_PRESENCE(dst_p, tokens[ti], status);
            if (status->status < 0)
            INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
            if (status->status < 0)
            if (IS_IP4_TUNNEL(rule->flags)) {
                APP_CHECK(parse_ipv4_addr(tokens[ti],
                    &ip, NULL) == 0, status,
                    "unrecognized input \"%s\", "
                    "expect valid ipv4 addr",
                if (status->status < 0)
                rule->dst.ip.ip4 = rte_bswap32(
                    (uint32_t)ip.s_addr);
            } else if (IS_IP6_TUNNEL(rule->flags)) {
                APP_CHECK(parse_ipv6_addr(tokens[ti], &ip,
                    "unrecognized input \"%s\", "
                    "expect valid ipv6 addr",
                if (status->status < 0)
                memcpy(rule->dst.ip.ip6.ip6_b, ip.s6_addr, 16);
            } else if (IS_TRANSPORT(rule->flags)) {
                APP_CHECK(0, status, "unrecognized "
                    "input \"%s\"", tokens[ti]);
        if (strcmp(tokens[ti], "type") == 0) {
            APP_CHECK_PRESENCE(type_p, tokens[ti], status);
            if (status->status < 0)
            INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
            if (status->status < 0)
            if (strcmp(tokens[ti], "inline-crypto-offload") == 0)
                    RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO;
            else if (strcmp(tokens[ti],
                    "inline-protocol-offload") == 0)
                    RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL;
            else if (strcmp(tokens[ti],
                    "lookaside-protocol-offload") == 0)
                    RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL;
            else if (strcmp(tokens[ti], "no-offload") == 0)
                ips->type = RTE_SECURITY_ACTION_TYPE_NONE;
            else if (strcmp(tokens[ti], "cpu-crypto") == 0)
                ips->type = RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO;
                APP_CHECK(0, status, "Invalid input \"%s\"",
        if (strcmp(tokens[ti], "port_id") == 0) {
            APP_CHECK_PRESENCE(portid_p, tokens[ti], status);
            if (status->status < 0)
            INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
            if (status->status < 0)
            rule->portid = atoi(tokens[ti]);
            if (status->status < 0)
        if (strcmp(tokens[ti], "fallback") == 0) {
            struct rte_ipsec_session *fb;
            APP_CHECK(app_sa_prm.enable, status, "Fallback session "
                "not allowed for legacy mode.");
            if (status->status < 0)
            APP_CHECK(ips->type ==
                RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO, status,
                "Fallback session is allowed only when the primary "
                "session type is inline-crypto-offload.");
            if (status->status < 0)
            APP_CHECK(rule->direction ==
                RTE_SECURITY_IPSEC_SA_DIR_INGRESS, status,
                "Fallback session not allowed for egress "
            if (status->status < 0)
            APP_CHECK_PRESENCE(fallback_p, tokens[ti], status);
            if (status->status < 0)
            INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
            if (status->status < 0)
            fb = ipsec_get_fallback_session(rule);
            if (strcmp(tokens[ti], "lookaside-none") == 0)
                fb->type = RTE_SECURITY_ACTION_TYPE_NONE;
            else if (strcmp(tokens[ti], "cpu-crypto") == 0)
                fb->type = RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO;
                APP_CHECK(0, status, "unrecognized fallback "
                    "type %s.", tokens[ti]);
            rule->fallback_sessions = 1;
        /* unrecognizable input */
        APP_CHECK(0, status, "unrecognized input \"%s\"",
        APP_CHECK(cipher_algo_p == 0, status,
            "AEAD used, no need for cipher options");
        if (status->status < 0)
        APP_CHECK(auth_algo_p == 0, status,
            "AEAD used, no need for auth options");
        if (status->status < 0)
        APP_CHECK(cipher_algo_p == 1, status, "missing cipher or AEAD options");
        if (status->status < 0)
        APP_CHECK(auth_algo_p == 1, status, "missing auth or AEAD options");
        if (status->status < 0)
    APP_CHECK(mode_p == 1, status, "missing mode option");
    if (status->status < 0)
    if ((ips->type != RTE_SECURITY_ACTION_TYPE_NONE && ips->type !=
            RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO) && (portid_p == 0))
        printf("Missing portid option, falling back to non-offload\n");
    if (!type_p || (!portid_p && ips->type !=
            RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO)) {
        ips->type = RTE_SECURITY_ACTION_TYPE_NONE;
print_one_sa_rule(const struct ipsec_sa *sa, int inbound)
    const struct rte_ipsec_session *ips;
    const struct rte_ipsec_session *fallback_ips;
    printf("\tspi_%s(%3u):", inbound ? "in" : "out", sa->spi);
    for (i = 0; i < RTE_DIM(cipher_algos); i++) {
        if (cipher_algos[i].algo == sa->cipher_algo &&
                cipher_algos[i].key_len == sa->cipher_key_len) {
            printf("%s ", cipher_algos[i].keyword);
    for (i = 0; i < RTE_DIM(auth_algos); i++) {
        if (auth_algos[i].algo == sa->auth_algo) {
            printf("%s ", auth_algos[i].keyword);
    for (i = 0; i < RTE_DIM(aead_algos); i++) {
        if (aead_algos[i].algo == sa->aead_algo &&
                aead_algos[i].key_len - 4 == sa->cipher_key_len) {
            printf("%s ", aead_algos[i].keyword);
    switch (WITHOUT_TRANSPORT_VERSION(sa->flags)) {
        printf("IP4Tunnel ");
        uint32_t_to_char(sa->src.ip.ip4, &a, &b, &c, &d);
        printf("%hhu.%hhu.%hhu.%hhu ", d, c, b, a);
        uint32_t_to_char(sa->dst.ip.ip4, &a, &b, &c, &d);
        printf("%hhu.%hhu.%hhu.%hhu", d, c, b, a);
        printf("IP6Tunnel ");
        for (i = 0; i < 16; i++) {
            if (i % 2 && i != 15)
                printf("%.2x:", sa->src.ip.ip6.ip6_b[i]);
                printf("%.2x", sa->src.ip.ip6.ip6_b[i]);
        for (i = 0; i < 16; i++) {
            if (i % 2 && i != 15)
                printf("%.2x:", sa->dst.ip.ip6.ip6_b[i]);
                printf("%.2x", sa->dst.ip.ip6.ip6_b[i]);
        printf("Transport ");
    ips = &sa->sessions[IPSEC_SESSION_PRIMARY];
    case RTE_SECURITY_ACTION_TYPE_NONE:
        printf("no-offload ");
    case RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO:
        printf("inline-crypto-offload ");
    case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
        printf("inline-protocol-offload ");
    case RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL:
        printf("lookaside-protocol-offload ");
    case RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO:
        printf("cpu-crypto-accelerated");
    fallback_ips = &sa->sessions[IPSEC_SESSION_FALLBACK];
    if (fallback_ips != NULL && sa->fallback_sessions > 0) {
        printf("inline fallback: ");
        switch (fallback_ips->type) {
        case RTE_SECURITY_ACTION_TYPE_NONE:
            printf("lookaside-none");
        case RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO:
            printf("cpu-crypto-accelerated");
static struct sa_ctx *
sa_create(const char *name, int32_t socket_id, uint32_t nb_sa)
    struct sa_ctx *sa_ctx;
    const struct rte_memzone *mz;
    snprintf(s, sizeof(s), "%s_%u", name, socket_id);
    /* Create SA context */
    printf("Creating SA context with %u maximum entries on socket %d\n",
    mz_size = sizeof(struct ipsec_xf) * nb_sa;
    mz = rte_memzone_reserve(s, mz_size, socket_id,
            RTE_MEMZONE_1GB | RTE_MEMZONE_SIZE_HINT_ONLY);
        printf("Failed to allocate SA XFORM memory\n");
    sa_ctx = rte_zmalloc(NULL, sizeof(struct sa_ctx) +
            sizeof(struct ipsec_sa) * nb_sa, RTE_CACHE_LINE_SIZE);
    if (sa_ctx == NULL) {
        printf("Failed to allocate SA CTX memory\n");
        rte_memzone_free(mz);
    sa_ctx->xf = (struct ipsec_xf *)mz->addr;
    sa_ctx->nb_sa = nb_sa;
check_eth_dev_caps(uint16_t portid, uint32_t inbound)
    struct rte_eth_dev_info dev_info;
    retval = rte_eth_dev_info_get(portid, &dev_info);
            "Error getting device (port %u) info: %s\n",
            portid, strerror(-retval));
        if ((dev_info.rx_offload_capa &
                DEV_RX_OFFLOAD_SECURITY) == 0) {
            RTE_LOG(WARNING, PORT,
                "hardware RX IPSec offload is not supported\n");
    } else { /* outbound */
        if ((dev_info.tx_offload_capa &
                DEV_TX_OFFLOAD_SECURITY) == 0) {
            RTE_LOG(WARNING, PORT,
                "hardware TX IPSec offload is not supported\n");
 * Helper function, tries to determine next_proto for SPI
 * by searching through SP rules.
get_spi_proto(uint32_t spi, enum rte_security_ipsec_sa_direction dir,
        struct ip_addr ip_addr[2], uint32_t mask[2])
    rc4 = sp4_spi_present(spi, dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
    rc6 = sp6_spi_present(spi, dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
                "%s: SPI %u used simultaneously by "
                "IPv4 (%d) and IPv6 (%d) SP rules\n",
                __func__, spi, rc4, rc6);
    } else if (rc6 < 0) {
            "%s: SPI %u is not used by any SP rule\n",
 * Helper function for getting source and destination IP addresses
 * from SP. Needed for inline crypto transport mode, as addresses are not
 * provided in the config file for that mode. It checks whether an SP for the
 * current SA exists and, based on the protocol that is returned, stores the
 * appropriate addresses from the SP into the SA.
sa_add_address_inline_crypto(struct ipsec_sa *sa)
    struct ip_addr ip_addr[2];
    protocol = get_spi_proto(sa->spi, sa->direction, ip_addr, mask);
    else if (protocol == IPPROTO_IPIP) {
        sa->flags |= IP4_TRANSPORT;
        if (mask[0] == IP4_FULL_MASK &&
                mask[1] == IP4_FULL_MASK &&
                ip_addr[0].ip.ip4 != 0 &&
                ip_addr[1].ip.ip4 != 0) {
            sa->src.ip.ip4 = ip_addr[0].ip.ip4;
            sa->dst.ip.ip4 = ip_addr[1].ip.ip4;
                "%s: No valid address or mask entry in"
                " IPv4 SP rule for SPI %u\n",
    } else if (protocol == IPPROTO_IPV6) {
        sa->flags |= IP6_TRANSPORT;
        if (mask[0] == IP6_FULL_MASK &&
                mask[1] == IP6_FULL_MASK &&
                (ip_addr[0].ip.ip6.ip6[0] != 0 ||
                ip_addr[0].ip.ip6.ip6[1] != 0) &&
                (ip_addr[1].ip.ip6.ip6[0] != 0 ||
                ip_addr[1].ip.ip6.ip6[1] != 0)) {
            sa->src.ip.ip6 = ip_addr[0].ip.ip6;
            sa->dst.ip.ip6 = ip_addr[1].ip.ip6;
                "%s: No valid address or mask entry in"
                " IPv6 SP rule for SPI %u\n",
sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
        uint32_t nb_entries, uint32_t inbound,
        struct socket_ctx *skt_ctx)
    struct ipsec_sa *sa;
    uint16_t iv_length, aad_length;
    struct rte_ipsec_session *ips;
    /* for ESN, the upper 32 bits of the SQN also need to be part of the AAD */
    aad_length = (app_sa_prm.enable_esn != 0) ? sizeof(uint32_t) : 0;
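    /*
     * e.g. with ESN enabled, an AES-GCM SA below ends up advertising
     * sa->aad_len + 4 bytes of AAD to the crypto device.
     */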
    for (i = 0; i < nb_entries; i++) {
        sa = &sa_ctx->sa[idx];
            printf("Index %u already in use by SPI %u\n",
        rc = ipsec_sad_add(&sa_ctx->sad, sa);
        ips = ipsec_get_primary_session(sa);
        if (ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL ||
                ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
            if (check_eth_dev_caps(sa->portid, inbound))
        switch (WITHOUT_TRANSPORT_VERSION(sa->flags)) {
            sa->src.ip.ip4 = rte_cpu_to_be_32(sa->src.ip.ip4);
            sa->dst.ip.ip4 = rte_cpu_to_be_32(sa->dst.ip.ip4);
                    RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
                    sa_add_address_inline_crypto(sa);
                if (inline_status < 0)
                    return inline_status;
        if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_GCM) {
            sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_AEAD;
            sa_ctx->xf[idx].a.aead.algo = sa->aead_algo;
            sa_ctx->xf[idx].a.aead.key.data = sa->cipher_key;
            sa_ctx->xf[idx].a.aead.key.length =
            sa_ctx->xf[idx].a.aead.op = (inbound == 1) ?
                RTE_CRYPTO_AEAD_OP_DECRYPT :
                RTE_CRYPTO_AEAD_OP_ENCRYPT;
            sa_ctx->xf[idx].a.next = NULL;
            sa_ctx->xf[idx].a.aead.iv.offset = IV_OFFSET;
            sa_ctx->xf[idx].a.aead.iv.length = iv_length;
            sa_ctx->xf[idx].a.aead.aad_length =
                sa->aad_len + aad_length;
            sa_ctx->xf[idx].a.aead.digest_length =
            sa->xforms = &sa_ctx->xf[idx].a;
            switch (sa->cipher_algo) {
            case RTE_CRYPTO_CIPHER_NULL:
            case RTE_CRYPTO_CIPHER_3DES_CBC:
            case RTE_CRYPTO_CIPHER_AES_CBC:
                iv_length = sa->iv_len;
            case RTE_CRYPTO_CIPHER_AES_CTR:
                RTE_LOG(ERR, IPSEC_ESP,
                    "unsupported cipher algorithm %u\n",
                sa_ctx->xf[idx].b.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
                sa_ctx->xf[idx].b.cipher.algo = sa->cipher_algo;
                sa_ctx->xf[idx].b.cipher.key.data = sa->cipher_key;
                sa_ctx->xf[idx].b.cipher.key.length =
                sa_ctx->xf[idx].b.cipher.op =
                    RTE_CRYPTO_CIPHER_OP_DECRYPT;
                sa_ctx->xf[idx].b.next = NULL;
                sa_ctx->xf[idx].b.cipher.iv.offset = IV_OFFSET;
                sa_ctx->xf[idx].b.cipher.iv.length = iv_length;
                sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_AUTH;
                sa_ctx->xf[idx].a.auth.algo = sa->auth_algo;
                sa_ctx->xf[idx].a.auth.key.data = sa->auth_key;
                sa_ctx->xf[idx].a.auth.key.length =
                sa_ctx->xf[idx].a.auth.digest_length =
                sa_ctx->xf[idx].a.auth.op =
                    RTE_CRYPTO_AUTH_OP_VERIFY;
            } else { /* outbound */
                sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
                sa_ctx->xf[idx].a.cipher.algo = sa->cipher_algo;
                sa_ctx->xf[idx].a.cipher.key.data = sa->cipher_key;
                sa_ctx->xf[idx].a.cipher.key.length =
                sa_ctx->xf[idx].a.cipher.op =
                    RTE_CRYPTO_CIPHER_OP_ENCRYPT;
                sa_ctx->xf[idx].a.next = NULL;
                sa_ctx->xf[idx].a.cipher.iv.offset = IV_OFFSET;
                sa_ctx->xf[idx].a.cipher.iv.length = iv_length;
                sa_ctx->xf[idx].b.type = RTE_CRYPTO_SYM_XFORM_AUTH;
                sa_ctx->xf[idx].b.auth.algo = sa->auth_algo;
                sa_ctx->xf[idx].b.auth.key.data = sa->auth_key;
                sa_ctx->xf[idx].b.auth.key.length =
                sa_ctx->xf[idx].b.auth.digest_length =
                sa_ctx->xf[idx].b.auth.op =
                    RTE_CRYPTO_AUTH_OP_GENERATE;
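            /*
             * Chain the two transforms so that .a always runs first:
             * inbound auth(verify) -> cipher(decrypt), outbound
             * cipher(encrypt) -> auth(generate).
             */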
            sa_ctx->xf[idx].a.next = &sa_ctx->xf[idx].b;
            sa_ctx->xf[idx].b.next = NULL;
            sa->xforms = &sa_ctx->xf[idx].a;
                RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL ||
                RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
            rc = create_inline_session(skt_ctx, sa, ips);
                RTE_LOG(ERR, IPSEC_ESP,
                    "create_inline_session() failed\n");
        print_one_sa_rule(sa, inbound);
sa_out_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
        uint32_t nb_entries, struct socket_ctx *skt_ctx)
    return sa_add_rules(sa_ctx, entries, nb_entries, 0, skt_ctx);
sa_in_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
        uint32_t nb_entries, struct socket_ctx *skt_ctx)
    return sa_add_rules(sa_ctx, entries, nb_entries, 1, skt_ctx);
 * helper function, fills parameters that are identical for all SAs
fill_ipsec_app_sa_prm(struct rte_ipsec_sa_prm *prm,
        const struct app_sa_prm *app_prm)
    memset(prm, 0, sizeof(*prm));
    prm->flags = app_prm->flags;
    prm->ipsec_xform.options.esn = app_prm->enable_esn;
    prm->ipsec_xform.replay_win_sz = app_prm->window_size;
fill_ipsec_sa_prm(struct rte_ipsec_sa_prm *prm, const struct ipsec_sa *ss,
        const struct rte_ipv4_hdr *v4, struct rte_ipv6_hdr *v6)
     * Try to get the SPI's next proto by searching for that SPI in the SPD.
     * Probably not the optimal way, but there seems nothing
    rc = get_spi_proto(ss->spi, ss->direction, NULL, NULL);
    fill_ipsec_app_sa_prm(prm, &app_sa_prm);
    prm->userdata = (uintptr_t)ss;
    /* setup ipsec xform */
    prm->ipsec_xform.spi = ss->spi;
    prm->ipsec_xform.salt = ss->salt;
    prm->ipsec_xform.direction = ss->direction;
    prm->ipsec_xform.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP;
    prm->ipsec_xform.mode = (IS_TRANSPORT(ss->flags)) ?
        RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT :
        RTE_SECURITY_IPSEC_SA_MODE_TUNNEL;
    prm->ipsec_xform.options.ecn = 1;
    prm->ipsec_xform.options.copy_dscp = 1;
    if (IS_IP4_TUNNEL(ss->flags)) {
        prm->ipsec_xform.tunnel.type = RTE_SECURITY_IPSEC_TUNNEL_IPV4;
        prm->tun.hdr_len = sizeof(*v4);
        prm->tun.next_proto = rc;
    } else if (IS_IP6_TUNNEL(ss->flags)) {
        prm->ipsec_xform.tunnel.type = RTE_SECURITY_IPSEC_TUNNEL_IPV6;
        prm->tun.hdr_len = sizeof(*v6);
        prm->tun.next_proto = rc;
        /* transport mode */
        prm->trs.proto = rc;
    /* setup crypto section */
    prm->crypto_xform = ss->xforms;
fill_ipsec_session(struct rte_ipsec_session *ss, struct rte_ipsec_sa *sa)
    if (ss->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
            ss->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) {
        if (ss->security.ses != NULL) {
            rc = rte_ipsec_session_prepare(ss);
                memset(ss, 0, sizeof(*ss));
 * Initialise related rte_ipsec_sa object.
ipsec_sa_init(struct ipsec_sa *lsa, struct rte_ipsec_sa *sa, uint32_t sa_size)
    struct rte_ipsec_sa_prm prm;
    struct rte_ipsec_session *ips;
    struct rte_ipv4_hdr v4 = {
        .version_ihl = IPVERSION << 4 |
            sizeof(v4) / RTE_IPV4_IHL_MULTIPLIER,
        .time_to_live = IPDEFTTL,
        .next_proto_id = IPPROTO_ESP,
        .src_addr = lsa->src.ip.ip4,
        .dst_addr = lsa->dst.ip.ip4,
    struct rte_ipv6_hdr v6 = {
        .vtc_flow = htonl(IP6_VERSION << 28),
        .proto = IPPROTO_ESP,
    if (IS_IP6_TUNNEL(lsa->flags)) {
        memcpy(v6.src_addr, lsa->src.ip.ip6.ip6_b, sizeof(v6.src_addr));
        memcpy(v6.dst_addr, lsa->dst.ip.ip6.ip6_b, sizeof(v6.dst_addr));
    rc = fill_ipsec_sa_prm(&prm, lsa, &v4, &v6);
        rc = rte_ipsec_sa_init(sa, &prm, sa_size);
    /* init primary processing session */
    ips = ipsec_get_primary_session(lsa);
    rc = fill_ipsec_session(ips, sa);
    /* init inline fallback processing session */
    if (lsa->fallback_sessions == 1)
        rc = fill_ipsec_session(ipsec_get_fallback_session(lsa), sa);
 * Allocate space and init rte_ipsec_sa structures,
ipsec_satbl_init(struct sa_ctx *ctx, uint32_t nb_ent, int32_t socket)
    struct rte_ipsec_sa *sa;
    struct ipsec_sa *lsa;
    struct rte_ipsec_sa_prm prm;
    /* determine SA size */
    fill_ipsec_sa_prm(&prm, ctx->sa + idx, NULL, NULL);
    sz = rte_ipsec_sa_size(&prm);
        RTE_LOG(ERR, IPSEC, "%s(%p, %u, %d): "
            "failed to determine SA size, error code: %d\n",
            __func__, ctx, nb_ent, socket, sz);
    ctx->satbl = rte_zmalloc_socket(NULL, tsz, RTE_CACHE_LINE_SIZE, socket);
    if (ctx->satbl == NULL) {
            "%s(%p, %u, %d): failed to allocate %zu bytes\n",
            __func__, ctx, nb_ent, socket, tsz);
    for (i = 0; i != nb_ent && rc == 0; i++) {
        sa = (struct rte_ipsec_sa *)((uintptr_t)ctx->satbl + sz * i);
        lsa = ctx->sa + idx;
        rc = ipsec_sa_init(lsa, sa, sz);
sa_cmp(const void *p, const void *q)
    uint32_t spi1 = ((const struct ipsec_sa *)p)->spi;
    uint32_t spi2 = ((const struct ipsec_sa *)q)->spi;
    /* compare explicitly: (int)(spi1 - spi2) can wrap around for large SPIs */
    return (spi1 < spi2) ? -1 : (spi1 > spi2);
 * Walk through all SA rules to find an SA with given SPI
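 * (relies on the table being sorted by SPI beforehand; see the qsort()
 * calls with sa_cmp() at the bottom of this file)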
sa_spi_present(struct sa_ctx *sa_ctx, uint32_t spi, int inbound)
    struct ipsec_sa *sa;
    struct ipsec_sa tmpl;
    const struct ipsec_sa *sar;
    sa = bsearch(&tmpl, sar, num, sizeof(struct ipsec_sa), sa_cmp);
        return RTE_PTR_DIFF(sa, sar) / sizeof(struct ipsec_sa);
sa_init(struct socket_ctx *ctx, int32_t socket_id)
        rte_exit(EXIT_FAILURE, "NULL context.\n");
    if (ctx->sa_in != NULL)
        rte_exit(EXIT_FAILURE, "Inbound SA DB for socket %u already "
            "initialized\n", socket_id);
    if (ctx->sa_out != NULL)
        rte_exit(EXIT_FAILURE, "Outbound SA DB for socket %u already "
            "initialized\n", socket_id);
        ctx->sa_in = sa_create(name, socket_id, nb_sa_in);
        if (ctx->sa_in == NULL)
            rte_exit(EXIT_FAILURE, "Error [%d] creating SA "
                "context %s in socket %d\n", rte_errno,
        rc = ipsec_sad_create(name, &ctx->sa_in->sad, socket_id,
            rte_exit(EXIT_FAILURE, "failed to init SAD\n");
        sa_in_add_rules(ctx->sa_in, sa_in, nb_sa_in, ctx);
        if (app_sa_prm.enable != 0) {
            rc = ipsec_satbl_init(ctx->sa_in, nb_sa_in,
                rte_exit(EXIT_FAILURE,
                    "failed to init inbound SAs\n");
        RTE_LOG(WARNING, IPSEC, "No SA Inbound rule specified\n");
    if (nb_sa_out > 0) {
        ctx->sa_out = sa_create(name, socket_id, nb_sa_out);
        if (ctx->sa_out == NULL)
            rte_exit(EXIT_FAILURE, "Error [%d] creating SA "
                "context %s in socket %d\n", rte_errno,
        sa_out_add_rules(ctx->sa_out, sa_out, nb_sa_out, ctx);
        if (app_sa_prm.enable != 0) {
            rc = ipsec_satbl_init(ctx->sa_out, nb_sa_out,
                rte_exit(EXIT_FAILURE,
                    "failed to init outbound SAs\n");
        RTE_LOG(WARNING, IPSEC, "No SA Outbound rule "
inbound_sa_check(struct sa_ctx *sa_ctx, struct rte_mbuf *m, uint32_t sa_idx)
    struct ipsec_mbuf_metadata *priv;
    struct ipsec_sa *sa;
        return (sa_ctx->sa[sa_idx].spi == sa->spi);
    RTE_LOG(ERR, IPSEC, "SA not saved in private data\n");
inbound_sa_lookup(struct sa_ctx *sa_ctx, struct rte_mbuf *pkts[],
        void *sa_arr[], uint16_t nb_pkts)
    struct ipsec_sa *sa;
    sad_lookup(&sa_ctx->sad, pkts, sa_arr, nb_pkts);
     * Mark the need for an inline offload fallback on the LSB of the SA
     * pointer. Thanks to the packet grouping mechanism that ipsec_process()
     * uses, packets marked for fallback processing will form a separate group.
     * Because it is then no longer safe to dereference the SA pointer, it is
     * cast to a generic pointer to prevent unintentional use. Use
     * ipsec_mask_saptr() to recover a valid struct pointer.
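     * e.g. assuming IPSEC_SA_OFFLOAD_FALLBACK_FLAG is bit 0, an SA pointer
     * 0x7f00c0de5100 is stored here as 0x7f00c0de5101; ipsec_mask_saptr()
     * clears that bit again before the struct is dereferenced.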
    for (i = 0; i < nb_pkts; i++) {
        if (sa_arr[i] == NULL)
        result_sa = sa = sa_arr[i];
        if (MBUF_NO_SEC_OFFLOAD(pkts[i]) &&
                sa->fallback_sessions > 0) {
            uintptr_t intsa = (uintptr_t)sa;
            intsa |= IPSEC_SA_OFFLOAD_FALLBACK_FLAG;
            result_sa = (void *)intsa;
        sa_arr[i] = result_sa;
outbound_sa_lookup(struct sa_ctx *sa_ctx, uint32_t sa_idx[],
        void *sa[], uint16_t nb_pkts)
    for (i = 0; i < nb_pkts; i++)
        sa[i] = &sa_ctx->sa[sa_idx[i]];
 * Select HW offloads to be used.
sa_check_offloads(uint16_t port_id, uint64_t *rx_offloads,
        uint64_t *tx_offloads)
    struct ipsec_sa *rule;
    enum rte_security_session_action_type rule_type;
    /* Check for inbound rules that use offloads and use this port */
    for (idx_sa = 0; idx_sa < nb_sa_in; idx_sa++) {
        rule = &sa_in[idx_sa];
        rule_type = ipsec_get_action_type(rule);
        if ((rule_type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
                RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
                && rule->portid == port_id)
            *rx_offloads |= DEV_RX_OFFLOAD_SECURITY;
    /* Check for outbound rules that use offloads and use this port */
    for (idx_sa = 0; idx_sa < nb_sa_out; idx_sa++) {
        rule = &sa_out[idx_sa];
        rule_type = ipsec_get_action_type(rule);
        if ((rule_type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
                RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
                && rule->portid == port_id)
            *tx_offloads |= DEV_TX_OFFLOAD_SECURITY;
    qsort(sa_in, nb_sa_in, sizeof(struct ipsec_sa), sa_cmp);
    qsort(sa_out, nb_sa_out, sizeof(struct ipsec_sa), sa_cmp);