1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2016-2017 Intel Corporation
6 * Security Associations
9 #include <netinet/in.h>
10 #include <netinet/ip.h>
11 #include <netinet/ip6.h>
13 #include <rte_memzone.h>
14 #include <rte_crypto.h>
15 #include <rte_security.h>
16 #include <rte_cryptodev.h>
17 #include <rte_byteorder.h>
18 #include <rte_errno.h>
20 #include <rte_random.h>
21 #include <rte_ethdev.h>
22 #include <rte_malloc.h>
30 #define IP4_FULL_MASK (sizeof(((struct ip_addr *)NULL)->ip.ip4) * CHAR_BIT)
32 #define IP6_FULL_MASK (sizeof(((struct ip_addr *)NULL)->ip.ip6.ip6) * CHAR_BIT)
34 #define MBUF_NO_SEC_OFFLOAD(m) ((m->ol_flags & PKT_RX_SEC_OFFLOAD) == 0)
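/*
 * Helper macros: the *_FULL_MASK values are the prefix lengths of a fully
 * specified IPv4/IPv6 address, and MBUF_NO_SEC_OFFLOAD() tests that the
 * PKT_RX_SEC_OFFLOAD flag is clear, i.e. the packet was not handled by the
 * inline security offload and may need the fallback session (see the inbound
 * lookup below).
 */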
36 struct supported_cipher_algo {
38 enum rte_crypto_cipher_algorithm algo;
44 struct supported_auth_algo {
46 enum rte_crypto_auth_algorithm algo;
52 struct supported_aead_algo {
54 enum rte_crypto_aead_algorithm algo;
63 const struct supported_cipher_algo cipher_algos[] = {
66 .algo = RTE_CRYPTO_CIPHER_NULL,
72 .keyword = "aes-128-cbc",
73 .algo = RTE_CRYPTO_CIPHER_AES_CBC,
79 .keyword = "aes-256-cbc",
80 .algo = RTE_CRYPTO_CIPHER_AES_CBC,
86 .keyword = "aes-128-ctr",
87 .algo = RTE_CRYPTO_CIPHER_AES_CTR,
93 .keyword = "3des-cbc",
94 .algo = RTE_CRYPTO_CIPHER_3DES_CBC,
101 const struct supported_auth_algo auth_algos[] = {
104 .algo = RTE_CRYPTO_AUTH_NULL,
110 .keyword = "sha1-hmac",
111 .algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
116 .keyword = "sha256-hmac",
117 .algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
123 const struct supported_aead_algo aead_algos[] = {
125 .keyword = "aes-128-gcm",
126 .algo = RTE_CRYPTO_AEAD_AES_GCM,
135 static struct ipsec_sa sa_out[IPSEC_SA_MAX_ENTRIES];
136 static uint32_t nb_sa_out;
138 static struct ipsec_sa sa_in[IPSEC_SA_MAX_ENTRIES];
139 static uint32_t nb_sa_in;
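/*
 * Parsed SA rules are accumulated in the two tables above (one per
 * direction) by parse_sa_tokens() and installed into the per-socket SA
 * contexts by sa_init().
 */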
141 static const struct supported_cipher_algo *
142 find_match_cipher_algo(const char *cipher_keyword)
146 for (i = 0; i < RTE_DIM(cipher_algos); i++) {
147 const struct supported_cipher_algo *algo =
150 if (strcmp(cipher_keyword, algo->keyword) == 0)
157 static const struct supported_auth_algo *
158 find_match_auth_algo(const char *auth_keyword)
162 for (i = 0; i < RTE_DIM(auth_algos); i++) {
163 const struct supported_auth_algo *algo =
166 if (strcmp(auth_keyword, algo->keyword) == 0)
173 static const struct supported_aead_algo *
174 find_match_aead_algo(const char *aead_keyword)
178 for (i = 0; i < RTE_DIM(aead_algos); i++) {
179 const struct supported_aead_algo *algo =
182 if (strcmp(aead_keyword, algo->keyword) == 0)
190 * parse x:x:x:x.... hex number key string into uint8_t *key
192 * > 0: number of bytes parsed
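 *
 * Illustrative example (hypothetical key string): parsing "de:ad:be:ef"
 * stores the 4 bytes {0xde, 0xad, 0xbe, 0xef} into key and returns 4.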
196 parse_key_string(const char *key_str, uint8_t *key)
198 const char *pt_start = key_str, *pt_end = key_str;
199 uint32_t nb_bytes = 0;
201 while (pt_end != NULL) {
202 char sub_str[3] = {0};
204 pt_end = strchr(pt_start, ':');
206 if (pt_end == NULL) {
207 if (strlen(pt_start) > 2)
209 strncpy(sub_str, pt_start, 2);
211 if (pt_end - pt_start > 2)
214 strncpy(sub_str, pt_start, pt_end - pt_start);
215 pt_start = pt_end + 1;
218 key[nb_bytes++] = strtol(sub_str, NULL, 16);
225 parse_sa_tokens(char **tokens, uint32_t n_tokens,
226 struct parse_status *status)
228 struct ipsec_sa *rule = NULL;
229 struct rte_ipsec_session *ips;
230	uint32_t ti; /* token index */
231	uint32_t *ri; /* rule index */
232 uint32_t cipher_algo_p = 0;
233 uint32_t auth_algo_p = 0;
234 uint32_t aead_algo_p = 0;
239 uint32_t portid_p = 0;
240 uint32_t fallback_p = 0;
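/*
 * For orientation, a rule handled by this parser corresponds to one line of
 * the SA section of the configuration file, e.g. (values purely
 * illustrative):
 *   sa out 5 aead_algo aes-128-gcm aead_key de:ad:...:ef mode ipv4-tunnel
 *      src 172.16.1.5 dst 172.16.2.5 type inline-crypto-offload port_id 0
 */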
242 if (strcmp(tokens[0], "in") == 0) {
245 APP_CHECK(*ri <= IPSEC_SA_MAX_ENTRIES - 1, status,
246 "too many sa rules, abort insertion\n");
247 if (status->status < 0)
251 rule->direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS;
255 APP_CHECK(*ri <= IPSEC_SA_MAX_ENTRIES - 1, status,
256 "too many sa rules, abort insertion\n");
257 if (status->status < 0)
261 rule->direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS;
265 APP_CHECK_TOKEN_IS_NUM(tokens, 1, status);
266 if (status->status < 0)
268 if (atoi(tokens[1]) == INVALID_SPI)
270 rule->spi = atoi(tokens[1]);
271 ips = ipsec_get_primary_session(rule);
273 for (ti = 2; ti < n_tokens; ti++) {
274 if (strcmp(tokens[ti], "mode") == 0) {
275 APP_CHECK_PRESENCE(mode_p, tokens[ti], status);
276 if (status->status < 0)
279 INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
280 if (status->status < 0)
283 if (strcmp(tokens[ti], "ipv4-tunnel") == 0)
284 rule->flags = IP4_TUNNEL;
285 else if (strcmp(tokens[ti], "ipv6-tunnel") == 0)
286 rule->flags = IP6_TUNNEL;
287 else if (strcmp(tokens[ti], "transport") == 0)
288 rule->flags = TRANSPORT;
290 APP_CHECK(0, status, "unrecognized "
291 "input \"%s\"", tokens[ti]);
299 if (strcmp(tokens[ti], "cipher_algo") == 0) {
300 const struct supported_cipher_algo *algo;
303 APP_CHECK_PRESENCE(cipher_algo_p, tokens[ti],
305 if (status->status < 0)
308 INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
309 if (status->status < 0)
312 algo = find_match_cipher_algo(tokens[ti]);
314 APP_CHECK(algo != NULL, status, "unrecognized "
315 "input \"%s\"", tokens[ti]);
317 if (status->status < 0)
320 rule->cipher_algo = algo->algo;
321 rule->block_size = algo->block_size;
322 rule->iv_len = algo->iv_len;
323 rule->cipher_key_len = algo->key_len;
325 /* for NULL algorithm, no cipher key required */
326 if (rule->cipher_algo == RTE_CRYPTO_CIPHER_NULL) {
331 INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
332 if (status->status < 0)
335 APP_CHECK(strcmp(tokens[ti], "cipher_key") == 0,
336 status, "unrecognized input \"%s\", "
337 "expect \"cipher_key\"", tokens[ti]);
338 if (status->status < 0)
341 INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
342 if (status->status < 0)
345 key_len = parse_key_string(tokens[ti],
347 APP_CHECK(key_len == rule->cipher_key_len, status,
348 "unrecognized input \"%s\"", tokens[ti]);
349 if (status->status < 0)
352 if (algo->algo == RTE_CRYPTO_CIPHER_AES_CBC ||
353 algo->algo == RTE_CRYPTO_CIPHER_3DES_CBC)
354 rule->salt = (uint32_t)rte_rand();
356 if (algo->algo == RTE_CRYPTO_CIPHER_AES_CTR) {
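/*
 * For aes-128-ctr the configured key string is expected to carry 4
 * extra bytes after the cipher key proper; that tail is copied into
 * rule->salt below instead of using a random salt.
 */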
358 rule->cipher_key_len = key_len;
360 &rule->cipher_key[key_len], 4);
367 if (strcmp(tokens[ti], "auth_algo") == 0) {
368 const struct supported_auth_algo *algo;
371 APP_CHECK_PRESENCE(auth_algo_p, tokens[ti],
373 if (status->status < 0)
376 INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
377 if (status->status < 0)
380 algo = find_match_auth_algo(tokens[ti]);
381 APP_CHECK(algo != NULL, status, "unrecognized "
382 "input \"%s\"", tokens[ti]);
384 if (status->status < 0)
387 rule->auth_algo = algo->algo;
388 rule->auth_key_len = algo->key_len;
389 rule->digest_len = algo->digest_len;
391	/* NULL algorithm and combined algos do not require an auth key */
394 if (algo->key_not_req) {
399 INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
400 if (status->status < 0)
403 APP_CHECK(strcmp(tokens[ti], "auth_key") == 0,
404 status, "unrecognized input \"%s\", "
405 "expect \"auth_key\"", tokens[ti]);
406 if (status->status < 0)
409 INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
410 if (status->status < 0)
413 key_len = parse_key_string(tokens[ti],
415 APP_CHECK(key_len == rule->auth_key_len, status,
416 "unrecognized input \"%s\"", tokens[ti]);
417 if (status->status < 0)
424 if (strcmp(tokens[ti], "aead_algo") == 0) {
425 const struct supported_aead_algo *algo;
428 APP_CHECK_PRESENCE(aead_algo_p, tokens[ti],
430 if (status->status < 0)
433 INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
434 if (status->status < 0)
437 algo = find_match_aead_algo(tokens[ti]);
439 APP_CHECK(algo != NULL, status, "unrecognized "
440 "input \"%s\"", tokens[ti]);
442 if (status->status < 0)
445 rule->aead_algo = algo->algo;
446 rule->cipher_key_len = algo->key_len;
447 rule->digest_len = algo->digest_len;
448 rule->aad_len = algo->aad_len;
449 rule->block_size = algo->block_size;
450 rule->iv_len = algo->iv_len;
452 INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
453 if (status->status < 0)
456 APP_CHECK(strcmp(tokens[ti], "aead_key") == 0,
457 status, "unrecognized input \"%s\", "
458 "expect \"aead_key\"", tokens[ti]);
459 if (status->status < 0)
462 INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
463 if (status->status < 0)
466 key_len = parse_key_string(tokens[ti],
468 APP_CHECK(key_len == rule->cipher_key_len, status,
469 "unrecognized input \"%s\"", tokens[ti]);
470 if (status->status < 0)
474 rule->cipher_key_len = key_len;
476 &rule->cipher_key[key_len], 4);
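/* as with aes-128-ctr above, the tail of the AEAD key string supplies the 4-byte salt */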
482 if (strcmp(tokens[ti], "src") == 0) {
483 APP_CHECK_PRESENCE(src_p, tokens[ti], status);
484 if (status->status < 0)
487 INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
488 if (status->status < 0)
491 if (IS_IP4_TUNNEL(rule->flags)) {
494 APP_CHECK(parse_ipv4_addr(tokens[ti],
495 &ip, NULL) == 0, status,
496 "unrecognized input \"%s\", "
497 "expect valid ipv4 addr",
499 if (status->status < 0)
501 rule->src.ip.ip4 = rte_bswap32(
502 (uint32_t)ip.s_addr);
503 } else if (IS_IP6_TUNNEL(rule->flags)) {
506 APP_CHECK(parse_ipv6_addr(tokens[ti], &ip,
508 "unrecognized input \"%s\", "
509 "expect valid ipv6 addr",
511 if (status->status < 0)
513 memcpy(rule->src.ip.ip6.ip6_b,
515 } else if (IS_TRANSPORT(rule->flags)) {
516 APP_CHECK(0, status, "unrecognized input "
517 "\"%s\"", tokens[ti]);
525 if (strcmp(tokens[ti], "dst") == 0) {
526 APP_CHECK_PRESENCE(dst_p, tokens[ti], status);
527 if (status->status < 0)
530 INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
531 if (status->status < 0)
534 if (IS_IP4_TUNNEL(rule->flags)) {
537 APP_CHECK(parse_ipv4_addr(tokens[ti],
538 &ip, NULL) == 0, status,
539 "unrecognized input \"%s\", "
540 "expect valid ipv4 addr",
542 if (status->status < 0)
544 rule->dst.ip.ip4 = rte_bswap32(
545 (uint32_t)ip.s_addr);
546 } else if (IS_IP6_TUNNEL(rule->flags)) {
549 APP_CHECK(parse_ipv6_addr(tokens[ti], &ip,
551 "unrecognized input \"%s\", "
552 "expect valid ipv6 addr",
554 if (status->status < 0)
556 memcpy(rule->dst.ip.ip6.ip6_b, ip.s6_addr, 16);
557 } else if (IS_TRANSPORT(rule->flags)) {
558 APP_CHECK(0, status, "unrecognized "
559 "input \"%s\"", tokens[ti]);
567 if (strcmp(tokens[ti], "type") == 0) {
568 APP_CHECK_PRESENCE(type_p, tokens[ti], status);
569 if (status->status < 0)
572 INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
573 if (status->status < 0)
576 if (strcmp(tokens[ti], "inline-crypto-offload") == 0)
578 RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO;
579 else if (strcmp(tokens[ti],
580 "inline-protocol-offload") == 0)
582 RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL;
583 else if (strcmp(tokens[ti],
584 "lookaside-protocol-offload") == 0)
586 RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL;
587 else if (strcmp(tokens[ti], "no-offload") == 0)
588 ips->type = RTE_SECURITY_ACTION_TYPE_NONE;
590 APP_CHECK(0, status, "Invalid input \"%s\"",
599 if (strcmp(tokens[ti], "port_id") == 0) {
600 APP_CHECK_PRESENCE(portid_p, tokens[ti], status);
601 if (status->status < 0)
603 INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
604 if (status->status < 0)
606 rule->portid = atoi(tokens[ti]);
607 if (status->status < 0)
613 if (strcmp(tokens[ti], "fallback") == 0) {
614 struct rte_ipsec_session *fb;
616 APP_CHECK(app_sa_prm.enable, status, "Fallback session "
617 "not allowed for legacy mode.");
618 if (status->status < 0)
620 APP_CHECK(ips->type ==
621 RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO, status,
622 "Fallback session allowed if primary session "
623 "is of type inline-crypto-offload only.");
624 if (status->status < 0)
626 APP_CHECK(rule->direction ==
627 RTE_SECURITY_IPSEC_SA_DIR_INGRESS, status,
628 "Fallback session not allowed for egress "
630 if (status->status < 0)
632 APP_CHECK_PRESENCE(fallback_p, tokens[ti], status);
633 if (status->status < 0)
635 INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
636 if (status->status < 0)
638 fb = ipsec_get_fallback_session(rule);
639 if (strcmp(tokens[ti], "lookaside-none") == 0) {
640 fb->type = RTE_SECURITY_ACTION_TYPE_NONE;
642 APP_CHECK(0, status, "unrecognized fallback "
643 "type %s.", tokens[ti]);
647 rule->fallback_sessions = 1;
652	/* unrecognizable input */
653 APP_CHECK(0, status, "unrecognized input \"%s\"",
659 APP_CHECK(cipher_algo_p == 0, status,
660 "AEAD used, no need for cipher options");
661 if (status->status < 0)
664 APP_CHECK(auth_algo_p == 0, status,
665 "AEAD used, no need for auth options");
666 if (status->status < 0)
669 APP_CHECK(cipher_algo_p == 1, status, "missing cipher or AEAD options");
670 if (status->status < 0)
673 APP_CHECK(auth_algo_p == 1, status, "missing auth or AEAD options");
674 if (status->status < 0)
678 APP_CHECK(mode_p == 1, status, "missing mode option");
679 if (status->status < 0)
682 if ((ips->type != RTE_SECURITY_ACTION_TYPE_NONE) && (portid_p == 0))
683 printf("Missing portid option, falling back to non-offload\n");
685 if (!type_p || !portid_p) {
686 ips->type = RTE_SECURITY_ACTION_TYPE_NONE;
694 print_one_sa_rule(const struct ipsec_sa *sa, int inbound)
698 const struct rte_ipsec_session *ips;
699 const struct rte_ipsec_session *fallback_ips;
701 printf("\tspi_%s(%3u):", inbound?"in":"out", sa->spi);
703 for (i = 0; i < RTE_DIM(cipher_algos); i++) {
704 if (cipher_algos[i].algo == sa->cipher_algo &&
705 cipher_algos[i].key_len == sa->cipher_key_len) {
706 printf("%s ", cipher_algos[i].keyword);
711 for (i = 0; i < RTE_DIM(auth_algos); i++) {
712 if (auth_algos[i].algo == sa->auth_algo) {
713 printf("%s ", auth_algos[i].keyword);
718 for (i = 0; i < RTE_DIM(aead_algos); i++) {
719 if (aead_algos[i].algo == sa->aead_algo) {
720 printf("%s ", aead_algos[i].keyword);
727 switch (WITHOUT_TRANSPORT_VERSION(sa->flags)) {
729 printf("IP4Tunnel ");
730 uint32_t_to_char(sa->src.ip.ip4, &a, &b, &c, &d);
731 printf("%hhu.%hhu.%hhu.%hhu ", d, c, b, a);
732 uint32_t_to_char(sa->dst.ip.ip4, &a, &b, &c, &d);
733 printf("%hhu.%hhu.%hhu.%hhu", d, c, b, a);
736 printf("IP6Tunnel ");
737 for (i = 0; i < 16; i++) {
738 if (i % 2 && i != 15)
739 printf("%.2x:", sa->src.ip.ip6.ip6_b[i]);
741 printf("%.2x", sa->src.ip.ip6.ip6_b[i]);
744 for (i = 0; i < 16; i++) {
745 if (i % 2 && i != 15)
746 printf("%.2x:", sa->dst.ip.ip6.ip6_b[i]);
748 printf("%.2x", sa->dst.ip.ip6.ip6_b[i]);
752 printf("Transport ");
756 ips = &sa->sessions[IPSEC_SESSION_PRIMARY];
759 case RTE_SECURITY_ACTION_TYPE_NONE:
760 printf("no-offload ");
762 case RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO:
763 printf("inline-crypto-offload ");
765 case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
766 printf("inline-protocol-offload ");
768 case RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL:
769 printf("lookaside-protocol-offload ");
773 fallback_ips = &sa->sessions[IPSEC_SESSION_FALLBACK];
774 if (fallback_ips != NULL && sa->fallback_sessions > 0) {
775 printf("inline fallback: ");
776 if (fallback_ips->type == RTE_SECURITY_ACTION_TYPE_NONE)
777 printf("lookaside-none");
785 void *satbl; /* pointer to array of rte_ipsec_sa objects*/
786 struct ipsec_sa sa[IPSEC_SA_MAX_ENTRIES];
789 struct rte_crypto_sym_xform a;
790 struct rte_crypto_sym_xform b;
792 } xf[IPSEC_SA_MAX_ENTRIES];
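/*
 * Each SA owns a pair of symmetric crypto transforms: either a cipher and an
 * auth transform chained together, or a single AEAD transform in 'a'.
 * sa_add_rules() fills and links them according to the rule's direction.
 */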
795 static struct sa_ctx *
796 sa_create(const char *name, int32_t socket_id)
799 struct sa_ctx *sa_ctx;
801 const struct rte_memzone *mz;
803 snprintf(s, sizeof(s), "%s_%u", name, socket_id);
805 /* Create SA array table */
806 printf("Creating SA context with %u maximum entries on socket %d\n",
807 IPSEC_SA_MAX_ENTRIES, socket_id);
809 mz_size = sizeof(struct sa_ctx);
810 mz = rte_memzone_reserve(s, mz_size, socket_id,
811 RTE_MEMZONE_1GB | RTE_MEMZONE_SIZE_HINT_ONLY);
813 printf("Failed to allocate SA DB memory\n");
818 sa_ctx = (struct sa_ctx *)mz->addr;
824 check_eth_dev_caps(uint16_t portid, uint32_t inbound)
826 struct rte_eth_dev_info dev_info;
829 retval = rte_eth_dev_info_get(portid, &dev_info);
832 "Error during getting device (port %u) info: %s\n",
833 portid, strerror(-retval));
839 if ((dev_info.rx_offload_capa &
840 DEV_RX_OFFLOAD_SECURITY) == 0) {
841 RTE_LOG(WARNING, PORT,
842 "hardware RX IPSec offload is not supported\n");
846 } else { /* outbound */
847 if ((dev_info.tx_offload_capa &
848 DEV_TX_OFFLOAD_SECURITY) == 0) {
849 RTE_LOG(WARNING, PORT,
850 "hardware TX IPSec offload is not supported\n");
858 * Helper function, tries to determine next_proto for SPI
859  * by searching through SP rules.
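 * Returns the inner protocol (IPPROTO_IPIP for IPv4, IPPROTO_IPV6 for IPv6)
 * when exactly one address family references the SPI, and a negative value
 * when the SPI is unused or used by both IPv4 and IPv6 rules.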
862 get_spi_proto(uint32_t spi, enum rte_security_ipsec_sa_direction dir,
863 struct ip_addr ip_addr[2], uint32_t mask[2])
867 rc4 = sp4_spi_present(spi, dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
869 rc6 = sp6_spi_present(spi, dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
875 "%s: SPI %u used simultaeously by "
876 "IPv4(%d) and IPv6 (%d) SP rules\n",
877 __func__, spi, rc4, rc6);
881 } else if (rc6 < 0) {
883 "%s: SPI %u is not used by any SP rule\n",
891  * Helper function for getting source and destination IP addresses
892  * from the SP. Needed for inline crypto transport mode, as addresses are not
893  * provided in the config file for that mode. It checks whether an SP exists for
894  * the current SA and, depending on the protocol returned, stores the
895  * appropriate addresses from the SP into the SA.
898 sa_add_address_inline_crypto(struct ipsec_sa *sa)
901 struct ip_addr ip_addr[2];
904 protocol = get_spi_proto(sa->spi, sa->direction, ip_addr, mask);
907 else if (protocol == IPPROTO_IPIP) {
908 sa->flags |= IP4_TRANSPORT;
909 if (mask[0] == IP4_FULL_MASK &&
910 mask[1] == IP4_FULL_MASK &&
911 ip_addr[0].ip.ip4 != 0 &&
912 ip_addr[1].ip.ip4 != 0) {
914 sa->src.ip.ip4 = ip_addr[0].ip.ip4;
915 sa->dst.ip.ip4 = ip_addr[1].ip.ip4;
918 "%s: No valid address or mask entry in"
919 " IPv4 SP rule for SPI %u\n",
923 } else if (protocol == IPPROTO_IPV6) {
924 sa->flags |= IP6_TRANSPORT;
925 if (mask[0] == IP6_FULL_MASK &&
926 mask[1] == IP6_FULL_MASK &&
927 (ip_addr[0].ip.ip6.ip6[0] != 0 ||
928 ip_addr[0].ip.ip6.ip6[1] != 0) &&
929 (ip_addr[1].ip.ip6.ip6[0] != 0 ||
930 ip_addr[1].ip.ip6.ip6[1] != 0)) {
932 sa->src.ip.ip6 = ip_addr[0].ip.ip6;
933 sa->dst.ip.ip6 = ip_addr[1].ip.ip6;
936 "%s: No valid address or mask entry in"
937 " IPv6 SP rule for SPI %u\n",
946 sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
947 uint32_t nb_entries, uint32_t inbound,
948 struct socket_ctx *skt_ctx)
952 uint16_t iv_length, aad_length;
955 struct rte_ipsec_session *ips;
957	/* for ESN, the upper 32 bits of the SQN also need to be part of the AAD */
958 aad_length = (app_sa_prm.enable_esn != 0) ? sizeof(uint32_t) : 0;
960 for (i = 0; i < nb_entries; i++) {
961 idx = SPI2IDX(entries[i].spi);
962 sa = &sa_ctx->sa[idx];
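/*
 * SAs are stored at an index derived from the SPI (SPI2IDX); a rule whose
 * SPI maps to an already occupied slot is rejected below rather than
 * silently overwritten.
 */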
964 printf("Index %u already in use by SPI %u\n",
970 ips = ipsec_get_primary_session(sa);
972 if (ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL ||
973 ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
974 if (check_eth_dev_caps(sa->portid, inbound))
979 switch (WITHOUT_TRANSPORT_VERSION(sa->flags)) {
981 sa->src.ip.ip4 = rte_cpu_to_be_32(sa->src.ip.ip4);
982 sa->dst.ip.ip4 = rte_cpu_to_be_32(sa->dst.ip.ip4);
986 RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
988 sa_add_address_inline_crypto(sa);
989 if (inline_status < 0)
990 return inline_status;
995 if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_GCM) {
996 struct rte_ipsec_session *ips;
999 sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_AEAD;
1000 sa_ctx->xf[idx].a.aead.algo = sa->aead_algo;
1001 sa_ctx->xf[idx].a.aead.key.data = sa->cipher_key;
1002 sa_ctx->xf[idx].a.aead.key.length =
1004 sa_ctx->xf[idx].a.aead.op = (inbound == 1) ?
1005 RTE_CRYPTO_AEAD_OP_DECRYPT :
1006 RTE_CRYPTO_AEAD_OP_ENCRYPT;
1007 sa_ctx->xf[idx].a.next = NULL;
1008 sa_ctx->xf[idx].a.aead.iv.offset = IV_OFFSET;
1009 sa_ctx->xf[idx].a.aead.iv.length = iv_length;
1010 sa_ctx->xf[idx].a.aead.aad_length =
1011 sa->aad_len + aad_length;
1012 sa_ctx->xf[idx].a.aead.digest_length =
1015 sa->xforms = &sa_ctx->xf[idx].a;
1017 ips = ipsec_get_primary_session(sa);
1019 RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL ||
1021 RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
1022 rc = create_inline_session(skt_ctx, sa, ips);
1024 RTE_LOG(ERR, IPSEC_ESP,
1025 "create_inline_session() failed\n");
1029 print_one_sa_rule(sa, inbound);
1031 switch (sa->cipher_algo) {
1032 case RTE_CRYPTO_CIPHER_NULL:
1033 case RTE_CRYPTO_CIPHER_3DES_CBC:
1034 case RTE_CRYPTO_CIPHER_AES_CBC:
1035 iv_length = sa->iv_len;
1037 case RTE_CRYPTO_CIPHER_AES_CTR:
1041 RTE_LOG(ERR, IPSEC_ESP,
1042 "unsupported cipher algorithm %u\n",
1048 sa_ctx->xf[idx].b.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
1049 sa_ctx->xf[idx].b.cipher.algo = sa->cipher_algo;
1050 sa_ctx->xf[idx].b.cipher.key.data = sa->cipher_key;
1051 sa_ctx->xf[idx].b.cipher.key.length =
1053 sa_ctx->xf[idx].b.cipher.op =
1054 RTE_CRYPTO_CIPHER_OP_DECRYPT;
1055 sa_ctx->xf[idx].b.next = NULL;
1056 sa_ctx->xf[idx].b.cipher.iv.offset = IV_OFFSET;
1057 sa_ctx->xf[idx].b.cipher.iv.length = iv_length;
1059 sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_AUTH;
1060 sa_ctx->xf[idx].a.auth.algo = sa->auth_algo;
1061 sa_ctx->xf[idx].a.auth.key.data = sa->auth_key;
1062 sa_ctx->xf[idx].a.auth.key.length =
1064 sa_ctx->xf[idx].a.auth.digest_length =
1066 sa_ctx->xf[idx].a.auth.op =
1067 RTE_CRYPTO_AUTH_OP_VERIFY;
1068 } else { /* outbound */
1069 sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
1070 sa_ctx->xf[idx].a.cipher.algo = sa->cipher_algo;
1071 sa_ctx->xf[idx].a.cipher.key.data = sa->cipher_key;
1072 sa_ctx->xf[idx].a.cipher.key.length =
1074 sa_ctx->xf[idx].a.cipher.op =
1075 RTE_CRYPTO_CIPHER_OP_ENCRYPT;
1076 sa_ctx->xf[idx].a.next = NULL;
1077 sa_ctx->xf[idx].a.cipher.iv.offset = IV_OFFSET;
1078 sa_ctx->xf[idx].a.cipher.iv.length = iv_length;
1080 sa_ctx->xf[idx].b.type = RTE_CRYPTO_SYM_XFORM_AUTH;
1081 sa_ctx->xf[idx].b.auth.algo = sa->auth_algo;
1082 sa_ctx->xf[idx].b.auth.key.data = sa->auth_key;
1083 sa_ctx->xf[idx].b.auth.key.length =
1085 sa_ctx->xf[idx].b.auth.digest_length =
1087 sa_ctx->xf[idx].b.auth.op =
1088 RTE_CRYPTO_AUTH_OP_GENERATE;
1091 sa_ctx->xf[idx].a.next = &sa_ctx->xf[idx].b;
1092 sa_ctx->xf[idx].b.next = NULL;
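/*
 * The resulting chain is auth(verify) -> cipher(decrypt) for inbound SAs
 * and cipher(encrypt) -> auth(generate) for outbound ones.
 */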
1093 sa->xforms = &sa_ctx->xf[idx].a;
1095 print_one_sa_rule(sa, inbound);
1103 sa_out_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
1104 uint32_t nb_entries, struct socket_ctx *skt_ctx)
1106 return sa_add_rules(sa_ctx, entries, nb_entries, 0, skt_ctx);
1110 sa_in_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
1111 uint32_t nb_entries, struct socket_ctx *skt_ctx)
1113 return sa_add_rules(sa_ctx, entries, nb_entries, 1, skt_ctx);
1117 * helper function, fills parameters that are identical for all SAs
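 * (i.e. the application-wide settings: extra SA flags, ESN enable and the
 * anti-replay window size).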
1120 fill_ipsec_app_sa_prm(struct rte_ipsec_sa_prm *prm,
1121 const struct app_sa_prm *app_prm)
1123 memset(prm, 0, sizeof(*prm));
1125 prm->flags = app_prm->flags;
1126 prm->ipsec_xform.options.esn = app_prm->enable_esn;
1127 prm->ipsec_xform.replay_win_sz = app_prm->window_size;
1131 fill_ipsec_sa_prm(struct rte_ipsec_sa_prm *prm, const struct ipsec_sa *ss,
1132 const struct rte_ipv4_hdr *v4, struct rte_ipv6_hdr *v6)
1137 * Try to get SPI next proto by searching that SPI in SPD.
1138  * probably not the optimal way, but there seems to be nothing better.
1141 rc = get_spi_proto(ss->spi, ss->direction, NULL, NULL);
1145 fill_ipsec_app_sa_prm(prm, &app_sa_prm);
1146 prm->userdata = (uintptr_t)ss;
1148 /* setup ipsec xform */
1149 prm->ipsec_xform.spi = ss->spi;
1150 prm->ipsec_xform.salt = ss->salt;
1151 prm->ipsec_xform.direction = ss->direction;
1152 prm->ipsec_xform.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP;
1153 prm->ipsec_xform.mode = (IS_TRANSPORT(ss->flags)) ?
1154 RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT :
1155 RTE_SECURITY_IPSEC_SA_MODE_TUNNEL;
1156 prm->ipsec_xform.options.ecn = 1;
1157 prm->ipsec_xform.options.copy_dscp = 1;
1159 if (IS_IP4_TUNNEL(ss->flags)) {
1160 prm->ipsec_xform.tunnel.type = RTE_SECURITY_IPSEC_TUNNEL_IPV4;
1161 prm->tun.hdr_len = sizeof(*v4);
1162 prm->tun.next_proto = rc;
1164 } else if (IS_IP6_TUNNEL(ss->flags)) {
1165 prm->ipsec_xform.tunnel.type = RTE_SECURITY_IPSEC_TUNNEL_IPV6;
1166 prm->tun.hdr_len = sizeof(*v6);
1167 prm->tun.next_proto = rc;
1170 /* transport mode */
1171 prm->trs.proto = rc;
1174 /* setup crypto section */
1175 prm->crypto_xform = ss->xforms;
1180 fill_ipsec_session(struct rte_ipsec_session *ss, struct rte_ipsec_sa *sa)
1186 if (ss->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
1187 ss->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) {
1188 if (ss->security.ses != NULL) {
1189 rc = rte_ipsec_session_prepare(ss);
1191 memset(ss, 0, sizeof(*ss));
1199 * Initialise related rte_ipsec_sa object.
1202 ipsec_sa_init(struct ipsec_sa *lsa, struct rte_ipsec_sa *sa, uint32_t sa_size)
1205 struct rte_ipsec_sa_prm prm;
1206 struct rte_ipsec_session *ips;
1207 struct rte_ipv4_hdr v4 = {
1208 .version_ihl = IPVERSION << 4 |
1209 sizeof(v4) / RTE_IPV4_IHL_MULTIPLIER,
1210 .time_to_live = IPDEFTTL,
1211 .next_proto_id = IPPROTO_ESP,
1212 .src_addr = lsa->src.ip.ip4,
1213 .dst_addr = lsa->dst.ip.ip4,
1215 struct rte_ipv6_hdr v6 = {
1216 .vtc_flow = htonl(IP6_VERSION << 28),
1217 .proto = IPPROTO_ESP,
1220 if (IS_IP6_TUNNEL(lsa->flags)) {
1221 memcpy(v6.src_addr, lsa->src.ip.ip6.ip6_b, sizeof(v6.src_addr));
1222 memcpy(v6.dst_addr, lsa->dst.ip.ip6.ip6_b, sizeof(v6.dst_addr));
1225 rc = fill_ipsec_sa_prm(&prm, lsa, &v4, &v6);
1227 rc = rte_ipsec_sa_init(sa, &prm, sa_size);
1231 /* init primary processing session */
1232 ips = ipsec_get_primary_session(lsa);
1233 rc = fill_ipsec_session(ips, sa);
1237 /* init inline fallback processing session */
1238 if (lsa->fallback_sessions == 1)
1239 rc = fill_ipsec_session(ipsec_get_fallback_session(lsa), sa);
1245  * Allocate space and init rte_ipsec_sa structures,
1249 ipsec_satbl_init(struct sa_ctx *ctx, const struct ipsec_sa *ent,
1250 uint32_t nb_ent, int32_t socket)
1255 struct rte_ipsec_sa *sa;
1256 struct ipsec_sa *lsa;
1257 struct rte_ipsec_sa_prm prm;
1259 /* determine SA size */
1260 idx = SPI2IDX(ent[0].spi);
1261 fill_ipsec_sa_prm(&prm, ctx->sa + idx, NULL, NULL);
1262 sz = rte_ipsec_sa_size(&prm);
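/*
 * rte_ipsec_sa_size() reports how many bytes one SA object of this shape
 * needs; the table is a single allocation holding nb_ent such objects laid
 * out back to back (entry i starts at satbl + sz * i).
 */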
1264 RTE_LOG(ERR, IPSEC, "%s(%p, %u, %d): "
1265 "failed to determine SA size, error code: %d\n",
1266 __func__, ctx, nb_ent, socket, sz);
1272 ctx->satbl = rte_zmalloc_socket(NULL, tsz, RTE_CACHE_LINE_SIZE, socket);
1273 if (ctx->satbl == NULL) {
1275 "%s(%p, %u, %d): failed to allocate %zu bytes\n",
1276 __func__, ctx, nb_ent, socket, tsz);
1281 for (i = 0; i != nb_ent && rc == 0; i++) {
1283 idx = SPI2IDX(ent[i].spi);
1285 sa = (struct rte_ipsec_sa *)((uintptr_t)ctx->satbl + sz * i);
1286 lsa = ctx->sa + idx;
1288 rc = ipsec_sa_init(lsa, sa, sz);
1295 * Walk through all SA rules to find an SA with given SPI
1298 sa_spi_present(uint32_t spi, int inbound)
1301 const struct ipsec_sa *sar;
1311 for (i = 0; i != num; i++) {
1312 if (sar[i].spi == spi)
1320 sa_init(struct socket_ctx *ctx, int32_t socket_id)
1326 rte_exit(EXIT_FAILURE, "NULL context.\n");
1328 if (ctx->sa_in != NULL)
1329 rte_exit(EXIT_FAILURE, "Inbound SA DB for socket %u already "
1330 "initialized\n", socket_id);
1332 if (ctx->sa_out != NULL)
1333 rte_exit(EXIT_FAILURE, "Outbound SA DB for socket %u already "
1334 "initialized\n", socket_id);
1338 ctx->sa_in = sa_create(name, socket_id);
1339 if (ctx->sa_in == NULL)
1340 rte_exit(EXIT_FAILURE, "Error [%d] creating SA "
1341 "context %s in socket %d\n", rte_errno,
1344 sa_in_add_rules(ctx->sa_in, sa_in, nb_sa_in, ctx);
1346 if (app_sa_prm.enable != 0) {
1347 rc = ipsec_satbl_init(ctx->sa_in, sa_in, nb_sa_in,
1350 rte_exit(EXIT_FAILURE,
1351 "failed to init inbound SAs\n");
1354 RTE_LOG(WARNING, IPSEC, "No SA Inbound rule specified\n");
1356 if (nb_sa_out > 0) {
1358 ctx->sa_out = sa_create(name, socket_id);
1359 if (ctx->sa_out == NULL)
1360 rte_exit(EXIT_FAILURE, "Error [%d] creating SA "
1361 "context %s in socket %d\n", rte_errno,
1364 sa_out_add_rules(ctx->sa_out, sa_out, nb_sa_out, ctx);
1366 if (app_sa_prm.enable != 0) {
1367 rc = ipsec_satbl_init(ctx->sa_out, sa_out, nb_sa_out,
1370 rte_exit(EXIT_FAILURE,
1371 "failed to init outbound SAs\n");
1374 RTE_LOG(WARNING, IPSEC, "No SA Outbound rule "
1379 inbound_sa_check(struct sa_ctx *sa_ctx, struct rte_mbuf *m, uint32_t sa_idx)
1381 struct ipsec_mbuf_metadata *priv;
1382 struct ipsec_sa *sa;
1387 return (sa_ctx->sa[sa_idx].spi == sa->spi);
1389 RTE_LOG(ERR, IPSEC, "SA not saved in private data\n");
1394 single_inbound_lookup(struct ipsec_sa *sadb, struct rte_mbuf *pkt,
1397 struct rte_esp_hdr *esp;
1399 uint32_t *src4_addr;
1401 struct ipsec_sa *sa;
1406 ip = rte_pktmbuf_mtod(pkt, struct ip *);
1407 esp = rte_pktmbuf_mtod_offset(pkt, struct rte_esp_hdr *, pkt->l3_len);
1409 if (esp->spi == INVALID_SPI)
1412 result_sa = sa = &sadb[SPI2IDX(rte_be_to_cpu_32(esp->spi))];
1413 if (rte_be_to_cpu_32(esp->spi) != sa->spi)
1417	 * Mark the need for inline offload fallback on the LSB of the SA pointer.
1418	 * Thanks to the packet grouping mechanism used by ipsec_process,
1419	 * packets marked for fallback processing will form a separate group.
1421	 * Because the SA pointer is not safe to use directly, it is cast to a
1422	 * generic pointer to prevent unintentional use. Use ipsec_mask_saptr
1423	 * to get a valid struct pointer.
1425 if (MBUF_NO_SEC_OFFLOAD(pkt) && sa->fallback_sessions > 0) {
1426 uintptr_t intsa = (uintptr_t)sa;
1427 intsa |= IPSEC_SA_OFFLOAD_FALLBACK_FLAG;
1428 result_sa = (void *)intsa;
1431 switch (WITHOUT_TRANSPORT_VERSION(sa->flags)) {
1433 src4_addr = RTE_PTR_ADD(ip, offsetof(struct ip, ip_src));
1434 if ((ip->ip_v == IPVERSION) &&
1435 (sa->src.ip.ip4 == *src4_addr) &&
1436 (sa->dst.ip.ip4 == *(src4_addr + 1)))
1437 *sa_ret = result_sa;
1440 src6_addr = RTE_PTR_ADD(ip, offsetof(struct ip6_hdr, ip6_src));
1441 if ((ip->ip_v == IP6_VERSION) &&
1442 !memcmp(&sa->src.ip.ip6.ip6, src6_addr, 16) &&
1443 !memcmp(&sa->dst.ip.ip6.ip6, src6_addr + 16, 16))
1444 *sa_ret = result_sa;
1447 *sa_ret = result_sa;
1452 inbound_sa_lookup(struct sa_ctx *sa_ctx, struct rte_mbuf *pkts[],
1453 void *sa[], uint16_t nb_pkts)
1457 for (i = 0; i < nb_pkts; i++)
1458 single_inbound_lookup(sa_ctx->sa, pkts[i], &sa[i]);
1462 outbound_sa_lookup(struct sa_ctx *sa_ctx, uint32_t sa_idx[],
1463 void *sa[], uint16_t nb_pkts)
1467 for (i = 0; i < nb_pkts; i++)
1468 sa[i] = &sa_ctx->sa[sa_idx[i]];
1472 * Select HW offloads to be used.
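 * A port needs DEV_RX_OFFLOAD_SECURITY / DEV_TX_OFFLOAD_SECURITY only when
 * at least one inline SA rule (crypto or protocol offload) is bound to it.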
1475 sa_check_offloads(uint16_t port_id, uint64_t *rx_offloads,
1476 uint64_t *tx_offloads)
1478 struct ipsec_sa *rule;
1480 enum rte_security_session_action_type rule_type;
1485 /* Check for inbound rules that use offloads and use this port */
1486 for (idx_sa = 0; idx_sa < nb_sa_in; idx_sa++) {
1487 rule = &sa_in[idx_sa];
1488 rule_type = ipsec_get_action_type(rule);
1489 if ((rule_type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
1491 RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
1492 && rule->portid == port_id)
1493 *rx_offloads |= DEV_RX_OFFLOAD_SECURITY;
1496 /* Check for outbound rules that use offloads and use this port */
1497 for (idx_sa = 0; idx_sa < nb_sa_out; idx_sa++) {
1498 rule = &sa_out[idx_sa];
1499 rule_type = ipsec_get_action_type(rule);
1500 if ((rule_type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
1502 RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
1503 && rule->portid == port_id)
1504 *tx_offloads |= DEV_TX_OFFLOAD_SECURITY;