1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2016-2020 Intel Corporation
6 * Security Associations
9 #include <netinet/in.h>
10 #include <netinet/ip.h>
11 #include <netinet/ip6.h>
13 #include <rte_memzone.h>
14 #include <rte_crypto.h>
15 #include <rte_security.h>
16 #include <rte_cryptodev.h>
17 #include <rte_byteorder.h>
18 #include <rte_errno.h>
20 #include <rte_random.h>
21 #include <rte_ethdev.h>
22 #include <rte_malloc.h>
31 #define IP4_FULL_MASK (sizeof(((struct ip_addr *)NULL)->ip.ip4) * CHAR_BIT)
33 #define IP6_FULL_MASK (sizeof(((struct ip_addr *)NULL)->ip.ip6.ip6) * CHAR_BIT)
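/*
 * Note: with the ip_addr layout assumed above, IP4_FULL_MASK evaluates to 32
 * and IP6_FULL_MASK to 128, i.e. the full prefix length in bits for the
 * respective address family.
 */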
35 #define MBUF_NO_SEC_OFFLOAD(m) ((m->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD) == 0)
37 struct supported_cipher_algo {
39 enum rte_crypto_cipher_algorithm algo;
45 struct supported_auth_algo {
47 enum rte_crypto_auth_algorithm algo;
53 struct supported_aead_algo {
55 enum rte_crypto_aead_algorithm algo;
64 const struct supported_cipher_algo cipher_algos[] = {
67 .algo = RTE_CRYPTO_CIPHER_NULL,
73 .keyword = "aes-128-cbc",
74 .algo = RTE_CRYPTO_CIPHER_AES_CBC,
80 .keyword = "aes-192-cbc",
81 .algo = RTE_CRYPTO_CIPHER_AES_CBC,
87 .keyword = "aes-256-cbc",
88 .algo = RTE_CRYPTO_CIPHER_AES_CBC,
94 .keyword = "aes-128-ctr",
95 .algo = RTE_CRYPTO_CIPHER_AES_CTR,
101 .keyword = "3des-cbc",
102 .algo = RTE_CRYPTO_CIPHER_3DES_CBC,
109 const struct supported_auth_algo auth_algos[] = {
112 .algo = RTE_CRYPTO_AUTH_NULL,
118 .keyword = "sha1-hmac",
119 .algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
124 .keyword = "sha256-hmac",
125 .algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
131 const struct supported_aead_algo aead_algos[] = {
133 .keyword = "aes-128-gcm",
134 .algo = RTE_CRYPTO_AEAD_AES_GCM,
142 .keyword = "aes-192-gcm",
143 .algo = RTE_CRYPTO_AEAD_AES_GCM,
151 .keyword = "aes-256-gcm",
152 .algo = RTE_CRYPTO_AEAD_AES_GCM,
161 #define SA_INIT_NB 128
163 static uint32_t nb_crypto_sessions;
164 struct ipsec_sa *sa_out;
166 static uint32_t sa_out_sz;
167 static struct ipsec_sa_cnt sa_out_cnt;
169 struct ipsec_sa *sa_in;
171 static uint32_t sa_in_sz;
172 static struct ipsec_sa_cnt sa_in_cnt;
174 static const struct supported_cipher_algo *
175 find_match_cipher_algo(const char *cipher_keyword)
179 for (i = 0; i < RTE_DIM(cipher_algos); i++) {
180 const struct supported_cipher_algo *algo =
183 if (strcmp(cipher_keyword, algo->keyword) == 0)
190 static const struct supported_auth_algo *
191 find_match_auth_algo(const char *auth_keyword)
195 for (i = 0; i < RTE_DIM(auth_algos); i++) {
196 const struct supported_auth_algo *algo =
199 if (strcmp(auth_keyword, algo->keyword) == 0)
206 static const struct supported_aead_algo *
207 find_match_aead_algo(const char *aead_keyword)
211 for (i = 0; i < RTE_DIM(aead_algos); i++) {
212 const struct supported_aead_algo *algo =
215 if (strcmp(aead_keyword, algo->keyword) == 0)
223 * parse an x:x:x:x... hex byte key string into uint8_t *key
225 * > 0: number of bytes parsed
229 parse_key_string(const char *key_str, uint8_t *key)
231 const char *pt_start = key_str, *pt_end = key_str;
232 uint32_t nb_bytes = 0;
234 while (pt_end != NULL) {
235 char sub_str[3] = {0};
237 pt_end = strchr(pt_start, ':');
239 if (pt_end == NULL) {
240 if (strlen(pt_start) > 2)
242 strncpy(sub_str, pt_start, 2);
244 if (pt_end - pt_start > 2)
247 strncpy(sub_str, pt_start, pt_end - pt_start);
248 pt_start = pt_end + 1;
251 key[nb_bytes++] = strtol(sub_str, NULL, 16);
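/*
 * Illustrative use of parse_key_string() (values are placeholders): parsing
 * "de:ad:be:ef" fills key[] with { 0xde, 0xad, 0xbe, 0xef } and returns 4;
 * any segment longer than two hex digits is rejected by the checks above.
 */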
258 extend_sa_arr(struct ipsec_sa **sa_tbl, uint32_t cur_cnt, uint32_t *cur_sz)
260 if (*sa_tbl == NULL) {
261 *sa_tbl = calloc(SA_INIT_NB, sizeof(struct ipsec_sa));
264 *cur_sz = SA_INIT_NB;
268 if (cur_cnt >= *cur_sz) {
269 *sa_tbl = realloc(*sa_tbl,
270 *cur_sz * sizeof(struct ipsec_sa) * 2);
273 /* clean reallocated extra space */
274 memset(&(*sa_tbl)[*cur_sz], 0,
275 *cur_sz * sizeof(struct ipsec_sa));
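/*
 * Growth sketch (assuming SA_INIT_NB == 128 as defined above): the rule table
 * starts at 128 entries and doubles each time it fills up (128 -> 256 -> 512
 * ...), with the newly reallocated upper half zeroed so fresh rules start
 * from a clean state.
 */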
283 parse_sa_tokens(char **tokens, uint32_t n_tokens,
284 struct parse_status *status)
286 struct ipsec_sa *rule = NULL;
287 struct rte_ipsec_session *ips;
288 uint32_t ti; /*token index*/
289 uint32_t *ri /*rule index*/;
290 struct ipsec_sa_cnt *sa_cnt;
291 uint32_t cipher_algo_p = 0;
292 uint32_t auth_algo_p = 0;
293 uint32_t aead_algo_p = 0;
298 uint32_t portid_p = 0;
299 uint32_t fallback_p = 0;
300 int16_t status_p = 0;
301 uint16_t udp_encap_p = 0;
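/*
 * Illustrative cfg-file rule consumed by this parser (keys and addresses are
 * placeholders, not working values):
 *
 *   sa out 5 cipher_algo aes-128-cbc cipher_key a0:a1:...:af \
 *      auth_algo sha1-hmac auth_key b0:b1:...:b13 \
 *      mode ipv4-tunnel src 172.16.1.5 dst 172.16.2.5 \
 *      type inline-crypto-offload port_id 0
 *
 * tokens[] holds the whitespace-separated words starting with "in"/"out";
 * the leading "sa" keyword is handled before this function is called.
 */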
303 if (strcmp(tokens[0], "in") == 0) {
306 if (extend_sa_arr(&sa_in, nb_sa_in, &sa_in_sz) < 0)
309 rule->direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS;
312 sa_cnt = &sa_out_cnt;
313 if (extend_sa_arr(&sa_out, nb_sa_out, &sa_out_sz) < 0)
316 rule->direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS;
320 APP_CHECK_TOKEN_IS_NUM(tokens, 1, status);
321 if (status->status < 0)
323 if (atoi(tokens[1]) == INVALID_SPI)
325 rule->spi = atoi(tokens[1]);
326 rule->portid = UINT16_MAX;
327 ips = ipsec_get_primary_session(rule);
329 for (ti = 2; ti < n_tokens; ti++) {
330 if (strcmp(tokens[ti], "mode") == 0) {
331 APP_CHECK_PRESENCE(mode_p, tokens[ti], status);
332 if (status->status < 0)
335 INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
336 if (status->status < 0)
339 if (strcmp(tokens[ti], "ipv4-tunnel") == 0) {
341 rule->flags = IP4_TUNNEL;
342 } else if (strcmp(tokens[ti], "ipv6-tunnel") == 0) {
344 rule->flags = IP6_TUNNEL;
345 } else if (strcmp(tokens[ti], "transport") == 0) {
348 rule->flags = TRANSPORT;
350 APP_CHECK(0, status, "unrecognized "
351 "input \"%s\"", tokens[ti]);
359 if (strcmp(tokens[ti], "cipher_algo") == 0) {
360 const struct supported_cipher_algo *algo;
363 APP_CHECK_PRESENCE(cipher_algo_p, tokens[ti],
365 if (status->status < 0)
368 INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
369 if (status->status < 0)
372 algo = find_match_cipher_algo(tokens[ti]);
374 APP_CHECK(algo != NULL, status, "unrecognized "
375 "input \"%s\"", tokens[ti]);
377 if (status->status < 0)
380 rule->cipher_algo = algo->algo;
381 rule->block_size = algo->block_size;
382 rule->iv_len = algo->iv_len;
383 rule->cipher_key_len = algo->key_len;
385 /* for NULL algorithm, no cipher key required */
386 if (rule->cipher_algo == RTE_CRYPTO_CIPHER_NULL) {
391 INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
392 if (status->status < 0)
395 APP_CHECK(strcmp(tokens[ti], "cipher_key") == 0,
396 status, "unrecognized input \"%s\", "
397 "expect \"cipher_key\"", tokens[ti]);
398 if (status->status < 0)
401 INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
402 if (status->status < 0)
405 key_len = parse_key_string(tokens[ti],
407 APP_CHECK(key_len == rule->cipher_key_len, status,
408 "unrecognized input \"%s\"", tokens[ti]);
409 if (status->status < 0)
412 if (algo->algo == RTE_CRYPTO_CIPHER_AES_CBC ||
413 algo->algo == RTE_CRYPTO_CIPHER_3DES_CBC)
414 rule->salt = (uint32_t)rte_rand();
416 if (algo->algo == RTE_CRYPTO_CIPHER_AES_CTR) {
418 rule->cipher_key_len = key_len;
420 &rule->cipher_key[key_len], 4);
427 if (strcmp(tokens[ti], "auth_algo") == 0) {
428 const struct supported_auth_algo *algo;
431 APP_CHECK_PRESENCE(auth_algo_p, tokens[ti],
433 if (status->status < 0)
436 INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
437 if (status->status < 0)
440 algo = find_match_auth_algo(tokens[ti]);
441 APP_CHECK(algo != NULL, status, "unrecognized "
442 "input \"%s\"", tokens[ti]);
444 if (status->status < 0)
447 rule->auth_algo = algo->algo;
448 rule->auth_key_len = algo->key_len;
449 rule->digest_len = algo->digest_len;
451 /* NULL algorithm and combined algos do not
454 if (algo->key_not_req) {
459 INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
460 if (status->status < 0)
463 APP_CHECK(strcmp(tokens[ti], "auth_key") == 0,
464 status, "unrecognized input \"%s\", "
465 "expect \"auth_key\"", tokens[ti]);
466 if (status->status < 0)
469 INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
470 if (status->status < 0)
473 key_len = parse_key_string(tokens[ti],
475 APP_CHECK(key_len == rule->auth_key_len, status,
476 "unrecognized input \"%s\"", tokens[ti]);
477 if (status->status < 0)
484 if (strcmp(tokens[ti], "aead_algo") == 0) {
485 const struct supported_aead_algo *algo;
488 APP_CHECK_PRESENCE(aead_algo_p, tokens[ti],
490 if (status->status < 0)
493 INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
494 if (status->status < 0)
497 algo = find_match_aead_algo(tokens[ti]);
499 APP_CHECK(algo != NULL, status, "unrecognized "
500 "input \"%s\"", tokens[ti]);
502 if (status->status < 0)
505 rule->aead_algo = algo->algo;
506 rule->cipher_key_len = algo->key_len;
507 rule->digest_len = algo->digest_len;
508 rule->aad_len = algo->aad_len;
509 rule->block_size = algo->block_size;
510 rule->iv_len = algo->iv_len;
512 INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
513 if (status->status < 0)
516 APP_CHECK(strcmp(tokens[ti], "aead_key") == 0,
517 status, "unrecognized input \"%s\", "
518 "expect \"aead_key\"", tokens[ti]);
519 if (status->status < 0)
522 INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
523 if (status->status < 0)
526 key_len = parse_key_string(tokens[ti],
528 APP_CHECK(key_len == rule->cipher_key_len, status,
529 "unrecognized input \"%s\"", tokens[ti]);
530 if (status->status < 0)
534 rule->cipher_key_len = key_len;
536 &rule->cipher_key[key_len], 4);
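/*
 * Assumed convention for AES-GCM keys: the configured key string carries 4
 * extra trailing bytes that are split off into the per-SA salt, so
 * aead_algos[].key_len is 4 bytes larger than the resulting cipher_key_len
 * (see the key_len - 4 comparison in print_one_sa_rule()).
 */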
542 if (strcmp(tokens[ti], "src") == 0) {
543 APP_CHECK_PRESENCE(src_p, tokens[ti], status);
544 if (status->status < 0)
547 INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
548 if (status->status < 0)
551 if (IS_IP4_TUNNEL(rule->flags)) {
554 APP_CHECK(parse_ipv4_addr(tokens[ti],
555 &ip, NULL) == 0, status,
556 "unrecognized input \"%s\", "
557 "expect valid ipv4 addr",
559 if (status->status < 0)
561 rule->src.ip.ip4 = rte_bswap32(
562 (uint32_t)ip.s_addr);
563 } else if (IS_IP6_TUNNEL(rule->flags)) {
566 APP_CHECK(parse_ipv6_addr(tokens[ti], &ip,
568 "unrecognized input \"%s\", "
569 "expect valid ipv6 addr",
571 if (status->status < 0)
573 memcpy(rule->src.ip.ip6.ip6_b,
575 } else if (IS_TRANSPORT(rule->flags)) {
576 APP_CHECK(0, status, "unrecognized input "
577 "\"%s\"", tokens[ti]);
585 if (strcmp(tokens[ti], "dst") == 0) {
586 APP_CHECK_PRESENCE(dst_p, tokens[ti], status);
587 if (status->status < 0)
590 INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
591 if (status->status < 0)
594 if (IS_IP4_TUNNEL(rule->flags)) {
597 APP_CHECK(parse_ipv4_addr(tokens[ti],
598 &ip, NULL) == 0, status,
599 "unrecognized input \"%s\", "
600 "expect valid ipv4 addr",
602 if (status->status < 0)
604 rule->dst.ip.ip4 = rte_bswap32(
605 (uint32_t)ip.s_addr);
606 } else if (IS_IP6_TUNNEL(rule->flags)) {
609 APP_CHECK(parse_ipv6_addr(tokens[ti], &ip,
611 "unrecognized input \"%s\", "
612 "expect valid ipv6 addr",
614 if (status->status < 0)
616 memcpy(rule->dst.ip.ip6.ip6_b, ip.s6_addr, 16);
617 } else if (IS_TRANSPORT(rule->flags)) {
618 APP_CHECK(0, status, "unrecognized "
619 "input \"%s\"", tokens[ti]);
627 if (strcmp(tokens[ti], "type") == 0) {
628 APP_CHECK_PRESENCE(type_p, tokens[ti], status);
629 if (status->status < 0)
632 INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
633 if (status->status < 0)
636 if (strcmp(tokens[ti], "inline-crypto-offload") == 0)
638 RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO;
639 else if (strcmp(tokens[ti],
640 "inline-protocol-offload") == 0)
642 RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL;
643 else if (strcmp(tokens[ti],
644 "lookaside-protocol-offload") == 0)
646 RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL;
647 else if (strcmp(tokens[ti], "no-offload") == 0)
648 ips->type = RTE_SECURITY_ACTION_TYPE_NONE;
649 else if (strcmp(tokens[ti], "cpu-crypto") == 0)
650 ips->type = RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO;
652 APP_CHECK(0, status, "Invalid input \"%s\"",
661 if (strcmp(tokens[ti], "port_id") == 0) {
662 APP_CHECK_PRESENCE(portid_p, tokens[ti], status);
663 if (status->status < 0)
665 INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
666 if (status->status < 0)
668 if (rule->portid == UINT16_MAX)
669 rule->portid = atoi(tokens[ti]);
670 else if (rule->portid != atoi(tokens[ti])) {
672 "portid %s not matching with already assigned portid %u",
673 tokens[ti], rule->portid);
680 if (strcmp(tokens[ti], "mss") == 0) {
681 INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
682 if (status->status < 0)
684 rule->mss = atoi(tokens[ti]);
685 if (status->status < 0)
690 if (strcmp(tokens[ti], "fallback") == 0) {
691 struct rte_ipsec_session *fb;
693 APP_CHECK(app_sa_prm.enable, status, "Fallback session "
694 "not allowed for legacy mode.");
695 if (status->status < 0)
697 APP_CHECK(ips->type ==
698 RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO, status,
699 "Fallback session allowed if primary session "
700 "is of type inline-crypto-offload only.");
701 if (status->status < 0)
703 APP_CHECK(rule->direction ==
704 RTE_SECURITY_IPSEC_SA_DIR_INGRESS, status,
705 "Fallback session not allowed for egress "
707 if (status->status < 0)
709 APP_CHECK_PRESENCE(fallback_p, tokens[ti], status);
710 if (status->status < 0)
712 INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
713 if (status->status < 0)
715 fb = ipsec_get_fallback_session(rule);
716 if (strcmp(tokens[ti], "lookaside-none") == 0)
717 fb->type = RTE_SECURITY_ACTION_TYPE_NONE;
718 else if (strcmp(tokens[ti], "cpu-crypto") == 0)
719 fb->type = RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO;
721 APP_CHECK(0, status, "unrecognized fallback "
722 "type %s.", tokens[ti]);
726 rule->fallback_sessions = 1;
727 nb_crypto_sessions++;
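/*
 * Illustrative token combination matching the checks above:
 *     type inline-crypto-offload fallback lookaside-none
 * A fallback session is only accepted for ingress SAs whose primary session
 * is inline-crypto-offload, and only when the non-legacy (librte_ipsec) mode
 * is enabled.
 */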
731 if (strcmp(tokens[ti], "flow-direction") == 0) {
733 case RTE_SECURITY_ACTION_TYPE_NONE:
734 case RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO:
736 INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
737 if (status->status < 0)
739 if (rule->portid == UINT16_MAX)
740 rule->portid = atoi(tokens[ti]);
741 else if (rule->portid != atoi(tokens[ti])) {
743 "portid %s not matching with already assigned portid %u",
744 tokens[ti], rule->portid);
747 INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
748 if (status->status < 0)
750 rule->fdir_qid = atoi(tokens[ti]);
751 /* validating portid and queueid */
752 status_p = check_flow_params(rule->portid,
755 printf("port id %u / queue id %u is "
756 "not valid\n", rule->portid,
760 case RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO:
761 case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
762 case RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL:
765 "flow director not supported for security session type %d",
771 if (strcmp(tokens[ti], "udp-encap") == 0) {
773 case RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL:
774 case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
775 APP_CHECK_PRESENCE(udp_encap_p, tokens[ti],
777 if (status->status < 0)
781 app_sa_prm.udp_encap = 1;
786 "UDP encapsulation not supported for "
787 "security session type %d",
794 /* unrecognizable input */
795 APP_CHECK(0, status, "unrecognized input \"%s\"",
801 APP_CHECK(cipher_algo_p == 0, status,
802 "AEAD used, no need for cipher options");
803 if (status->status < 0)
806 APP_CHECK(auth_algo_p == 0, status,
807 "AEAD used, no need for auth options");
808 if (status->status < 0)
811 APP_CHECK(cipher_algo_p == 1, status, "missing cipher or AEAD options");
812 if (status->status < 0)
815 APP_CHECK(auth_algo_p == 1, status, "missing auth or AEAD options");
816 if (status->status < 0)
820 APP_CHECK(mode_p == 1, status, "missing mode option");
821 if (status->status < 0)
824 if ((ips->type != RTE_SECURITY_ACTION_TYPE_NONE && ips->type !=
825 RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO) && (portid_p == 0))
826 printf("Missing portid option, falling back to non-offload\n");
828 if (!type_p || (!portid_p && ips->type !=
829 RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO)) {
830 ips->type = RTE_SECURITY_ACTION_TYPE_NONE;
833 nb_crypto_sessions++;
838 print_one_sa_rule(const struct ipsec_sa *sa, int inbound)
842 const struct rte_ipsec_session *ips;
843 const struct rte_ipsec_session *fallback_ips;
845 printf("\tspi_%s(%3u):", inbound?"in":"out", sa->spi);
847 for (i = 0; i < RTE_DIM(cipher_algos); i++) {
848 if (cipher_algos[i].algo == sa->cipher_algo &&
849 cipher_algos[i].key_len == sa->cipher_key_len) {
850 printf("%s ", cipher_algos[i].keyword);
855 for (i = 0; i < RTE_DIM(auth_algos); i++) {
856 if (auth_algos[i].algo == sa->auth_algo) {
857 printf("%s ", auth_algos[i].keyword);
862 for (i = 0; i < RTE_DIM(aead_algos); i++) {
863 if (aead_algos[i].algo == sa->aead_algo &&
864 aead_algos[i].key_len-4 == sa->cipher_key_len) {
865 printf("%s ", aead_algos[i].keyword);
872 switch (WITHOUT_TRANSPORT_VERSION(sa->flags)) {
874 printf("IP4Tunnel ");
875 uint32_t_to_char(sa->src.ip.ip4, &a, &b, &c, &d);
876 printf("%hhu.%hhu.%hhu.%hhu ", d, c, b, a);
877 uint32_t_to_char(sa->dst.ip.ip4, &a, &b, &c, &d);
878 printf("%hhu.%hhu.%hhu.%hhu", d, c, b, a);
881 printf("IP6Tunnel ");
882 for (i = 0; i < 16; i++) {
883 if (i % 2 && i != 15)
884 printf("%.2x:", sa->src.ip.ip6.ip6_b[i]);
886 printf("%.2x", sa->src.ip.ip6.ip6_b[i]);
889 for (i = 0; i < 16; i++) {
890 if (i % 2 && i != 15)
891 printf("%.2x:", sa->dst.ip.ip6.ip6_b[i]);
893 printf("%.2x", sa->dst.ip.ip6.ip6_b[i]);
897 printf("Transport ");
901 ips = &sa->sessions[IPSEC_SESSION_PRIMARY];
904 case RTE_SECURITY_ACTION_TYPE_NONE:
905 printf("no-offload ");
907 case RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO:
908 printf("inline-crypto-offload ");
910 case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
911 printf("inline-protocol-offload ");
913 case RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL:
914 printf("lookaside-protocol-offload ");
916 case RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO:
917 printf("cpu-crypto-accelerated ");
921 fallback_ips = &sa->sessions[IPSEC_SESSION_FALLBACK];
922 if (fallback_ips != NULL && sa->fallback_sessions > 0) {
923 printf("inline fallback: ");
924 switch (fallback_ips->type) {
925 case RTE_SECURITY_ACTION_TYPE_NONE:
926 printf("lookaside-none");
928 case RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO:
929 printf("cpu-crypto-accelerated");
936 if (sa->fdir_flag == 1)
937 printf("flow-direction port %d queue %d", sa->portid,
943 static struct sa_ctx *
944 sa_create(const char *name, int32_t socket_id, uint32_t nb_sa)
947 struct sa_ctx *sa_ctx;
949 const struct rte_memzone *mz;
951 snprintf(s, sizeof(s), "%s_%u", name, socket_id);
953 /* Create SA context */
954 printf("Creating SA context with %u maximum entries on socket %d\n",
957 mz_size = sizeof(struct ipsec_xf) * nb_sa;
958 mz = rte_memzone_reserve(s, mz_size, socket_id,
959 RTE_MEMZONE_1GB | RTE_MEMZONE_SIZE_HINT_ONLY);
961 printf("Failed to allocate SA XFORM memory\n");
966 sa_ctx = rte_zmalloc(NULL, sizeof(struct sa_ctx) +
967 sizeof(struct ipsec_sa) * nb_sa, RTE_CACHE_LINE_SIZE);
969 if (sa_ctx == NULL) {
970 printf("Failed to allocate SA CTX memory\n");
972 rte_memzone_free(mz);
976 sa_ctx->xf = (struct ipsec_xf *)mz->addr;
977 sa_ctx->nb_sa = nb_sa;
983 check_eth_dev_caps(uint16_t portid, uint32_t inbound, uint32_t tso)
985 struct rte_eth_dev_info dev_info;
988 retval = rte_eth_dev_info_get(portid, &dev_info);
991 "Error during getting device (port %u) info: %s\n",
992 portid, strerror(-retval));
998 if ((dev_info.rx_offload_capa &
999 RTE_ETH_RX_OFFLOAD_SECURITY) == 0) {
1000 RTE_LOG(WARNING, PORT,
1001 "hardware RX IPSec offload is not supported\n");
1005 } else { /* outbound */
1006 if ((dev_info.tx_offload_capa &
1007 RTE_ETH_TX_OFFLOAD_SECURITY) == 0) {
1008 RTE_LOG(WARNING, PORT,
1009 "hardware TX IPSec offload is not supported\n");
1012 if (tso && (dev_info.tx_offload_capa &
1013 RTE_ETH_TX_OFFLOAD_TCP_TSO) == 0) {
1014 RTE_LOG(WARNING, PORT,
1015 "hardware TCP TSO offload is not supported\n");
1023 * Helper function, tries to determine next_proto for SPI
1024 * by searching through SP rules.
1027 get_spi_proto(uint32_t spi, enum rte_security_ipsec_sa_direction dir,
1028 struct ip_addr ip_addr[2], uint32_t mask[2])
1032 rc4 = sp4_spi_present(spi, dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
1034 rc6 = sp6_spi_present(spi, dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
1040 "%s: SPI %u used simultaeously by "
1041 "IPv4(%d) and IPv6 (%d) SP rules\n",
1042 __func__, spi, rc4, rc6);
1045 return IPPROTO_IPIP;
1046 } else if (rc6 < 0) {
1048 "%s: SPI %u is not used by any SP rule\n",
1052 return IPPROTO_IPV6;
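/*
 * The IPPROTO_IPIP / IPPROTO_IPV6 value returned here is later used by
 * fill_ipsec_sa_prm() as the tunnel next_proto (or transport proto), so the
 * SP lookup effectively decides whether the SA carries IPv4 or IPv6 traffic.
 */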
1056 * Helper function for getting source and destination IP addresses
1057 * from the SP. Needed for inline crypto transport mode, as addresses are not
1058 * provided in the config file for that mode. It checks whether an SP exists for
1059 * the current SA and, based on the protocol type returned, stores the
1060 * appropriate addresses from the SP into the SA.
1063 sa_add_address_inline_crypto(struct ipsec_sa *sa)
1066 struct ip_addr ip_addr[2];
1069 protocol = get_spi_proto(sa->spi, sa->direction, ip_addr, mask);
1072 else if (protocol == IPPROTO_IPIP) {
1073 sa->flags |= IP4_TRANSPORT;
1074 if (mask[0] == IP4_FULL_MASK &&
1075 mask[1] == IP4_FULL_MASK &&
1076 ip_addr[0].ip.ip4 != 0 &&
1077 ip_addr[1].ip.ip4 != 0) {
1079 sa->src.ip.ip4 = ip_addr[0].ip.ip4;
1080 sa->dst.ip.ip4 = ip_addr[1].ip.ip4;
1083 "%s: No valid address or mask entry in"
1084 " IPv4 SP rule for SPI %u\n",
1088 } else if (protocol == IPPROTO_IPV6) {
1089 sa->flags |= IP6_TRANSPORT;
1090 if (mask[0] == IP6_FULL_MASK &&
1091 mask[1] == IP6_FULL_MASK &&
1092 (ip_addr[0].ip.ip6.ip6[0] != 0 ||
1093 ip_addr[0].ip.ip6.ip6[1] != 0) &&
1094 (ip_addr[1].ip.ip6.ip6[0] != 0 ||
1095 ip_addr[1].ip.ip6.ip6[1] != 0)) {
1097 sa->src.ip.ip6 = ip_addr[0].ip.ip6;
1098 sa->dst.ip.ip6 = ip_addr[1].ip.ip6;
1101 "%s: No valid address or mask entry in"
1102 " IPv6 SP rule for SPI %u\n",
1111 sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
1112 uint32_t nb_entries, uint32_t inbound,
1113 struct socket_ctx *skt_ctx)
1115 struct ipsec_sa *sa;
1117 uint16_t iv_length, aad_length;
1120 struct rte_ipsec_session *ips;
1122 /* for ESN, the upper 32 bits of the SQN also need to be part of the AAD */
1123 aad_length = (app_sa_prm.enable_esn != 0) ? sizeof(uint32_t) : 0;
1125 for (i = 0; i < nb_entries; i++) {
1127 sa = &sa_ctx->sa[idx];
1129 printf("Index %u already in use by SPI %u\n",
1136 rc = ipsec_sad_add(&sa_ctx->sad, sa);
1142 ips = ipsec_get_primary_session(sa);
1144 if (ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL ||
1145 ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
1146 if (check_eth_dev_caps(sa->portid, inbound, sa->mss))
1150 switch (WITHOUT_TRANSPORT_VERSION(sa->flags)) {
1152 sa->src.ip.ip4 = rte_cpu_to_be_32(sa->src.ip.ip4);
1153 sa->dst.ip.ip4 = rte_cpu_to_be_32(sa->dst.ip.ip4);
1157 RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
1159 sa_add_address_inline_crypto(sa);
1160 if (inline_status < 0)
1161 return inline_status;
1166 if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_GCM) {
1169 sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_AEAD;
1170 sa_ctx->xf[idx].a.aead.algo = sa->aead_algo;
1171 sa_ctx->xf[idx].a.aead.key.data = sa->cipher_key;
1172 sa_ctx->xf[idx].a.aead.key.length =
1174 sa_ctx->xf[idx].a.aead.op = (inbound == 1) ?
1175 RTE_CRYPTO_AEAD_OP_DECRYPT :
1176 RTE_CRYPTO_AEAD_OP_ENCRYPT;
1177 sa_ctx->xf[idx].a.next = NULL;
1178 sa_ctx->xf[idx].a.aead.iv.offset = IV_OFFSET;
1179 sa_ctx->xf[idx].a.aead.iv.length = iv_length;
1180 sa_ctx->xf[idx].a.aead.aad_length =
1181 sa->aad_len + aad_length;
1182 sa_ctx->xf[idx].a.aead.digest_length =
1185 sa->xforms = &sa_ctx->xf[idx].a;
1187 switch (sa->cipher_algo) {
1188 case RTE_CRYPTO_CIPHER_NULL:
1189 case RTE_CRYPTO_CIPHER_3DES_CBC:
1190 case RTE_CRYPTO_CIPHER_AES_CBC:
1191 iv_length = sa->iv_len;
1193 case RTE_CRYPTO_CIPHER_AES_CTR:
1197 RTE_LOG(ERR, IPSEC_ESP,
1198 "unsupported cipher algorithm %u\n",
1204 sa_ctx->xf[idx].b.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
1205 sa_ctx->xf[idx].b.cipher.algo = sa->cipher_algo;
1206 sa_ctx->xf[idx].b.cipher.key.data = sa->cipher_key;
1207 sa_ctx->xf[idx].b.cipher.key.length =
1209 sa_ctx->xf[idx].b.cipher.op =
1210 RTE_CRYPTO_CIPHER_OP_DECRYPT;
1211 sa_ctx->xf[idx].b.next = NULL;
1212 sa_ctx->xf[idx].b.cipher.iv.offset = IV_OFFSET;
1213 sa_ctx->xf[idx].b.cipher.iv.length = iv_length;
1215 sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_AUTH;
1216 sa_ctx->xf[idx].a.auth.algo = sa->auth_algo;
1217 sa_ctx->xf[idx].a.auth.key.data = sa->auth_key;
1218 sa_ctx->xf[idx].a.auth.key.length =
1220 sa_ctx->xf[idx].a.auth.digest_length =
1222 sa_ctx->xf[idx].a.auth.op =
1223 RTE_CRYPTO_AUTH_OP_VERIFY;
1224 } else { /* outbound */
1225 sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
1226 sa_ctx->xf[idx].a.cipher.algo = sa->cipher_algo;
1227 sa_ctx->xf[idx].a.cipher.key.data = sa->cipher_key;
1228 sa_ctx->xf[idx].a.cipher.key.length =
1230 sa_ctx->xf[idx].a.cipher.op =
1231 RTE_CRYPTO_CIPHER_OP_ENCRYPT;
1232 sa_ctx->xf[idx].a.next = NULL;
1233 sa_ctx->xf[idx].a.cipher.iv.offset = IV_OFFSET;
1234 sa_ctx->xf[idx].a.cipher.iv.length = iv_length;
1236 sa_ctx->xf[idx].b.type = RTE_CRYPTO_SYM_XFORM_AUTH;
1237 sa_ctx->xf[idx].b.auth.algo = sa->auth_algo;
1238 sa_ctx->xf[idx].b.auth.key.data = sa->auth_key;
1239 sa_ctx->xf[idx].b.auth.key.length =
1241 sa_ctx->xf[idx].b.auth.digest_length =
1243 sa_ctx->xf[idx].b.auth.op =
1244 RTE_CRYPTO_AUTH_OP_GENERATE;
1247 sa_ctx->xf[idx].a.next = &sa_ctx->xf[idx].b;
1248 sa_ctx->xf[idx].b.next = NULL;
1249 sa->xforms = &sa_ctx->xf[idx].a;
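/*
 * Resulting xform chains for the non-AEAD case: inbound is auth (verify) ->
 * cipher (decrypt), outbound is cipher (encrypt) -> auth (generate);
 * sa->xforms always points at element 'a', the head of the chain.
 */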
1253 RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL ||
1255 RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
1256 rc = create_inline_session(skt_ctx, sa, ips);
1258 RTE_LOG(ERR, IPSEC_ESP,
1259 "create_inline_session() failed\n");
1264 if (sa->fdir_flag && inbound) {
1265 rc = create_ipsec_esp_flow(sa);
1267 RTE_LOG(ERR, IPSEC_ESP,
1268 "create_ipsec_esp_flow() failed\n");
1270 print_one_sa_rule(sa, inbound);
1277 sa_out_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
1278 uint32_t nb_entries, struct socket_ctx *skt_ctx)
1280 return sa_add_rules(sa_ctx, entries, nb_entries, 0, skt_ctx);
1284 sa_in_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
1285 uint32_t nb_entries, struct socket_ctx *skt_ctx)
1287 return sa_add_rules(sa_ctx, entries, nb_entries, 1, skt_ctx);
1291 * helper function, fills parameters that are identical for all SAs
1294 fill_ipsec_app_sa_prm(struct rte_ipsec_sa_prm *prm,
1295 const struct app_sa_prm *app_prm)
1297 memset(prm, 0, sizeof(*prm));
1299 prm->flags = app_prm->flags;
1300 prm->ipsec_xform.options.esn = app_prm->enable_esn;
1301 prm->ipsec_xform.replay_win_sz = app_prm->window_size;
1305 fill_ipsec_sa_prm(struct rte_ipsec_sa_prm *prm, const struct ipsec_sa *ss,
1306 const struct rte_ipv4_hdr *v4, struct rte_ipv6_hdr *v6)
1311 * Try to get SPI next proto by searching that SPI in SPD.
1312 * probably not the optimal way, but there seems to be nothing
1315 rc = get_spi_proto(ss->spi, ss->direction, NULL, NULL);
1319 fill_ipsec_app_sa_prm(prm, &app_sa_prm);
1320 prm->userdata = (uintptr_t)ss;
1322 /* setup ipsec xform */
1323 prm->ipsec_xform.spi = ss->spi;
1324 prm->ipsec_xform.salt = ss->salt;
1325 prm->ipsec_xform.direction = ss->direction;
1326 prm->ipsec_xform.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP;
1327 prm->ipsec_xform.mode = (IS_TRANSPORT(ss->flags)) ?
1328 RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT :
1329 RTE_SECURITY_IPSEC_SA_MODE_TUNNEL;
1330 prm->ipsec_xform.options.ecn = 1;
1331 prm->ipsec_xform.options.copy_dscp = 1;
1333 if (IS_IP4_TUNNEL(ss->flags)) {
1334 prm->ipsec_xform.tunnel.type = RTE_SECURITY_IPSEC_TUNNEL_IPV4;
1335 prm->tun.hdr_len = sizeof(*v4);
1336 prm->tun.next_proto = rc;
1338 } else if (IS_IP6_TUNNEL(ss->flags)) {
1339 prm->ipsec_xform.tunnel.type = RTE_SECURITY_IPSEC_TUNNEL_IPV6;
1340 prm->tun.hdr_len = sizeof(*v6);
1341 prm->tun.next_proto = rc;
1344 /* transport mode */
1345 prm->trs.proto = rc;
1348 /* setup crypto section */
1349 prm->crypto_xform = ss->xforms;
1354 fill_ipsec_session(struct rte_ipsec_session *ss, struct rte_ipsec_sa *sa)
1360 if (ss->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
1361 ss->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) {
1362 if (ss->security.ses != NULL) {
1363 rc = rte_ipsec_session_prepare(ss);
1365 memset(ss, 0, sizeof(*ss));
1373 * Initialise related rte_ipsec_sa object.
1376 ipsec_sa_init(struct ipsec_sa *lsa, struct rte_ipsec_sa *sa, uint32_t sa_size)
1379 struct rte_ipsec_sa_prm prm;
1380 struct rte_ipsec_session *ips;
1381 struct rte_ipv4_hdr v4 = {
1382 .version_ihl = IPVERSION << 4 |
1383 sizeof(v4) / RTE_IPV4_IHL_MULTIPLIER,
1384 .time_to_live = IPDEFTTL,
1385 .next_proto_id = IPPROTO_ESP,
1386 .src_addr = lsa->src.ip.ip4,
1387 .dst_addr = lsa->dst.ip.ip4,
1389 struct rte_ipv6_hdr v6 = {
1390 .vtc_flow = htonl(IP6_VERSION << 28),
1391 .proto = IPPROTO_ESP,
1394 if (IS_IP6_TUNNEL(lsa->flags)) {
1395 memcpy(v6.src_addr, lsa->src.ip.ip6.ip6_b, sizeof(v6.src_addr));
1396 memcpy(v6.dst_addr, lsa->dst.ip.ip6.ip6_b, sizeof(v6.dst_addr));
1399 rc = fill_ipsec_sa_prm(&prm, lsa, &v4, &v6);
1401 rc = rte_ipsec_sa_init(sa, &prm, sa_size);
1405 /* init primary processing session */
1406 ips = ipsec_get_primary_session(lsa);
1407 rc = fill_ipsec_session(ips, sa);
1411 /* init inline fallback processing session */
1412 if (lsa->fallback_sessions == 1)
1413 rc = fill_ipsec_session(ipsec_get_fallback_session(lsa), sa);
1419 * Allocate space and init rte_ipsec_sa structures,
1423 ipsec_satbl_init(struct sa_ctx *ctx, uint32_t nb_ent, int32_t socket)
1428 struct rte_ipsec_sa *sa;
1429 struct ipsec_sa *lsa;
1430 struct rte_ipsec_sa_prm prm;
1432 /* determine SA size */
1434 fill_ipsec_sa_prm(&prm, ctx->sa + idx, NULL, NULL);
1435 sz = rte_ipsec_sa_size(&prm);
1437 RTE_LOG(ERR, IPSEC, "%s(%p, %u, %d): "
1438 "failed to determine SA size, error code: %d\n",
1439 __func__, ctx, nb_ent, socket, sz);
1445 ctx->satbl = rte_zmalloc_socket(NULL, tsz, RTE_CACHE_LINE_SIZE, socket);
1446 if (ctx->satbl == NULL) {
1448 "%s(%p, %u, %d): failed to allocate %zu bytes\n",
1449 __func__, ctx, nb_ent, socket, tsz);
1454 for (i = 0; i != nb_ent && rc == 0; i++) {
1458 sa = (struct rte_ipsec_sa *)((uintptr_t)ctx->satbl + sz * i);
1459 lsa = ctx->sa + idx;
1461 rc = ipsec_sa_init(lsa, sa, sz);
1468 sa_cmp(const void *p, const void *q)
1470 uint32_t spi1 = ((const struct ipsec_sa *)p)->spi;
1471 uint32_t spi2 = ((const struct ipsec_sa *)q)->spi;
1473 return (spi1 < spi2) ? -1 : (spi1 > spi2);
1477 * Walk through all SA rules to find an SA with the given SPI
1480 sa_spi_present(struct sa_ctx *sa_ctx, uint32_t spi, int inbound)
1483 struct ipsec_sa *sa;
1484 struct ipsec_sa tmpl;
1485 const struct ipsec_sa *sar;
1495 sa = bsearch(&tmpl, sar, num, sizeof(struct ipsec_sa), sa_cmp);
1497 return RTE_PTR_DIFF(sa, sar) / sizeof(struct ipsec_sa);
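/*
 * Note: the bsearch() above assumes the SA array has already been sorted by
 * SPI with sa_cmp(), i.e. the qsort() calls performed at startup (see the
 * sorting code near the end of this file) must have run first.
 */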
1503 sa_init(struct socket_ctx *ctx, int32_t socket_id)
1509 rte_exit(EXIT_FAILURE, "NULL context.\n");
1511 if (ctx->sa_in != NULL)
1512 rte_exit(EXIT_FAILURE, "Inbound SA DB for socket %u already "
1513 "initialized\n", socket_id);
1515 if (ctx->sa_out != NULL)
1516 rte_exit(EXIT_FAILURE, "Outbound SA DB for socket %u already "
1517 "initialized\n", socket_id);
1521 ctx->sa_in = sa_create(name, socket_id, nb_sa_in);
1522 if (ctx->sa_in == NULL)
1523 rte_exit(EXIT_FAILURE, "Error [%d] creating SA "
1524 "context %s in socket %d\n", rte_errno,
1527 rc = ipsec_sad_create(name, &ctx->sa_in->sad, socket_id,
1530 rte_exit(EXIT_FAILURE, "failed to init SAD\n");
1532 sa_in_add_rules(ctx->sa_in, sa_in, nb_sa_in, ctx);
1534 if (app_sa_prm.enable != 0) {
1535 rc = ipsec_satbl_init(ctx->sa_in, nb_sa_in,
1538 rte_exit(EXIT_FAILURE,
1539 "failed to init inbound SAs\n");
1542 RTE_LOG(WARNING, IPSEC, "No SA Inbound rule specified\n");
1544 if (nb_sa_out > 0) {
1546 ctx->sa_out = sa_create(name, socket_id, nb_sa_out);
1547 if (ctx->sa_out == NULL)
1548 rte_exit(EXIT_FAILURE, "Error [%d] creating SA "
1549 "context %s in socket %d\n", rte_errno,
1552 sa_out_add_rules(ctx->sa_out, sa_out, nb_sa_out, ctx);
1554 if (app_sa_prm.enable != 0) {
1555 rc = ipsec_satbl_init(ctx->sa_out, nb_sa_out,
1558 rte_exit(EXIT_FAILURE,
1559 "failed to init outbound SAs\n");
1562 RTE_LOG(WARNING, IPSEC, "No SA Outbound rule "
1567 inbound_sa_check(struct sa_ctx *sa_ctx, struct rte_mbuf *m, uint32_t sa_idx)
1569 struct ipsec_mbuf_metadata *priv;
1570 struct ipsec_sa *sa;
1575 return (sa_ctx->sa[sa_idx].spi == sa->spi);
1577 RTE_LOG(ERR, IPSEC, "SA not saved in private data\n");
1582 inbound_sa_lookup(struct sa_ctx *sa_ctx, struct rte_mbuf *pkts[],
1583 void *sa_arr[], uint16_t nb_pkts)
1587 struct ipsec_sa *sa;
1589 sad_lookup(&sa_ctx->sad, pkts, sa_arr, nb_pkts);
1592 * Mark the need for inline offload fallback in the LSB of the SA pointer.
1593 * Thanks to the packet grouping mechanism used by ipsec_process,
1594 * packets marked for fallback processing will form a separate group.
1596 * Because the tagged SA pointer is not safe to use directly, it is cast to a
1597 * generic pointer to prevent unintentional use. Use ipsec_mask_saptr
1598 * to recover a valid struct pointer.
1600 for (i = 0; i < nb_pkts; i++) {
1601 if (sa_arr[i] == NULL)
1604 result_sa = sa = sa_arr[i];
1605 if (MBUF_NO_SEC_OFFLOAD(pkts[i]) &&
1606 sa->fallback_sessions > 0) {
1607 uintptr_t intsa = (uintptr_t)sa;
1608 intsa |= IPSEC_SA_OFFLOAD_FALLBACK_FLAG;
1609 result_sa = (void *)intsa;
1611 sa_arr[i] = result_sa;
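/*
 * Consumer-side sketch (illustrative): a tagged entry can be detected and the
 * real SA pointer recovered with
 *     if ((uintptr_t)sa_arr[i] & IPSEC_SA_OFFLOAD_FALLBACK_FLAG)
 *         sa = ipsec_mask_saptr(sa_arr[i]);
 */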
1616 outbound_sa_lookup(struct sa_ctx *sa_ctx, uint32_t sa_idx[],
1617 void *sa[], uint16_t nb_pkts)
1621 for (i = 0; i < nb_pkts; i++)
1622 sa[i] = &sa_ctx->sa[sa_idx[i]];
1626 * Select HW offloads to be used.
1629 sa_check_offloads(uint16_t port_id, uint64_t *rx_offloads,
1630 uint64_t *tx_offloads)
1632 struct ipsec_sa *rule;
1634 enum rte_security_session_action_type rule_type;
1639 /* Check for inbound rules that use offloads and use this port */
1640 for (idx_sa = 0; idx_sa < nb_sa_in; idx_sa++) {
1641 rule = &sa_in[idx_sa];
1642 rule_type = ipsec_get_action_type(rule);
1643 if ((rule_type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
1645 RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
1646 && rule->portid == port_id)
1647 *rx_offloads |= RTE_ETH_RX_OFFLOAD_SECURITY;
1650 /* Check for outbound rules that use offloads and use this port */
1651 for (idx_sa = 0; idx_sa < nb_sa_out; idx_sa++) {
1652 rule = &sa_out[idx_sa];
1653 rule_type = ipsec_get_action_type(rule);
1654 if ((rule_type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
1656 RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
1657 && rule->portid == port_id) {
1658 *tx_offloads |= RTE_ETH_TX_OFFLOAD_SECURITY;
1660 *tx_offloads |= RTE_ETH_TX_OFFLOAD_TCP_TSO;
1669 qsort(sa_in, nb_sa_in, sizeof(struct ipsec_sa), sa_cmp);
1670 qsort(sa_out, nb_sa_out, sizeof(struct ipsec_sa), sa_cmp);
1674 get_nb_crypto_sessions(void)
1676 return nb_crypto_sessions;