/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2017 Intel Corporation
 */

/*
 * Security Associations
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <limits.h>
#include <sys/types.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>

#include <rte_memzone.h>
#include <rte_crypto.h>
#include <rte_security.h>
#include <rte_cryptodev.h>
#include <rte_byteorder.h>
#include <rte_errno.h>
#include <rte_ip.h>
#include <rte_random.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>

#include "ipsec.h"
#include "esp.h"
#include "parser.h"

#define IPDEFTTL 64

#define IP4_FULL_MASK (sizeof(((struct ip_addr *)NULL)->ip.ip4) * CHAR_BIT)

#define IP6_FULL_MASK (sizeof(((struct ip_addr *)NULL)->ip.ip6.ip6) * CHAR_BIT)
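/*
 * Note: both macros expand to the address width in bits. With ip.ip4
 * being a 4-byte value and ip.ip6.ip6 two 64-bit words (the layout
 * assumed throughout this file), IP4_FULL_MASK is 32 and IP6_FULL_MASK
 * is 128, i.e. the prefix length of an exact host match.
 */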
struct supported_cipher_algo {
	const char *keyword;
	enum rte_crypto_cipher_algorithm algo;
	uint16_t iv_len;
	uint16_t block_size;
	uint16_t key_len;
};

struct supported_auth_algo {
	const char *keyword;
	enum rte_crypto_auth_algorithm algo;
	uint16_t digest_len;
	uint16_t key_len;
	uint8_t key_not_req;
};

struct supported_aead_algo {
	const char *keyword;
	enum rte_crypto_aead_algorithm algo;
	uint16_t iv_len;
	uint16_t block_size;
	uint16_t digest_len;
	uint16_t key_len;
	uint8_t aad_len;
};
const struct supported_cipher_algo cipher_algos[] = {
	{
		.keyword = "null",
		.algo = RTE_CRYPTO_CIPHER_NULL,
		.iv_len = 0,
		.block_size = 4,
		.key_len = 0
	},
	{
		.keyword = "aes-128-cbc",
		.algo = RTE_CRYPTO_CIPHER_AES_CBC,
		.iv_len = 16,
		.block_size = 16,
		.key_len = 16
	},
	{
		.keyword = "aes-256-cbc",
		.algo = RTE_CRYPTO_CIPHER_AES_CBC,
		.iv_len = 16,
		.block_size = 16,
		.key_len = 32
	},
	{
		.keyword = "aes-128-ctr",
		.algo = RTE_CRYPTO_CIPHER_AES_CTR,
		.iv_len = 8,
		.block_size = 4,
		.key_len = 20 /* 16-byte key + 4-byte salt */
	},
	{
		.keyword = "3des-cbc",
		.algo = RTE_CRYPTO_CIPHER_3DES_CBC,
		.iv_len = 8,
		.block_size = 8,
		.key_len = 24
	}
};
const struct supported_auth_algo auth_algos[] = {
	{
		.keyword = "null",
		.algo = RTE_CRYPTO_AUTH_NULL,
		.digest_len = 0,
		.key_len = 0,
		.key_not_req = 1
	},
	{
		.keyword = "sha1-hmac",
		.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
		.digest_len = 12,
		.key_len = 20
	},
	{
		.keyword = "sha256-hmac",
		.algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
		.digest_len = 16,
		.key_len = 32
	}
};
const struct supported_aead_algo aead_algos[] = {
	{
		.keyword = "aes-128-gcm",
		.algo = RTE_CRYPTO_AEAD_AES_GCM,
		.iv_len = 8,
		.block_size = 4,
		.key_len = 20, /* 16-byte key + 4-byte salt */
		.digest_len = 16,
		.aad_len = 8,
	}
};
static struct ipsec_sa sa_out[IPSEC_SA_MAX_ENTRIES];
static uint32_t nb_sa_out;

static struct ipsec_sa sa_in[IPSEC_SA_MAX_ENTRIES];
static uint32_t nb_sa_in;
static const struct supported_cipher_algo *
find_match_cipher_algo(const char *cipher_keyword)
{
	size_t i;

	for (i = 0; i < RTE_DIM(cipher_algos); i++) {
		const struct supported_cipher_algo *algo =
			&cipher_algos[i];

		if (strcmp(cipher_keyword, algo->keyword) == 0)
			return algo;
	}

	return NULL;
}
static const struct supported_auth_algo *
find_match_auth_algo(const char *auth_keyword)
{
	size_t i;

	for (i = 0; i < RTE_DIM(auth_algos); i++) {
		const struct supported_auth_algo *algo =
			&auth_algos[i];

		if (strcmp(auth_keyword, algo->keyword) == 0)
			return algo;
	}

	return NULL;
}
static const struct supported_aead_algo *
find_match_aead_algo(const char *aead_keyword)
{
	size_t i;

	for (i = 0; i < RTE_DIM(aead_algos); i++) {
		const struct supported_aead_algo *algo =
			&aead_algos[i];

		if (strcmp(aead_keyword, algo->keyword) == 0)
			return algo;
	}

	return NULL;
}
/*
 * parse x:x:x:x.... hex number key string into uint8_t *key
 * Return:
 * > 0: number of bytes parsed
 * 0:   failed
 */
static uint32_t
parse_key_string(const char *key_str, uint8_t *key)
{
	const char *pt_start = key_str, *pt_end = key_str;
	uint32_t nb_bytes = 0;

	while (pt_end != NULL) {
		char sub_str[3] = {0};

		pt_end = strchr(pt_start, ':');

		if (pt_end == NULL) {
			if (strlen(pt_start) > 2)
				return 0;
			strncpy(sub_str, pt_start, 2);
		} else {
			if (pt_end - pt_start > 2)
				return 0;

			strncpy(sub_str, pt_start, pt_end - pt_start);
			pt_start = pt_end + 1;
		}

		key[nb_bytes++] = strtol(sub_str, NULL, 16);
	}

	return nb_bytes;
}
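/*
 * Illustrative usage (not called anywhere; the key buffer size is the
 * caller's responsibility and MAX_KEY_SIZE is just a stand-in here):
 *
 *	uint8_t key[MAX_KEY_SIZE];
 *	uint32_t len = parse_key_string("de:ad:be:ef", key);
 *	// len == 4, key[0..3] == 0xde, 0xad, 0xbe, 0xef
 *
 * A segment wider than two hex digits makes the parser return 0 (failure).
 */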
static void
parse_sa_tokens(char **tokens, uint32_t n_tokens,
	struct parse_status *status)
{
	struct ipsec_sa *rule = NULL;
	struct rte_ipsec_session *ips;
	uint32_t ti; /* token index */
	uint32_t *ri; /* rule index */
	uint32_t cipher_algo_p = 0;
	uint32_t auth_algo_p = 0;
	uint32_t aead_algo_p = 0;
	uint32_t src_p = 0;
	uint32_t dst_p = 0;
	uint32_t mode_p = 0;
	uint32_t type_p = 0;
	uint32_t portid_p = 0;

	if (strcmp(tokens[0], "in") == 0) {
		ri = &nb_sa_in;

		APP_CHECK(*ri <= IPSEC_SA_MAX_ENTRIES - 1, status,
			"too many sa rules, abort insertion\n");
		if (status->status < 0)
			return;

		rule = &sa_in[*ri];
	} else {
		ri = &nb_sa_out;

		APP_CHECK(*ri <= IPSEC_SA_MAX_ENTRIES - 1, status,
			"too many sa rules, abort insertion\n");
		if (status->status < 0)
			return;

		rule = &sa_out[*ri];
	}

	/* spi number */
	APP_CHECK_TOKEN_IS_NUM(tokens, 1, status);
	if (status->status < 0)
		return;
	if (atoi(tokens[1]) == INVALID_SPI)
		return;
	rule->spi = atoi(tokens[1]);
	ips = ipsec_get_session(rule);

	for (ti = 2; ti < n_tokens; ti++) {
		if (strcmp(tokens[ti], "mode") == 0) {
			APP_CHECK_PRESENCE(mode_p, tokens[ti], status);
			if (status->status < 0)
				return;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			if (strcmp(tokens[ti], "ipv4-tunnel") == 0)
				rule->flags = IP4_TUNNEL;
			else if (strcmp(tokens[ti], "ipv6-tunnel") == 0)
				rule->flags = IP6_TUNNEL;
			else if (strcmp(tokens[ti], "transport") == 0)
				rule->flags = TRANSPORT;
			else {
				APP_CHECK(0, status, "unrecognized "
					"input \"%s\"", tokens[ti]);
				return;
			}

			mode_p = 1;
			continue;
		}

		if (strcmp(tokens[ti], "cipher_algo") == 0) {
			const struct supported_cipher_algo *algo;
			uint32_t key_len;

			APP_CHECK_PRESENCE(cipher_algo_p, tokens[ti],
				status);
			if (status->status < 0)
				return;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			algo = find_match_cipher_algo(tokens[ti]);

			APP_CHECK(algo != NULL, status, "unrecognized "
				"input \"%s\"", tokens[ti]);
			if (status->status < 0)
				return;

			rule->cipher_algo = algo->algo;
			rule->block_size = algo->block_size;
			rule->iv_len = algo->iv_len;
			rule->cipher_key_len = algo->key_len;

			/* for NULL algorithm, no cipher key required */
			if (rule->cipher_algo == RTE_CRYPTO_CIPHER_NULL) {
				cipher_algo_p = 1;
				continue;
			}

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			APP_CHECK(strcmp(tokens[ti], "cipher_key") == 0,
				status, "unrecognized input \"%s\", "
				"expect \"cipher_key\"", tokens[ti]);
			if (status->status < 0)
				return;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			key_len = parse_key_string(tokens[ti],
				rule->cipher_key);
			APP_CHECK(key_len == rule->cipher_key_len, status,
				"unrecognized input \"%s\"", tokens[ti]);
			if (status->status < 0)
				return;

			if (algo->algo == RTE_CRYPTO_CIPHER_AES_CBC ||
				algo->algo == RTE_CRYPTO_CIPHER_3DES_CBC)
				rule->salt = (uint32_t)rte_rand();

			if (algo->algo == RTE_CRYPTO_CIPHER_AES_CTR) {
				key_len -= 4;
				rule->cipher_key_len = key_len;
				memcpy(&rule->salt,
					&rule->cipher_key[key_len], 4);
			}

			cipher_algo_p = 1;
			continue;
		}

		if (strcmp(tokens[ti], "auth_algo") == 0) {
			const struct supported_auth_algo *algo;
			uint32_t key_len;

			APP_CHECK_PRESENCE(auth_algo_p, tokens[ti],
				status);
			if (status->status < 0)
				return;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			algo = find_match_auth_algo(tokens[ti]);
			APP_CHECK(algo != NULL, status, "unrecognized "
				"input \"%s\"", tokens[ti]);
			if (status->status < 0)
				return;

			rule->auth_algo = algo->algo;
			rule->auth_key_len = algo->key_len;
			rule->digest_len = algo->digest_len;

			/* NULL algorithm and combined algos do not
			 * require an auth key
			 */
			if (algo->key_not_req) {
				auth_algo_p = 1;
				continue;
			}

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			APP_CHECK(strcmp(tokens[ti], "auth_key") == 0,
				status, "unrecognized input \"%s\", "
				"expect \"auth_key\"", tokens[ti]);
			if (status->status < 0)
				return;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			key_len = parse_key_string(tokens[ti],
				rule->auth_key);
			APP_CHECK(key_len == rule->auth_key_len, status,
				"unrecognized input \"%s\"", tokens[ti]);
			if (status->status < 0)
				return;

			auth_algo_p = 1;
			continue;
		}

		if (strcmp(tokens[ti], "aead_algo") == 0) {
			const struct supported_aead_algo *algo;
			uint32_t key_len;

			APP_CHECK_PRESENCE(aead_algo_p, tokens[ti],
				status);
			if (status->status < 0)
				return;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			algo = find_match_aead_algo(tokens[ti]);

			APP_CHECK(algo != NULL, status, "unrecognized "
				"input \"%s\"", tokens[ti]);
			if (status->status < 0)
				return;

			rule->aead_algo = algo->algo;
			rule->cipher_key_len = algo->key_len;
			rule->digest_len = algo->digest_len;
			rule->aad_len = algo->aad_len;
			rule->block_size = algo->block_size;
			rule->iv_len = algo->iv_len;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			APP_CHECK(strcmp(tokens[ti], "aead_key") == 0,
				status, "unrecognized input \"%s\", "
				"expect \"aead_key\"", tokens[ti]);
			if (status->status < 0)
				return;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			key_len = parse_key_string(tokens[ti],
				rule->cipher_key);
			APP_CHECK(key_len == rule->cipher_key_len, status,
				"unrecognized input \"%s\"", tokens[ti]);
			if (status->status < 0)
				return;

			key_len -= 4;
			rule->cipher_key_len = key_len;
			memcpy(&rule->salt,
				&rule->cipher_key[key_len], 4);

			aead_algo_p = 1;
			continue;
		}

		if (strcmp(tokens[ti], "src") == 0) {
			APP_CHECK_PRESENCE(src_p, tokens[ti], status);
			if (status->status < 0)
				return;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			if (IS_IP4_TUNNEL(rule->flags)) {
				struct in_addr ip;

				APP_CHECK(parse_ipv4_addr(tokens[ti],
					&ip, NULL) == 0, status,
					"unrecognized input \"%s\", "
					"expect valid ipv4 addr",
					tokens[ti]);
				if (status->status < 0)
					return;
				rule->src.ip.ip4 = rte_bswap32(
					(uint32_t)ip.s_addr);
			} else if (IS_IP6_TUNNEL(rule->flags)) {
				struct in6_addr ip;

				APP_CHECK(parse_ipv6_addr(tokens[ti], &ip,
					NULL) == 0, status,
					"unrecognized input \"%s\", "
					"expect valid ipv6 addr",
					tokens[ti]);
				if (status->status < 0)
					return;
				memcpy(rule->src.ip.ip6.ip6_b,
					ip.s6_addr, 16);
			} else if (IS_TRANSPORT(rule->flags)) {
				APP_CHECK(0, status, "unrecognized input "
					"\"%s\"", tokens[ti]);
				return;
			}

			src_p = 1;
			continue;
		}

		if (strcmp(tokens[ti], "dst") == 0) {
			APP_CHECK_PRESENCE(dst_p, tokens[ti], status);
			if (status->status < 0)
				return;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			if (IS_IP4_TUNNEL(rule->flags)) {
				struct in_addr ip;

				APP_CHECK(parse_ipv4_addr(tokens[ti],
					&ip, NULL) == 0, status,
					"unrecognized input \"%s\", "
					"expect valid ipv4 addr",
					tokens[ti]);
				if (status->status < 0)
					return;
				rule->dst.ip.ip4 = rte_bswap32(
					(uint32_t)ip.s_addr);
			} else if (IS_IP6_TUNNEL(rule->flags)) {
				struct in6_addr ip;

				APP_CHECK(parse_ipv6_addr(tokens[ti], &ip,
					NULL) == 0, status,
					"unrecognized input \"%s\", "
					"expect valid ipv6 addr",
					tokens[ti]);
				if (status->status < 0)
					return;
				memcpy(rule->dst.ip.ip6.ip6_b, ip.s6_addr, 16);
			} else if (IS_TRANSPORT(rule->flags)) {
				APP_CHECK(0, status, "unrecognized "
					"input \"%s\"", tokens[ti]);
				return;
			}

			dst_p = 1;
			continue;
		}

		if (strcmp(tokens[ti], "type") == 0) {
			APP_CHECK_PRESENCE(type_p, tokens[ti], status);
			if (status->status < 0)
				return;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			if (strcmp(tokens[ti], "inline-crypto-offload") == 0)
				ips->type =
					RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO;
			else if (strcmp(tokens[ti],
					"inline-protocol-offload") == 0)
				ips->type =
				RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL;
			else if (strcmp(tokens[ti],
					"lookaside-protocol-offload") == 0)
				ips->type =
				RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL;
			else if (strcmp(tokens[ti], "no-offload") == 0)
				ips->type = RTE_SECURITY_ACTION_TYPE_NONE;
			else {
				APP_CHECK(0, status, "Invalid input \"%s\"",
					tokens[ti]);
				return;
			}

			type_p = 1;
			continue;
		}

		if (strcmp(tokens[ti], "port_id") == 0) {
			APP_CHECK_PRESENCE(portid_p, tokens[ti], status);
			if (status->status < 0)
				return;
			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;
			rule->portid = atoi(tokens[ti]);
			if (status->status < 0)
				return;
			portid_p = 1;
			continue;
		}

		/* unrecognized input */
		APP_CHECK(0, status, "unrecognized input \"%s\"",
			tokens[ti]);
		return;
	}

	if (aead_algo_p) {
		APP_CHECK(cipher_algo_p == 0, status,
			"AEAD used, no need for cipher options");
		if (status->status < 0)
			return;

		APP_CHECK(auth_algo_p == 0, status,
			"AEAD used, no need for auth options");
		if (status->status < 0)
			return;
	} else {
		APP_CHECK(cipher_algo_p == 1, status, "missing cipher or AEAD options");
		if (status->status < 0)
			return;

		APP_CHECK(auth_algo_p == 1, status, "missing auth or AEAD options");
		if (status->status < 0)
			return;
	}

	APP_CHECK(mode_p == 1, status, "missing mode option");
	if (status->status < 0)
		return;

	if ((ips->type != RTE_SECURITY_ACTION_TYPE_NONE) && (portid_p == 0))
		printf("Missing portid option, falling back to non-offload\n");

	if (!type_p || !portid_p) {
		ips->type = RTE_SECURITY_ACTION_TYPE_NONE;
		rule->portid = -1;
	}

	*ri = *ri + 1;
}
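/*
 * For reference, a config line this parser accepts looks roughly like
 * (one line in the cfg file; key bytes shortened here for readability):
 *
 * sa out 5 cipher_algo aes-128-cbc cipher_key a0:a1:...:af \
 *	auth_algo sha1-hmac auth_key a0:a1:...:b3 \
 *	mode ipv4-tunnel src 172.16.1.5 dst 172.16.2.5
 *
 * "..." stands for the remaining colon-separated key bytes; the parser
 * itself requires every byte to be spelled out.
 */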
static void
print_one_sa_rule(const struct ipsec_sa *sa, int inbound)
{
	uint32_t i;
	uint8_t a, b, c, d;
	const struct rte_ipsec_session *ips;

	printf("\tspi_%s(%3u):", inbound?"in":"out", sa->spi);

	for (i = 0; i < RTE_DIM(cipher_algos); i++) {
		if (cipher_algos[i].algo == sa->cipher_algo &&
				cipher_algos[i].key_len == sa->cipher_key_len) {
			printf("%s ", cipher_algos[i].keyword);
			break;
		}
	}

	for (i = 0; i < RTE_DIM(auth_algos); i++) {
		if (auth_algos[i].algo == sa->auth_algo) {
			printf("%s ", auth_algos[i].keyword);
			break;
		}
	}

	for (i = 0; i < RTE_DIM(aead_algos); i++) {
		if (aead_algos[i].algo == sa->aead_algo) {
			printf("%s ", aead_algos[i].keyword);
			break;
		}
	}

	printf("mode:");

	switch (WITHOUT_TRANSPORT_VERSION(sa->flags)) {
	case IP4_TUNNEL:
		printf("IP4Tunnel ");
		uint32_t_to_char(sa->src.ip.ip4, &a, &b, &c, &d);
		printf("%hhu.%hhu.%hhu.%hhu ", d, c, b, a);
		uint32_t_to_char(sa->dst.ip.ip4, &a, &b, &c, &d);
		printf("%hhu.%hhu.%hhu.%hhu", d, c, b, a);
		break;
	case IP6_TUNNEL:
		printf("IP6Tunnel ");
		for (i = 0; i < 16; i++) {
			if (i % 2 && i != 15)
				printf("%.2x:", sa->src.ip.ip6.ip6_b[i]);
			else
				printf("%.2x", sa->src.ip.ip6.ip6_b[i]);
		}
		printf(" ");
		for (i = 0; i < 16; i++) {
			if (i % 2 && i != 15)
				printf("%.2x:", sa->dst.ip.ip6.ip6_b[i]);
			else
				printf("%.2x", sa->dst.ip.ip6.ip6_b[i]);
		}
		break;
	case TRANSPORT:
		printf("Transport ");
		break;
	}

	/* cast away const: the session lookup helper takes a mutable SA */
	ips = ipsec_get_session((struct ipsec_sa *)(uintptr_t)sa);
	printf(" type:");
	switch (ips->type) {
	case RTE_SECURITY_ACTION_TYPE_NONE:
		printf("no-offload ");
		break;
	case RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO:
		printf("inline-crypto-offload ");
		break;
	case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
		printf("inline-protocol-offload ");
		break;
	case RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL:
		printf("lookaside-protocol-offload ");
		break;
	default:
		break;
	}
	printf("\n");
}
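/*
 * Example of a line printed above for an outbound IPv4 tunnel rule
 * (illustrative; spacing and fields depend on the rule):
 *
 *	spi_out(  5):aes-128-cbc sha1-hmac mode:IP4Tunnel 172.16.1.5 172.16.2.5 type:no-offload
 */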
struct sa_ctx {
	void *satbl; /* pointer to array of rte_ipsec_sa objects */
	struct ipsec_sa sa[IPSEC_SA_MAX_ENTRIES];
	struct {
		struct rte_crypto_sym_xform a;
		struct rte_crypto_sym_xform b;
	} xf[IPSEC_SA_MAX_ENTRIES];
};
static struct sa_ctx *
sa_create(const char *name, int32_t socket_id)
{
	char s[PATH_MAX];
	struct sa_ctx *sa_ctx;
	uint32_t mz_size;
	const struct rte_memzone *mz;

	snprintf(s, sizeof(s), "%s_%u", name, socket_id);

	/* Create SA array table */
	printf("Creating SA context with %u maximum entries on socket %d\n",
			IPSEC_SA_MAX_ENTRIES, socket_id);

	mz_size = sizeof(struct sa_ctx);
	mz = rte_memzone_reserve(s, mz_size, socket_id,
			RTE_MEMZONE_1GB | RTE_MEMZONE_SIZE_HINT_ONLY);
	if (mz == NULL) {
		printf("Failed to allocate SA DB memory\n");
		rte_errno = ENOMEM;
		return NULL;
	}

	sa_ctx = (struct sa_ctx *)mz->addr;

	return sa_ctx;
}
static int
check_eth_dev_caps(uint16_t portid, uint32_t inbound)
{
	struct rte_eth_dev_info dev_info;
	int retval;

	retval = rte_eth_dev_info_get(portid, &dev_info);
	if (retval != 0) {
		RTE_LOG(ERR, IPSEC,
			"Error during getting device (port %u) info: %s\n",
			portid, strerror(-retval));

		return retval;
	}

	if (inbound) {
		/* Check if RX offloads are supported */
		if ((dev_info.rx_offload_capa &
				DEV_RX_OFFLOAD_SECURITY) == 0) {
			RTE_LOG(WARNING, PORT,
				"hardware RX IPSec offload is not supported\n");
			return -EINVAL;
		}
	} else { /* outbound */
		if ((dev_info.tx_offload_capa &
				DEV_TX_OFFLOAD_SECURITY) == 0) {
			RTE_LOG(WARNING, PORT,
				"hardware TX IPSec offload is not supported\n");
			return -EINVAL;
		}
	}
	return 0;
}
/*
 * Helper function, tries to determine next_proto for SPI
 * by searching through SP rules.
 */
static int
get_spi_proto(uint32_t spi, enum rte_security_ipsec_sa_direction dir,
		struct ip_addr ip_addr[2], uint32_t mask[2])
{
	int32_t rc4, rc6;

	rc4 = sp4_spi_present(spi, dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
				ip_addr, mask);
	rc6 = sp6_spi_present(spi, dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
				ip_addr, mask);

	if (rc4 >= 0) {
		if (rc6 >= 0) {
			RTE_LOG(ERR, IPSEC,
				"%s: SPI %u used simultaneously by "
				"IPv4(%d) and IPv6 (%d) SP rules\n",
				__func__, spi, rc4, rc6);
			return -EINVAL;
		} else
			return IPPROTO_IPIP;
	} else if (rc6 < 0) {
		RTE_LOG(ERR, IPSEC,
			"%s: SPI %u is not used by any SP rule\n",
			__func__, spi);
		return -EINVAL;
	} else
		return IPPROTO_IPV6;
}
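/*
 * Summary of the returns above: IPPROTO_IPIP when the SPI matches only an
 * IPv4 SP rule, IPPROTO_IPV6 when it matches only an IPv6 one, -EINVAL
 * when it matches both or neither. When ip_addr/mask are non-NULL,
 * sp4_spi_present()/sp6_spi_present() also fill them with the matching
 * rule's src/dst addresses and prefix lengths.
 */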
/*
 * Helper function for getting source and destination IP addresses
 * from the SP. Needed for inline crypto transport mode, as addresses are
 * not provided in the config file for that mode. It checks if an SP for
 * the current SA exists and, based on the protocol type returned, stores
 * the addresses obtained from the SP in the SA.
 */
static int
sa_add_address_inline_crypto(struct ipsec_sa *sa)
{
	int protocol;
	struct ip_addr ip_addr[2];
	uint32_t mask[2];

	protocol = get_spi_proto(sa->spi, sa->direction, ip_addr, mask);
	if (protocol < 0)
		return protocol;
	else if (protocol == IPPROTO_IPIP) {
		sa->flags |= IP4_TRANSPORT;
		if (mask[0] == IP4_FULL_MASK &&
				mask[1] == IP4_FULL_MASK &&
				ip_addr[0].ip.ip4 != 0 &&
				ip_addr[1].ip.ip4 != 0) {

			sa->src.ip.ip4 = ip_addr[0].ip.ip4;
			sa->dst.ip.ip4 = ip_addr[1].ip.ip4;
		} else {
			RTE_LOG(ERR, IPSEC,
				"%s: No valid address or mask entry in"
				" IPv4 SP rule for SPI %u\n",
				__func__, sa->spi);
			return -EINVAL;
		}
	} else if (protocol == IPPROTO_IPV6) {
		sa->flags |= IP6_TRANSPORT;
		if (mask[0] == IP6_FULL_MASK &&
				mask[1] == IP6_FULL_MASK &&
				(ip_addr[0].ip.ip6.ip6[0] != 0 ||
				ip_addr[0].ip.ip6.ip6[1] != 0) &&
				(ip_addr[1].ip.ip6.ip6[0] != 0 ||
				ip_addr[1].ip.ip6.ip6[1] != 0)) {

			sa->src.ip.ip6 = ip_addr[0].ip.ip6;
			sa->dst.ip.ip6 = ip_addr[1].ip.ip6;
		} else {
			RTE_LOG(ERR, IPSEC,
				"%s: No valid address or mask entry in"
				" IPv6 SP rule for SPI %u\n",
				__func__, sa->spi);
			return -EINVAL;
		}
	}
	return 0;
}
static int
sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
		uint32_t nb_entries, uint32_t inbound,
		struct socket_ctx *skt_ctx)
{
	struct ipsec_sa *sa;
	uint32_t i, idx;
	uint16_t iv_length, aad_length;
	int inline_status;
	int32_t rc;
	struct rte_ipsec_session *ips;

	/* for ESN, the upper 32 bits of the SQN also need to be part of AAD */
	aad_length = (app_sa_prm.enable_esn != 0) ? sizeof(uint32_t) : 0;

	for (i = 0; i < nb_entries; i++) {
		idx = SPI2IDX(entries[i].spi);
		sa = &sa_ctx->sa[idx];
		if (sa->spi != 0) {
			printf("Index %u already in use by SPI %u\n",
					idx, sa->spi);
			return -EINVAL;
		}
		*sa = entries[i];
		ips = ipsec_get_session(sa);

		if (ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL ||
			ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
			if (check_eth_dev_caps(sa->portid, inbound))
				return -EINVAL;
		}

		sa->direction = (inbound == 1) ?
				RTE_SECURITY_IPSEC_SA_DIR_INGRESS :
				RTE_SECURITY_IPSEC_SA_DIR_EGRESS;

		switch (WITHOUT_TRANSPORT_VERSION(sa->flags)) {
		case IP4_TUNNEL:
			sa->src.ip.ip4 = rte_cpu_to_be_32(sa->src.ip.ip4);
			sa->dst.ip.ip4 = rte_cpu_to_be_32(sa->dst.ip.ip4);
			break;
		case TRANSPORT:
			if (ips->type ==
				RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
				inline_status =
					sa_add_address_inline_crypto(sa);
				if (inline_status < 0)
					return inline_status;
			}
			break;
		}

		if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_GCM) {
			iv_length = 12;

			sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_AEAD;
			sa_ctx->xf[idx].a.aead.algo = sa->aead_algo;
			sa_ctx->xf[idx].a.aead.key.data = sa->cipher_key;
			sa_ctx->xf[idx].a.aead.key.length =
				sa->cipher_key_len;
			sa_ctx->xf[idx].a.aead.op = (inbound == 1) ?
				RTE_CRYPTO_AEAD_OP_DECRYPT :
				RTE_CRYPTO_AEAD_OP_ENCRYPT;
			sa_ctx->xf[idx].a.next = NULL;
			sa_ctx->xf[idx].a.aead.iv.offset = IV_OFFSET;
			sa_ctx->xf[idx].a.aead.iv.length = iv_length;
			sa_ctx->xf[idx].a.aead.aad_length =
				sa->aad_len + aad_length;
			sa_ctx->xf[idx].a.aead.digest_length =
				sa->digest_len;

			sa->xforms = &sa_ctx->xf[idx].a;

			ips = ipsec_get_session(sa);
			if (ips->type ==
				RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL ||
				ips->type ==
				RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
				rc = create_inline_session(skt_ctx, sa, ips);
				if (rc != 0) {
					RTE_LOG(ERR, IPSEC_ESP,
						"create_inline_session() failed\n");
					return -EINVAL;
				}
			}
			print_one_sa_rule(sa, inbound);
		} else {
			switch (sa->cipher_algo) {
			case RTE_CRYPTO_CIPHER_NULL:
			case RTE_CRYPTO_CIPHER_3DES_CBC:
			case RTE_CRYPTO_CIPHER_AES_CBC:
				iv_length = sa->iv_len;
				break;
			case RTE_CRYPTO_CIPHER_AES_CTR:
				iv_length = 16;
				break;
			default:
				RTE_LOG(ERR, IPSEC_ESP,
					"unsupported cipher algorithm %u\n",
					sa->cipher_algo);
				return -EINVAL;
			}

			if (inbound) {
				sa_ctx->xf[idx].b.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
				sa_ctx->xf[idx].b.cipher.algo = sa->cipher_algo;
				sa_ctx->xf[idx].b.cipher.key.data = sa->cipher_key;
				sa_ctx->xf[idx].b.cipher.key.length =
					sa->cipher_key_len;
				sa_ctx->xf[idx].b.cipher.op =
					RTE_CRYPTO_CIPHER_OP_DECRYPT;
				sa_ctx->xf[idx].b.next = NULL;
				sa_ctx->xf[idx].b.cipher.iv.offset = IV_OFFSET;
				sa_ctx->xf[idx].b.cipher.iv.length = iv_length;

				sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_AUTH;
				sa_ctx->xf[idx].a.auth.algo = sa->auth_algo;
				sa_ctx->xf[idx].a.auth.key.data = sa->auth_key;
				sa_ctx->xf[idx].a.auth.key.length =
					sa->auth_key_len;
				sa_ctx->xf[idx].a.auth.digest_length =
					sa->digest_len;
				sa_ctx->xf[idx].a.auth.op =
					RTE_CRYPTO_AUTH_OP_VERIFY;
			} else { /* outbound */
				sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
				sa_ctx->xf[idx].a.cipher.algo = sa->cipher_algo;
				sa_ctx->xf[idx].a.cipher.key.data = sa->cipher_key;
				sa_ctx->xf[idx].a.cipher.key.length =
					sa->cipher_key_len;
				sa_ctx->xf[idx].a.cipher.op =
					RTE_CRYPTO_CIPHER_OP_ENCRYPT;
				sa_ctx->xf[idx].a.next = NULL;
				sa_ctx->xf[idx].a.cipher.iv.offset = IV_OFFSET;
				sa_ctx->xf[idx].a.cipher.iv.length = iv_length;

				sa_ctx->xf[idx].b.type = RTE_CRYPTO_SYM_XFORM_AUTH;
				sa_ctx->xf[idx].b.auth.algo = sa->auth_algo;
				sa_ctx->xf[idx].b.auth.key.data = sa->auth_key;
				sa_ctx->xf[idx].b.auth.key.length =
					sa->auth_key_len;
				sa_ctx->xf[idx].b.auth.digest_length =
					sa->digest_len;
				sa_ctx->xf[idx].b.auth.op =
					RTE_CRYPTO_AUTH_OP_GENERATE;
			}

			sa_ctx->xf[idx].a.next = &sa_ctx->xf[idx].b;
			sa_ctx->xf[idx].b.next = NULL;
			sa->xforms = &sa_ctx->xf[idx].a;

			print_one_sa_rule(sa, inbound);
		}
	}

	return 0;
}
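/*
 * Crypto xform chains built above, with sa->xforms always pointing at
 * xf[idx].a (a sketch):
 *
 *	AEAD:     a (AEAD encrypt/decrypt) -> NULL
 *	inbound:  a (AUTH verify)    -> b (CIPHER decrypt) -> NULL
 *	outbound: a (CIPHER encrypt) -> b (AUTH generate)  -> NULL
 */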
static inline int
sa_out_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
		uint32_t nb_entries, struct socket_ctx *skt_ctx)
{
	return sa_add_rules(sa_ctx, entries, nb_entries, 0, skt_ctx);
}

static inline int
sa_in_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
		uint32_t nb_entries, struct socket_ctx *skt_ctx)
{
	return sa_add_rules(sa_ctx, entries, nb_entries, 1, skt_ctx);
}
/*
 * helper function, fills parameters that are identical for all SAs
 */
static void
fill_ipsec_app_sa_prm(struct rte_ipsec_sa_prm *prm,
	const struct app_sa_prm *app_prm)
{
	memset(prm, 0, sizeof(*prm));

	prm->flags = app_prm->flags;
	prm->ipsec_xform.options.esn = app_prm->enable_esn;
	prm->replay_win_sz = app_prm->window_size;
}
static int
fill_ipsec_sa_prm(struct rte_ipsec_sa_prm *prm, const struct ipsec_sa *ss,
	const struct rte_ipv4_hdr *v4, struct rte_ipv6_hdr *v6)
{
	int32_t rc;

	/*
	 * Try to get the SPI's next proto by searching for that SPI in the
	 * SPD. Probably not the optimal way, but nothing better seems
	 * available right now.
	 */
	rc = get_spi_proto(ss->spi, ss->direction, NULL, NULL);
	if (rc < 0)
		return rc;

	fill_ipsec_app_sa_prm(prm, &app_sa_prm);
	prm->userdata = (uintptr_t)ss;

	/* setup ipsec xform */
	prm->ipsec_xform.spi = ss->spi;
	prm->ipsec_xform.salt = ss->salt;
	prm->ipsec_xform.direction = ss->direction;
	prm->ipsec_xform.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP;
	prm->ipsec_xform.mode = (IS_TRANSPORT(ss->flags)) ?
		RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT :
		RTE_SECURITY_IPSEC_SA_MODE_TUNNEL;
	prm->ipsec_xform.options.ecn = 1;
	prm->ipsec_xform.options.copy_dscp = 1;

	if (IS_IP4_TUNNEL(ss->flags)) {
		prm->ipsec_xform.tunnel.type = RTE_SECURITY_IPSEC_TUNNEL_IPV4;
		prm->tun.hdr_len = sizeof(*v4);
		prm->tun.next_proto = rc;
		prm->tun.hdr = v4;
	} else if (IS_IP6_TUNNEL(ss->flags)) {
		prm->ipsec_xform.tunnel.type = RTE_SECURITY_IPSEC_TUNNEL_IPV6;
		prm->tun.hdr_len = sizeof(*v6);
		prm->tun.next_proto = rc;
		prm->tun.hdr = v6;
	} else {
		/* transport mode */
		prm->trs.proto = rc;
	}

	/* setup crypto section */
	prm->crypto_xform = ss->xforms;
	return 0;
}
static int
fill_ipsec_session(struct rte_ipsec_session *ss, struct rte_ipsec_sa *sa)
{
	int32_t rc = 0;

	ss->sa = sa;

	if (ss->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
			ss->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) {
		if (ss->security.ses != NULL) {
			rc = rte_ipsec_session_prepare(ss);
			if (rc != 0)
				memset(ss, 0, sizeof(*ss));
		}
	}

	return rc;
}
/*
 * Initialise the related rte_ipsec_sa object.
 */
static int
ipsec_sa_init(struct ipsec_sa *lsa, struct rte_ipsec_sa *sa, uint32_t sa_size)
{
	int rc;
	struct rte_ipsec_sa_prm prm;
	struct rte_ipsec_session *ips;
	struct rte_ipv4_hdr v4 = {
		.version_ihl = IPVERSION << 4 |
			sizeof(v4) / RTE_IPV4_IHL_MULTIPLIER,
		.time_to_live = IPDEFTTL,
		.next_proto_id = IPPROTO_ESP,
		.src_addr = lsa->src.ip.ip4,
		.dst_addr = lsa->dst.ip.ip4,
	};
	struct rte_ipv6_hdr v6 = {
		.vtc_flow = htonl(IP6_VERSION << 28),
		.proto = IPPROTO_ESP,
	};

	if (IS_IP6_TUNNEL(lsa->flags)) {
		memcpy(v6.src_addr, lsa->src.ip.ip6.ip6_b,
			sizeof(v6.src_addr));
		memcpy(v6.dst_addr, lsa->dst.ip.ip6.ip6_b,
			sizeof(v6.dst_addr));
	}

	rc = fill_ipsec_sa_prm(&prm, lsa, &v4, &v6);
	if (rc == 0)
		rc = rte_ipsec_sa_init(sa, &prm, sa_size);
	if (rc < 0)
		return rc;

	/* init processing session */
	ips = ipsec_get_session(lsa);
	rc = fill_ipsec_session(ips, sa);

	return rc;
}
/*
 * Allocate space and init rte_ipsec_sa structures,
 * one per session.
 */
static int
ipsec_satbl_init(struct sa_ctx *ctx, const struct ipsec_sa *ent,
	uint32_t nb_ent, int32_t socket)
{
	int32_t rc, sz;
	uint32_t i, idx;
	size_t tsz;
	struct rte_ipsec_sa *sa;
	struct ipsec_sa *lsa;
	struct rte_ipsec_sa_prm prm;

	/* determine SA size */
	idx = SPI2IDX(ent[0].spi);
	fill_ipsec_sa_prm(&prm, ctx->sa + idx, NULL, NULL);
	sz = rte_ipsec_sa_size(&prm);
	if (sz < 0) {
		RTE_LOG(ERR, IPSEC, "%s(%p, %u, %d): "
			"failed to determine SA size, error code: %d\n",
			__func__, ctx, nb_ent, socket, sz);
		return sz;
	}

	tsz = sz * nb_ent;

	ctx->satbl = rte_zmalloc_socket(NULL, tsz, RTE_CACHE_LINE_SIZE, socket);
	if (ctx->satbl == NULL) {
		RTE_LOG(ERR, IPSEC,
			"%s(%p, %u, %d): failed to allocate %zu bytes\n",
			__func__, ctx, nb_ent, socket, tsz);
		return -ENOMEM;
	}

	rc = 0;
	for (i = 0; i != nb_ent && rc == 0; i++) {

		idx = SPI2IDX(ent[i].spi);

		sa = (struct rte_ipsec_sa *)((uintptr_t)ctx->satbl + sz * i);
		lsa = ctx->sa + idx;

		rc = ipsec_sa_init(lsa, sa, sz);
	}

	return rc;
}
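/*
 * Layout note: ctx->satbl is one flat allocation of nb_ent slots of
 * rte_ipsec_sa_size() bytes each, so entry i lives at satbl + i * sz,
 * while the companion ipsec_sa stays indexed by SPI2IDX(spi).
 */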
/*
 * Walk through all SA rules to find an SA with given SPI
 */
int
sa_spi_present(uint32_t spi, int inbound)
{
	uint32_t i, num;
	const struct ipsec_sa *sar;

	if (inbound != 0) {
		sar = sa_in;
		num = nb_sa_in;
	} else {
		sar = sa_out;
		num = nb_sa_out;
	}

	for (i = 0; i != num; i++) {
		if (sar[i].spi == spi)
			return i;
	}

	return -ENOENT;
}
void
sa_init(struct socket_ctx *ctx, int32_t socket_id)
{
	int32_t rc;
	const char *name;

	if (ctx == NULL)
		rte_exit(EXIT_FAILURE, "NULL context.\n");

	if (ctx->sa_in != NULL)
		rte_exit(EXIT_FAILURE, "Inbound SA DB for socket %u already "
				"initialized\n", socket_id);

	if (ctx->sa_out != NULL)
		rte_exit(EXIT_FAILURE, "Outbound SA DB for socket %u already "
				"initialized\n", socket_id);

	if (nb_sa_in > 0) {
		name = "sa_in";
		ctx->sa_in = sa_create(name, socket_id);
		if (ctx->sa_in == NULL)
			rte_exit(EXIT_FAILURE, "Error [%d] creating SA "
				"context %s in socket %d\n", rte_errno,
				name, socket_id);

		sa_in_add_rules(ctx->sa_in, sa_in, nb_sa_in, ctx);

		if (app_sa_prm.enable != 0) {
			rc = ipsec_satbl_init(ctx->sa_in, sa_in, nb_sa_in,
				socket_id);
			if (rc != 0)
				rte_exit(EXIT_FAILURE,
					"failed to init inbound SAs\n");
		}
	} else
		RTE_LOG(WARNING, IPSEC, "No SA Inbound rule specified\n");

	if (nb_sa_out > 0) {
		name = "sa_out";
		ctx->sa_out = sa_create(name, socket_id);
		if (ctx->sa_out == NULL)
			rte_exit(EXIT_FAILURE, "Error [%d] creating SA "
				"context %s in socket %d\n", rte_errno,
				name, socket_id);

		sa_out_add_rules(ctx->sa_out, sa_out, nb_sa_out, ctx);

		if (app_sa_prm.enable != 0) {
			rc = ipsec_satbl_init(ctx->sa_out, sa_out, nb_sa_out,
				socket_id);
			if (rc != 0)
				rte_exit(EXIT_FAILURE,
					"failed to init outbound SAs\n");
		}
	} else
		RTE_LOG(WARNING, IPSEC,
			"No SA Outbound rule specified\n");
}
int
inbound_sa_check(struct sa_ctx *sa_ctx, struct rte_mbuf *m, uint32_t sa_idx)
{
	struct ipsec_mbuf_metadata *priv;
	struct ipsec_sa *sa;

	priv = get_priv(m);
	sa = priv->sa;
	if (sa != NULL)
		return (sa_ctx->sa[sa_idx].spi == sa->spi);

	RTE_LOG(ERR, IPSEC, "SA not saved in private data\n");
	return 0;
}
static inline void
single_inbound_lookup(struct ipsec_sa *sadb, struct rte_mbuf *pkt,
		struct ipsec_sa **sa_ret)
{
	struct rte_esp_hdr *esp;
	struct ip *ip;
	uint32_t *src4_addr;
	uint8_t *src6_addr;
	struct ipsec_sa *sa;

	*sa_ret = NULL;

	ip = rte_pktmbuf_mtod(pkt, struct ip *);
	esp = rte_pktmbuf_mtod_offset(pkt, struct rte_esp_hdr *, pkt->l3_len);

	if (esp->spi == INVALID_SPI)
		return;

	sa = &sadb[SPI2IDX(rte_be_to_cpu_32(esp->spi))];
	if (rte_be_to_cpu_32(esp->spi) != sa->spi)
		return;

	switch (WITHOUT_TRANSPORT_VERSION(sa->flags)) {
	case IP4_TUNNEL:
		src4_addr = RTE_PTR_ADD(ip, offsetof(struct ip, ip_src));
		if ((ip->ip_v == IPVERSION) &&
				(sa->src.ip.ip4 == *src4_addr) &&
				(sa->dst.ip.ip4 == *(src4_addr + 1)))
			*sa_ret = sa;
		break;
	case IP6_TUNNEL:
		src6_addr = RTE_PTR_ADD(ip, offsetof(struct ip6_hdr, ip6_src));
		if ((ip->ip_v == IP6_VERSION) &&
				!memcmp(&sa->src.ip.ip6.ip6, src6_addr, 16) &&
				!memcmp(&sa->dst.ip.ip6.ip6, src6_addr + 16, 16))
			*sa_ret = sa;
		break;
	case TRANSPORT:
		*sa_ret = sa;
	}
}
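/*
 * Matching rules implemented above: the SPI selects a slot via SPI2IDX()
 * and must match exactly; tunnel SAs additionally compare the outer
 * src/dst addresses, while transport SAs match on SPI alone.
 */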
void
inbound_sa_lookup(struct sa_ctx *sa_ctx, struct rte_mbuf *pkts[],
		struct ipsec_sa *sa[], uint16_t nb_pkts)
{
	uint32_t i;

	for (i = 0; i < nb_pkts; i++)
		single_inbound_lookup(sa_ctx->sa, pkts[i], &sa[i]);
}
void
outbound_sa_lookup(struct sa_ctx *sa_ctx, uint32_t sa_idx[],
		struct ipsec_sa *sa[], uint16_t nb_pkts)
{
	uint32_t i;

	for (i = 0; i < nb_pkts; i++)
		sa[i] = &sa_ctx->sa[sa_idx[i]];
}
/*
 * Select HW offloads to be used.
 */
int
sa_check_offloads(uint16_t port_id, uint64_t *rx_offloads,
		uint64_t *tx_offloads)
{
	struct ipsec_sa *rule;
	uint32_t idx_sa;
	enum rte_security_session_action_type rule_type;

	*rx_offloads = 0;
	*tx_offloads = 0;

	/* Check for inbound rules that use offloads and use this port */
	for (idx_sa = 0; idx_sa < nb_sa_in; idx_sa++) {
		rule = &sa_in[idx_sa];
		rule_type = ipsec_get_action_type(rule);
		if ((rule_type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
				rule_type ==
				RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
				&& rule->portid == port_id)
			*rx_offloads |= DEV_RX_OFFLOAD_SECURITY;
	}

	/* Check for outbound rules that use offloads and use this port */
	for (idx_sa = 0; idx_sa < nb_sa_out; idx_sa++) {
		rule = &sa_out[idx_sa];
		rule_type = ipsec_get_action_type(rule);
		if ((rule_type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
				rule_type ==
				RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
				&& rule->portid == port_id)
			*tx_offloads |= DEV_TX_OFFLOAD_SECURITY;
	}
	return 0;
}
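/*
 * Typical use (illustrative): merge the returned flags into the port
 * configuration before rte_eth_dev_configure(), e.g.
 *
 *	uint64_t rx = 0, tx = 0;
 *	sa_check_offloads(portid, &rx, &tx);
 *	port_conf.rxmode.offloads |= rx;
 *	port_conf.txmode.offloads |= tx;
 */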