/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2020 Intel Corporation
 */

/*
 * Security Associations
 */

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>

#include <rte_memzone.h>
#include <rte_crypto.h>
#include <rte_security.h>
#include <rte_cryptodev.h>
#include <rte_byteorder.h>
#include <rte_errno.h>
#include <rte_random.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>

#include "ipsec.h"
#include "esp.h"
#include "parser.h"
#include "sad.h"
#define IP4_FULL_MASK (sizeof(((struct ip_addr *)NULL)->ip.ip4) * CHAR_BIT)

#define IP6_FULL_MASK (sizeof(((struct ip_addr *)NULL)->ip.ip6.ip6) * CHAR_BIT)

#define MBUF_NO_SEC_OFFLOAD(m) ((m->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD) == 0)
struct supported_cipher_algo {
	const char *keyword;
	enum rte_crypto_cipher_algorithm algo;
	uint16_t iv_len;
	uint16_t block_size;
	uint16_t key_len;
};

struct supported_auth_algo {
	const char *keyword;
	enum rte_crypto_auth_algorithm algo;
	uint16_t iv_len;
	uint16_t digest_len;
	uint16_t key_len;
	uint8_t key_not_req;
};

struct supported_aead_algo {
	const char *keyword;
	enum rte_crypto_aead_algorithm algo;
	uint16_t iv_len;
	uint16_t block_size;
	uint16_t digest_len;
	uint16_t key_len;
	uint8_t aad_len;
};
const struct supported_cipher_algo cipher_algos[] = {
	{ .keyword = "null", .algo = RTE_CRYPTO_CIPHER_NULL,
		.iv_len = 0, .block_size = 4, .key_len = 0 },
	{ .keyword = "aes-128-cbc", .algo = RTE_CRYPTO_CIPHER_AES_CBC,
		.iv_len = 16, .block_size = 16, .key_len = 16 },
	{ .keyword = "aes-192-cbc", .algo = RTE_CRYPTO_CIPHER_AES_CBC,
		.iv_len = 16, .block_size = 16, .key_len = 24 },
	{ .keyword = "aes-256-cbc", .algo = RTE_CRYPTO_CIPHER_AES_CBC,
		.iv_len = 16, .block_size = 16, .key_len = 32 },
	/* CTR key lengths include the 4-byte nonce salt appended to the key */
	{ .keyword = "aes-128-ctr", .algo = RTE_CRYPTO_CIPHER_AES_CTR,
		.iv_len = 8, .block_size = 4, .key_len = 20 },
	{ .keyword = "aes-192-ctr", .algo = RTE_CRYPTO_CIPHER_AES_CTR,
		.iv_len = 16, .block_size = 16, .key_len = 28 },
	{ .keyword = "aes-256-ctr", .algo = RTE_CRYPTO_CIPHER_AES_CTR,
		.iv_len = 16, .block_size = 16, .key_len = 36 },
	{ .keyword = "3des-cbc", .algo = RTE_CRYPTO_CIPHER_3DES_CBC,
		.iv_len = 8, .block_size = 8, .key_len = 24 }
};
const struct supported_auth_algo auth_algos[] = {
	{ .keyword = "null", .algo = RTE_CRYPTO_AUTH_NULL,
		.digest_len = 0, .key_len = 0, .key_not_req = 1 },
	{ .keyword = "sha1-hmac", .algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
		.digest_len = 12, .key_len = 20 },
	{ .keyword = "sha256-hmac", .algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
		.digest_len = 16, .key_len = 32 },
	{ .keyword = "sha384-hmac", .algo = RTE_CRYPTO_AUTH_SHA384_HMAC,
		.digest_len = 24, .key_len = 48 },
	{ .keyword = "sha512-hmac", .algo = RTE_CRYPTO_AUTH_SHA512_HMAC,
		.digest_len = 32, .key_len = 64 },
	/* GMAC key length includes the 4-byte nonce salt */
	{ .keyword = "aes-gmac", .algo = RTE_CRYPTO_AUTH_AES_GMAC,
		.iv_len = 8, .digest_len = 16, .key_len = 20 },
	{ .keyword = "aes-xcbc-mac-96", .algo = RTE_CRYPTO_AUTH_AES_XCBC_MAC,
		.digest_len = 12, .key_len = 16 }
};
const struct supported_aead_algo aead_algos[] = {
	{ .keyword = "aes-128-gcm", .algo = RTE_CRYPTO_AEAD_AES_GCM,
		.iv_len = 8, .block_size = 4, .key_len = 20,
		.digest_len = 16, .aad_len = 8 },
	{ .keyword = "aes-192-gcm", .algo = RTE_CRYPTO_AEAD_AES_GCM,
		.iv_len = 8, .block_size = 4, .key_len = 28,
		.digest_len = 16, .aad_len = 8 },
	{ .keyword = "aes-256-gcm", .algo = RTE_CRYPTO_AEAD_AES_GCM,
		.iv_len = 8, .block_size = 4, .key_len = 36,
		.digest_len = 16, .aad_len = 8 },
	{ .keyword = "aes-128-ccm", .algo = RTE_CRYPTO_AEAD_AES_CCM,
		.iv_len = 8, .block_size = 4, .key_len = 20,
		.digest_len = 16, .aad_len = 8 },
	{ .keyword = "aes-192-ccm", .algo = RTE_CRYPTO_AEAD_AES_CCM,
		.iv_len = 8, .block_size = 4, .key_len = 28,
		.digest_len = 16, .aad_len = 8 },
	{ .keyword = "aes-256-ccm", .algo = RTE_CRYPTO_AEAD_AES_CCM,
		.iv_len = 8, .block_size = 4, .key_len = 36,
		.digest_len = 16, .aad_len = 8 },
	{ .keyword = "chacha20-poly1305",
		.algo = RTE_CRYPTO_AEAD_CHACHA20_POLY1305,
		.iv_len = 12, .block_size = 64, .key_len = 36,
		.digest_len = 16, .aad_len = 8 }
};
#define SA_INIT_NB	128

static uint32_t nb_crypto_sessions;

struct ipsec_sa *sa_out;
uint32_t nb_sa_out;
static uint32_t sa_out_sz;
static struct ipsec_sa_cnt sa_out_cnt;

struct ipsec_sa *sa_in;
uint32_t nb_sa_in;
static uint32_t sa_in_sz;
static struct ipsec_sa_cnt sa_in_cnt;
static const struct supported_cipher_algo *
find_match_cipher_algo(const char *cipher_keyword)
{
	uint32_t i;

	for (i = 0; i < RTE_DIM(cipher_algos); i++) {
		const struct supported_cipher_algo *algo =
			&cipher_algos[i];

		if (strcmp(cipher_keyword, algo->keyword) == 0)
			return algo;
	}

	return NULL;
}

static const struct supported_auth_algo *
find_match_auth_algo(const char *auth_keyword)
{
	uint32_t i;

	for (i = 0; i < RTE_DIM(auth_algos); i++) {
		const struct supported_auth_algo *algo =
			&auth_algos[i];

		if (strcmp(auth_keyword, algo->keyword) == 0)
			return algo;
	}

	return NULL;
}

static const struct supported_aead_algo *
find_match_aead_algo(const char *aead_keyword)
{
	uint32_t i;

	for (i = 0; i < RTE_DIM(aead_algos); i++) {
		const struct supported_aead_algo *algo =
			&aead_algos[i];

		if (strcmp(aead_keyword, algo->keyword) == 0)
			return algo;
	}

	return NULL;
}
/*
 * parse x:x:x:x.... hex number key string into uint8_t *key
 * return:
 *	> 0: number of bytes parsed
 *	0:   failed
 */
static uint32_t
parse_key_string(const char *key_str, uint8_t *key)
{
	const char *pt_start = key_str, *pt_end = key_str;
	uint32_t nb_bytes = 0;

	while (pt_end != NULL) {
		char sub_str[3] = {0};

		pt_end = strchr(pt_start, ':');

		if (pt_end == NULL) {
			if (strlen(pt_start) > 2)
				return 0;
			strncpy(sub_str, pt_start, 2);
		} else {
			if (pt_end - pt_start > 2)
				return 0;

			strncpy(sub_str, pt_start, pt_end - pt_start);
			pt_start = pt_end + 1;
		}

		key[nb_bytes++] = strtol(sub_str, NULL, 16);
	}

	return nb_bytes;
}
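
/*
 * Illustrative example (not part of the original file): a 16-byte
 * AES-128 key written in the colon-separated hex form this parser
 * accepts.
 *
 *	uint8_t key[64];
 *	uint32_t len = parse_key_string(
 *		"de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef", key);
 *	// len == 16 on success, 0 on malformed input
 */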
static int
extend_sa_arr(struct ipsec_sa **sa_tbl, uint32_t cur_cnt, uint32_t *cur_sz)
{
	if (*sa_tbl == NULL) {
		*sa_tbl = calloc(SA_INIT_NB, sizeof(struct ipsec_sa));
		if (*sa_tbl == NULL)
			return -1;
		*cur_sz = SA_INIT_NB;
		return 0;
	}

	if (cur_cnt >= *cur_sz) {
		/* grow via a temporary so the table is not leaked if
		 * realloc fails
		 */
		struct ipsec_sa *tmp = realloc(*sa_tbl,
			*cur_sz * sizeof(struct ipsec_sa) * 2);
		if (tmp == NULL)
			return -1;
		*sa_tbl = tmp;
		/* clean reallocated extra space */
		memset(&(*sa_tbl)[*cur_sz], 0,
			*cur_sz * sizeof(struct ipsec_sa));
		*cur_sz *= 2;
	}

	return 0;
}
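
/*
 * Usage sketch (illustrative, not part of the original file):
 * parse_sa_tokens() below grows the global rule tables one entry at
 * a time, e.g.
 *
 *	if (extend_sa_arr(&sa_in, nb_sa_in, &sa_in_sz) < 0)
 *		return;
 *	rule = &sa_in[nb_sa_in];
 *
 * Doubling the capacity on each overflow keeps the amortized cost of
 * appending a rule constant.
 */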
static void
parse_sa_tokens(char **tokens, uint32_t n_tokens,
	struct parse_status *status)
{
	struct ipsec_sa *rule = NULL;
	struct rte_ipsec_session *ips;
	uint32_t ti; /*token index*/
	uint32_t *ri; /*rule index*/
	struct ipsec_sa_cnt *sa_cnt;
	uint32_t cipher_algo_p = 0;
	uint32_t auth_algo_p = 0;
	uint32_t aead_algo_p = 0;
	uint32_t src_p = 0;
	uint32_t dst_p = 0;
	uint32_t mode_p = 0;
	uint32_t type_p = 0;
	uint32_t portid_p = 0;
	uint32_t fallback_p = 0;
	int16_t status_p = 0;
	uint16_t udp_encap_p = 0;
	if (strcmp(tokens[0], "in") == 0) {
		ri = &nb_sa_in;
		sa_cnt = &sa_in_cnt;
		if (extend_sa_arr(&sa_in, nb_sa_in, &sa_in_sz) < 0)
			return;
		rule = &sa_in[*ri];
		rule->direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS;
	} else {
		ri = &nb_sa_out;
		sa_cnt = &sa_out_cnt;
		if (extend_sa_arr(&sa_out, nb_sa_out, &sa_out_sz) < 0)
			return;
		rule = &sa_out[*ri];
		rule->direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS;
	}

	/* spi number */
	APP_CHECK_TOKEN_IS_NUM(tokens, 1, status);
	if (status->status < 0)
		return;
	if (atoi(tokens[1]) == INVALID_SPI)
		return;
	rule->spi = atoi(tokens[1]);
	rule->portid = UINT16_MAX;
	ips = ipsec_get_primary_session(rule);
	for (ti = 2; ti < n_tokens; ti++) {
		if (strcmp(tokens[ti], "mode") == 0) {
			APP_CHECK_PRESENCE(mode_p, tokens[ti], status);
			if (status->status < 0)
				return;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			if (strcmp(tokens[ti], "ipv4-tunnel") == 0) {
				sa_cnt->nb_v4++;
				rule->flags |= IP4_TUNNEL;
			} else if (strcmp(tokens[ti], "ipv6-tunnel") == 0) {
				sa_cnt->nb_v6++;
				rule->flags |= IP6_TUNNEL;
			} else if (strcmp(tokens[ti], "transport") == 0) {
				sa_cnt->nb_v4++;
				sa_cnt->nb_v6++;
				rule->flags |= TRANSPORT;
			} else {
				APP_CHECK(0, status, "unrecognized "
					"input \"%s\"", tokens[ti]);
				return;
			}

			mode_p = 1;
			continue;
		}

		if (strcmp(tokens[ti], "telemetry") == 0) {
			rule->flags |= SA_TELEMETRY_ENABLE;
			continue;
		}
		if (strcmp(tokens[ti], "cipher_algo") == 0) {
			const struct supported_cipher_algo *algo;
			uint32_t key_len;

			APP_CHECK_PRESENCE(cipher_algo_p, tokens[ti],
				status);
			if (status->status < 0)
				return;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			algo = find_match_cipher_algo(tokens[ti]);

			APP_CHECK(algo != NULL, status, "unrecognized "
				"input \"%s\"", tokens[ti]);

			if (status->status < 0)
				return;

			rule->cipher_algo = algo->algo;
			rule->block_size = algo->block_size;
			rule->iv_len = algo->iv_len;
			rule->cipher_key_len = algo->key_len;

			/* for NULL algorithm, no cipher key required */
			if (rule->cipher_algo == RTE_CRYPTO_CIPHER_NULL) {
				cipher_algo_p = 1;
				continue;
			}

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			APP_CHECK(strcmp(tokens[ti], "cipher_key") == 0,
				status, "unrecognized input \"%s\", "
				"expect \"cipher_key\"", tokens[ti]);
			if (status->status < 0)
				return;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			key_len = parse_key_string(tokens[ti],
				rule->cipher_key);
			APP_CHECK(key_len == rule->cipher_key_len, status,
				"unrecognized input \"%s\"", tokens[ti]);
			if (status->status < 0)
				return;

			if (algo->algo == RTE_CRYPTO_CIPHER_AES_CBC ||
				algo->algo == RTE_CRYPTO_CIPHER_3DES_CBC)
				rule->salt = (uint32_t)rte_rand();

			if (algo->algo == RTE_CRYPTO_CIPHER_AES_CTR) {
				key_len -= 4;
				rule->cipher_key_len = key_len;
				memcpy(&rule->salt,
					&rule->cipher_key[key_len], 4);
			}

			cipher_algo_p = 1;
			continue;
		}
		if (strcmp(tokens[ti], "auth_algo") == 0) {
			const struct supported_auth_algo *algo;
			uint32_t key_len;

			APP_CHECK_PRESENCE(auth_algo_p, tokens[ti],
				status);
			if (status->status < 0)
				return;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			algo = find_match_auth_algo(tokens[ti]);
			APP_CHECK(algo != NULL, status, "unrecognized "
				"input \"%s\"", tokens[ti]);

			if (status->status < 0)
				return;

			rule->auth_algo = algo->algo;
			rule->auth_key_len = algo->key_len;
			rule->digest_len = algo->digest_len;

			/* NULL algorithm and combined algos do not
			 * require auth key
			 */
			if (algo->key_not_req) {
				auth_algo_p = 1;
				continue;
			}

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			APP_CHECK(strcmp(tokens[ti], "auth_key") == 0,
				status, "unrecognized input \"%s\", "
				"expect \"auth_key\"", tokens[ti]);
			if (status->status < 0)
				return;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			key_len = parse_key_string(tokens[ti],
				rule->auth_key);
			APP_CHECK(key_len == rule->auth_key_len, status,
				"unrecognized input \"%s\"", tokens[ti]);
			if (status->status < 0)
				return;

			if (algo->algo == RTE_CRYPTO_AUTH_AES_GMAC) {
				key_len -= 4;
				rule->auth_key_len = key_len;
				rule->iv_len = algo->iv_len;
				memcpy(&rule->salt,
					&rule->auth_key[key_len], 4);
			}

			auth_algo_p = 1;
			continue;
		}
		if (strcmp(tokens[ti], "aead_algo") == 0) {
			const struct supported_aead_algo *algo;
			uint32_t key_len;

			APP_CHECK_PRESENCE(aead_algo_p, tokens[ti],
				status);
			if (status->status < 0)
				return;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			algo = find_match_aead_algo(tokens[ti]);

			APP_CHECK(algo != NULL, status, "unrecognized "
				"input \"%s\"", tokens[ti]);

			if (status->status < 0)
				return;

			rule->aead_algo = algo->algo;
			rule->cipher_key_len = algo->key_len;
			rule->digest_len = algo->digest_len;
			rule->aad_len = algo->aad_len;
			rule->block_size = algo->block_size;
			rule->iv_len = algo->iv_len;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			APP_CHECK(strcmp(tokens[ti], "aead_key") == 0,
				status, "unrecognized input \"%s\", "
				"expect \"aead_key\"", tokens[ti]);
			if (status->status < 0)
				return;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			key_len = parse_key_string(tokens[ti],
				rule->cipher_key);
			APP_CHECK(key_len == rule->cipher_key_len, status,
				"unrecognized input \"%s\"", tokens[ti]);
			if (status->status < 0)
				return;

			key_len -= 4;
			rule->cipher_key_len = key_len;
			memcpy(&rule->salt,
				&rule->cipher_key[key_len], 4);

			aead_algo_p = 1;
			continue;
		}
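
		/*
		 * Note (added for clarity): for the AES-CTR, AES-GMAC and
		 * AEAD entries above, the configured key string carries the
		 * key followed by a 4-byte nonce salt. That is why key_len
		 * is reduced by 4 after parsing and the trailing bytes are
		 * copied into rule->salt instead of staying in the key.
		 */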
		if (strcmp(tokens[ti], "src") == 0) {
			APP_CHECK_PRESENCE(src_p, tokens[ti], status);
			if (status->status < 0)
				return;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			if (IS_IP4_TUNNEL(rule->flags)) {
				struct in_addr ip;

				APP_CHECK(parse_ipv4_addr(tokens[ti],
					&ip, NULL) == 0, status,
					"unrecognized input \"%s\", "
					"expect valid ipv4 addr",
					tokens[ti]);
				if (status->status < 0)
					return;
				rule->src.ip.ip4 = rte_bswap32(
					(uint32_t)ip.s_addr);
			} else if (IS_IP6_TUNNEL(rule->flags)) {
				struct in6_addr ip;

				APP_CHECK(parse_ipv6_addr(tokens[ti], &ip,
					NULL) == 0, status,
					"unrecognized input \"%s\", "
					"expect valid ipv6 addr",
					tokens[ti]);
				if (status->status < 0)
					return;
				memcpy(rule->src.ip.ip6.ip6_b,
					ip.s6_addr, 16);
			} else if (IS_TRANSPORT(rule->flags)) {
				APP_CHECK(0, status, "unrecognized input "
					"\"%s\"", tokens[ti]);
				return;
			}

			src_p = 1;
			continue;
		}
		if (strcmp(tokens[ti], "dst") == 0) {
			APP_CHECK_PRESENCE(dst_p, tokens[ti], status);
			if (status->status < 0)
				return;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			if (IS_IP4_TUNNEL(rule->flags)) {
				struct in_addr ip;

				APP_CHECK(parse_ipv4_addr(tokens[ti],
					&ip, NULL) == 0, status,
					"unrecognized input \"%s\", "
					"expect valid ipv4 addr",
					tokens[ti]);
				if (status->status < 0)
					return;
				rule->dst.ip.ip4 = rte_bswap32(
					(uint32_t)ip.s_addr);
			} else if (IS_IP6_TUNNEL(rule->flags)) {
				struct in6_addr ip;

				APP_CHECK(parse_ipv6_addr(tokens[ti], &ip,
					NULL) == 0, status,
					"unrecognized input \"%s\", "
					"expect valid ipv6 addr",
					tokens[ti]);
				if (status->status < 0)
					return;
				memcpy(rule->dst.ip.ip6.ip6_b, ip.s6_addr, 16);
			} else if (IS_TRANSPORT(rule->flags)) {
				APP_CHECK(0, status, "unrecognized "
					"input \"%s\"", tokens[ti]);
				return;
			}

			dst_p = 1;
			continue;
		}
		if (strcmp(tokens[ti], "type") == 0) {
			APP_CHECK_PRESENCE(type_p, tokens[ti], status);
			if (status->status < 0)
				return;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			if (strcmp(tokens[ti], "inline-crypto-offload") == 0)
				ips->type =
					RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO;
			else if (strcmp(tokens[ti],
					"inline-protocol-offload") == 0)
				ips->type =
				RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL;
			else if (strcmp(tokens[ti],
					"lookaside-protocol-offload") == 0)
				ips->type =
				RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL;
			else if (strcmp(tokens[ti], "no-offload") == 0)
				ips->type = RTE_SECURITY_ACTION_TYPE_NONE;
			else if (strcmp(tokens[ti], "cpu-crypto") == 0)
				ips->type = RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO;
			else {
				APP_CHECK(0, status, "Invalid input \"%s\"",
					tokens[ti]);
				return;
			}

			type_p = 1;
			continue;
		}
		if (strcmp(tokens[ti], "port_id") == 0) {
			APP_CHECK_PRESENCE(portid_p, tokens[ti], status);
			if (status->status < 0)
				return;
			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;
			if (rule->portid == UINT16_MAX)
				rule->portid = atoi(tokens[ti]);
			else if (rule->portid != atoi(tokens[ti])) {
				APP_CHECK(0, status,
					"portid %s not matching with already assigned portid %u",
					tokens[ti], rule->portid);
				return;
			}
			portid_p = 1;
			continue;
		}
		if (strcmp(tokens[ti], "mss") == 0) {
			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;
			rule->mss = atoi(tokens[ti]);
			if (status->status < 0)
				return;
			continue;
		}

		if (strcmp(tokens[ti], "esn") == 0) {
			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;
			rule->esn = atoll(tokens[ti]);
			if (status->status < 0)
				return;
			continue;
		}
		if (strcmp(tokens[ti], "fallback") == 0) {
			struct rte_ipsec_session *fb;

			APP_CHECK(app_sa_prm.enable, status, "Fallback session "
				"not allowed for legacy mode.");
			if (status->status < 0)
				return;
			APP_CHECK(ips->type ==
				RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO, status,
				"Fallback session allowed if primary session "
				"is of type inline-crypto-offload only.");
			if (status->status < 0)
				return;
			APP_CHECK(rule->direction ==
				RTE_SECURITY_IPSEC_SA_DIR_INGRESS, status,
				"Fallback session not allowed for egress "
				"rule");
			if (status->status < 0)
				return;
			APP_CHECK_PRESENCE(fallback_p, tokens[ti], status);
			if (status->status < 0)
				return;
			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;
			fb = ipsec_get_fallback_session(rule);
			if (strcmp(tokens[ti], "lookaside-none") == 0)
				fb->type = RTE_SECURITY_ACTION_TYPE_NONE;
			else if (strcmp(tokens[ti], "cpu-crypto") == 0)
				fb->type = RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO;
			else {
				APP_CHECK(0, status, "unrecognized fallback "
					"type %s.", tokens[ti]);
				return;
			}

			rule->fallback_sessions = 1;
			nb_crypto_sessions++;
			fallback_p = 1;
			continue;
		}
		if (strcmp(tokens[ti], "flow-direction") == 0) {
			switch (ips->type) {
			case RTE_SECURITY_ACTION_TYPE_NONE:
			case RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO:
				rule->fdir_flag = 1;
				INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
				if (status->status < 0)
					return;
				if (rule->portid == UINT16_MAX)
					rule->portid = atoi(tokens[ti]);
				else if (rule->portid != atoi(tokens[ti])) {
					APP_CHECK(0, status,
						"portid %s not matching with already assigned portid %u",
						tokens[ti], rule->portid);
					return;
				}
				INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
				if (status->status < 0)
					return;
				rule->fdir_qid = atoi(tokens[ti]);
				/* validating portid and queueid */
				status_p = check_flow_params(rule->portid,
						rule->fdir_qid);
				if (status_p < 0) {
					printf("port id %u / queue id %u is "
						"not valid\n", rule->portid,
						rule->fdir_qid);
				}
				break;
			case RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO:
			case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
			case RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL:
			default:
				APP_CHECK(0, status,
					"flow director not supported for security session type %d",
					ips->type);
				return;
			}
			continue;
		}
		if (strcmp(tokens[ti], "udp-encap") == 0) {
			switch (ips->type) {
			case RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL:
			case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
				APP_CHECK_PRESENCE(udp_encap_p, tokens[ti],
					status);
				if (status->status < 0)
					return;

				rule->udp_encap = 1;
				app_sa_prm.udp_encap = 1;
				udp_encap_p = 1;
				break;
			case RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO:
				rule->udp_encap = 1;
				rule->udp.sport = 4500;
				rule->udp.dport = 4500;
				break;
			default:
				APP_CHECK(0, status,
					"UDP encapsulation not supported for "
					"security session type %d",
					ips->type);
				return;
			}
			continue;
		}

		/* unrecognizable input */
		APP_CHECK(0, status, "unrecognized input \"%s\"",
			tokens[ti]);
		return;
	}
	if (aead_algo_p) {
		APP_CHECK(cipher_algo_p == 0, status,
			"AEAD used, no need for cipher options");
		if (status->status < 0)
			return;

		APP_CHECK(auth_algo_p == 0, status,
			"AEAD used, no need for auth options");
		if (status->status < 0)
			return;
	} else {
		APP_CHECK(cipher_algo_p == 1, status, "missing cipher or AEAD options");
		if (status->status < 0)
			return;

		APP_CHECK(auth_algo_p == 1, status, "missing auth or AEAD options");
		if (status->status < 0)
			return;
	}

	APP_CHECK(mode_p == 1, status, "missing mode option");
	if (status->status < 0)
		return;

	if ((ips->type != RTE_SECURITY_ACTION_TYPE_NONE && ips->type !=
			RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO) && (portid_p == 0))
		printf("Missing portid option, falling back to non-offload\n");

	if (!type_p || (!portid_p && ips->type !=
			RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO)) {
		ips->type = RTE_SECURITY_ACTION_TYPE_NONE;
	}

	if (ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO)
		wrkr_flags |= INL_CR_F;
	else if (ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
		wrkr_flags |= INL_PR_F;
	else if (ips->type == RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL)
		wrkr_flags |= LA_PR_F;
	else
		wrkr_flags |= LA_ANY_F;

	nb_crypto_sessions++;
	*ri = *ri + 1;
}
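
/*
 * Example rules accepted by parse_sa_tokens() (illustrative; the
 * canonical syntax is documented in the ipsec-secgw sample app guide):
 *
 * sa out 5 cipher_algo aes-128-cbc cipher_key 0:0:0:0:0:0:0:0:0:0:0:0:
 * 0:0:0:0 auth_algo sha1-hmac auth_key 0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:0:
 * 0:0:0:0 mode ipv4-tunnel src 172.16.1.5 dst 172.16.2.5
 *
 * sa in 105 aead_algo aes-128-gcm aead_key de:ad:be:ef:de:ad:be:ef:de:
 * ad:be:ef:de:ad:be:ef:de:ad:be:ef mode ipv4-tunnel src 172.16.2.5
 * dst 172.16.1.5 type inline-crypto-offload port_id 1
 */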
static void
print_one_sa_rule(const struct ipsec_sa *sa, int inbound)
{
	uint32_t i;
	uint8_t a, b, c, d;
	const struct rte_ipsec_session *ips;
	const struct rte_ipsec_session *fallback_ips;

	printf("\tspi_%s(%3u):", inbound?"in":"out", sa->spi);

	for (i = 0; i < RTE_DIM(cipher_algos); i++) {
		if (cipher_algos[i].algo == sa->cipher_algo &&
				cipher_algos[i].key_len == sa->cipher_key_len) {
			printf("%s ", cipher_algos[i].keyword);
			break;
		}
	}

	for (i = 0; i < RTE_DIM(auth_algos); i++) {
		if (auth_algos[i].algo == sa->auth_algo) {
			printf("%s ", auth_algos[i].keyword);
			break;
		}
	}

	for (i = 0; i < RTE_DIM(aead_algos); i++) {
		if (aead_algos[i].algo == sa->aead_algo &&
				aead_algos[i].key_len-4 == sa->cipher_key_len) {
			printf("%s ", aead_algos[i].keyword);
			break;
		}
	}

	if (sa->udp_encap)
		printf("UDP encapsulated ");

	switch (WITHOUT_TRANSPORT_VERSION(sa->flags)) {
	case IP4_TUNNEL:
		printf("IP4Tunnel ");
		uint32_t_to_char(sa->src.ip.ip4, &a, &b, &c, &d);
		printf("%hhu.%hhu.%hhu.%hhu ", d, c, b, a);
		uint32_t_to_char(sa->dst.ip.ip4, &a, &b, &c, &d);
		printf("%hhu.%hhu.%hhu.%hhu", d, c, b, a);
		break;
	case IP6_TUNNEL:
		printf("IP6Tunnel ");
		for (i = 0; i < 16; i++) {
			if (i % 2 && i != 15)
				printf("%.2x:", sa->src.ip.ip6.ip6_b[i]);
			else
				printf("%.2x", sa->src.ip.ip6.ip6_b[i]);
		}
		printf(" ");
		for (i = 0; i < 16; i++) {
			if (i % 2 && i != 15)
				printf("%.2x:", sa->dst.ip.ip6.ip6_b[i]);
			else
				printf("%.2x", sa->dst.ip.ip6.ip6_b[i]);
		}
		break;
	case TRANSPORT:
		printf("Transport ");
		break;
	}

	ips = &sa->sessions[IPSEC_SESSION_PRIMARY];
	printf(" type:");
	switch (ips->type) {
	case RTE_SECURITY_ACTION_TYPE_NONE:
		printf("no-offload ");
		break;
	case RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO:
		printf("inline-crypto-offload ");
		break;
	case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
		printf("inline-protocol-offload ");
		break;
	case RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL:
		printf("lookaside-protocol-offload ");
		break;
	case RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO:
		printf("cpu-crypto-accelerated ");
		break;
	}

	fallback_ips = &sa->sessions[IPSEC_SESSION_FALLBACK];
	if (fallback_ips != NULL && sa->fallback_sessions > 0) {
		printf("inline fallback: ");
		switch (fallback_ips->type) {
		case RTE_SECURITY_ACTION_TYPE_NONE:
			printf("lookaside-none");
			break;
		case RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO:
			printf("cpu-crypto-accelerated");
			break;
		default:
			printf("invalid");
			break;
		}
	}
	if (sa->fdir_flag == 1)
		printf("flow-direction port %d queue %d", sa->portid,
				sa->fdir_qid);

	printf("\n");
}
static struct sa_ctx *
sa_create(const char *name, int32_t socket_id, uint32_t nb_sa)
{
	char s[PATH_MAX];
	struct sa_ctx *sa_ctx;
	uint32_t mz_size;
	const struct rte_memzone *mz;

	snprintf(s, sizeof(s), "%s_%u", name, socket_id);

	/* Create SA context */
	printf("Creating SA context with %u maximum entries on socket %d\n",
			nb_sa, socket_id);

	mz_size = sizeof(struct ipsec_xf) * nb_sa;
	mz = rte_memzone_reserve(s, mz_size, socket_id,
			RTE_MEMZONE_1GB | RTE_MEMZONE_SIZE_HINT_ONLY);
	if (mz == NULL) {
		printf("Failed to allocate SA XFORM memory\n");
		rte_errno = ENOMEM;
		return NULL;
	}

	sa_ctx = rte_zmalloc(NULL, sizeof(struct sa_ctx) +
		sizeof(struct ipsec_sa) * nb_sa, RTE_CACHE_LINE_SIZE);

	if (sa_ctx == NULL) {
		printf("Failed to allocate SA CTX memory\n");
		rte_errno = ENOMEM;
		rte_memzone_free(mz);
		return NULL;
	}

	sa_ctx->xf = (struct ipsec_xf *)mz->addr;
	sa_ctx->nb_sa = nb_sa;

	return sa_ctx;
}
static int
check_eth_dev_caps(uint16_t portid, uint32_t inbound, uint32_t tso)
{
	struct rte_eth_dev_info dev_info;
	int retval;

	retval = rte_eth_dev_info_get(portid, &dev_info);
	if (retval != 0) {
		RTE_LOG(ERR, IPSEC,
			"Error during getting device (port %u) info: %s\n",
			portid, strerror(-retval));
		return retval;
	}

	if (inbound) {
		/* Check if RX offloads are supported */
		if ((dev_info.rx_offload_capa &
				RTE_ETH_RX_OFFLOAD_SECURITY) == 0) {
			RTE_LOG(WARNING, PORT,
				"hardware RX IPSec offload is not supported\n");
			return -EINVAL;
		}
	} else { /* outbound */
		/* Check if TX offloads are supported */
		if ((dev_info.tx_offload_capa &
				RTE_ETH_TX_OFFLOAD_SECURITY) == 0) {
			RTE_LOG(WARNING, PORT,
				"hardware TX IPSec offload is not supported\n");
			return -EINVAL;
		}
		if (tso && (dev_info.tx_offload_capa &
				RTE_ETH_TX_OFFLOAD_TCP_TSO) == 0) {
			RTE_LOG(WARNING, PORT,
				"hardware TCP TSO offload is not supported\n");
			return -EINVAL;
		}
	}

	return 0;
}
/*
 * Helper function, tries to determine next_proto for SPI
 * by searching through SP rules.
 */
static int
get_spi_proto(uint32_t spi, enum rte_security_ipsec_sa_direction dir,
		struct ip_addr ip_addr[2], uint32_t mask[2])
{
	int32_t rc4, rc6;

	rc4 = sp4_spi_present(spi, dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
			ip_addr, mask);
	rc6 = sp6_spi_present(spi, dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
			ip_addr, mask);

	if (rc4 >= 0) {
		if (rc6 >= 0) {
			RTE_LOG(ERR, IPSEC,
				"%s: SPI %u used simultaneously by "
				"IPv4(%d) and IPv6 (%d) SP rules\n",
				__func__, spi, rc4, rc6);
			return -EINVAL;
		}
		return IPPROTO_IPIP;
	} else if (rc6 < 0) {
		RTE_LOG(ERR, IPSEC,
			"%s: SPI %u is not used by any SP rule\n",
			__func__, spi);
		return -ENOENT;
	}

	return IPPROTO_IPV6;
}
/*
 * Helper function for getting source and destination IP addresses
 * from SP. Needed for inline crypto transport mode, as addresses are not
 * provided in config file for that mode. It checks if SP for current SA exists,
 * and based on what type of protocol is returned, it stores the appropriate
 * addresses obtained from SP into SA.
 */
static int
sa_add_address_inline_crypto(struct ipsec_sa *sa)
{
	int protocol;
	struct ip_addr ip_addr[2];
	uint32_t mask[2];

	protocol = get_spi_proto(sa->spi, sa->direction, ip_addr, mask);
	if (protocol < 0)
		return protocol;
	else if (protocol == IPPROTO_IPIP) {
		sa->flags |= IP4_TRANSPORT;
		if (mask[0] == IP4_FULL_MASK &&
				mask[1] == IP4_FULL_MASK &&
				ip_addr[0].ip.ip4 != 0 &&
				ip_addr[1].ip.ip4 != 0) {

			sa->src.ip.ip4 = ip_addr[0].ip.ip4;
			sa->dst.ip.ip4 = ip_addr[1].ip.ip4;
		} else {
			RTE_LOG(ERR, IPSEC,
				"%s: No valid address or mask entry in"
				" IPv4 SP rule for SPI %u\n",
				__func__, sa->spi);
			return -EINVAL;
		}
	} else if (protocol == IPPROTO_IPV6) {
		sa->flags |= IP6_TRANSPORT;
		if (mask[0] == IP6_FULL_MASK &&
				mask[1] == IP6_FULL_MASK &&
				(ip_addr[0].ip.ip6.ip6[0] != 0 ||
					ip_addr[0].ip.ip6.ip6[1] != 0) &&
				(ip_addr[1].ip.ip6.ip6[0] != 0 ||
					ip_addr[1].ip.ip6.ip6[1] != 0)) {

			sa->src.ip.ip6 = ip_addr[0].ip.ip6;
			sa->dst.ip.ip6 = ip_addr[1].ip.ip6;
		} else {
			RTE_LOG(ERR, IPSEC,
				"%s: No valid address or mask entry in"
				" IPv6 SP rule for SPI %u\n",
				__func__, sa->spi);
			return -EINVAL;
		}
	}

	return 0;
}
static int
sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
		uint32_t nb_entries, uint32_t inbound,
		struct socket_ctx *skt_ctx)
{
	struct ipsec_sa *sa;
	uint32_t i, idx;
	uint16_t iv_length, aad_length;
	int inline_status;
	int32_t rc;
	struct rte_ipsec_session *ips;

	/* for ESN upper 32 bits of SQN also need to be part of AAD */
	aad_length = (app_sa_prm.enable_esn != 0) ? sizeof(uint32_t) : 0;
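
	/*
	 * Note (added for clarity): with ESN the full 64-bit sequence
	 * number is authenticated but only its low 32 bits travel in the
	 * ESP header, so e.g. an AES-GCM SA with an 8-byte configured AAD
	 * ends up with aad_length = 8 + 4 = 12 when ESN is enabled.
	 */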
	for (i = 0; i < nb_entries; i++) {
		idx = i;
		sa = &sa_ctx->sa[idx];
		if (sa->spi != 0) {
			printf("Index %u already in use by SPI %u\n",
					idx, sa->spi);
			return -EINVAL;
		}
		*sa = entries[i];

		if (inbound) {
			rc = ipsec_sad_add(&sa_ctx->sad, sa);
			if (rc != 0)
				return rc;
		}

		sa->seq = 0;
		ips = ipsec_get_primary_session(sa);

		if (ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL ||
			ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
			if (check_eth_dev_caps(sa->portid, inbound, sa->mss))
				return -EINVAL;
		}

		switch (WITHOUT_TRANSPORT_VERSION(sa->flags)) {
		case IP4_TUNNEL:
			sa->src.ip.ip4 = rte_cpu_to_be_32(sa->src.ip.ip4);
			sa->dst.ip.ip4 = rte_cpu_to_be_32(sa->dst.ip.ip4);
			break;
		case TRANSPORT:
			if (ips->type ==
				RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
				inline_status =
					sa_add_address_inline_crypto(sa);
				if (inline_status < 0)
					return inline_status;
			}
			break;
		}

		if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_GCM ||
			sa->aead_algo == RTE_CRYPTO_AEAD_AES_CCM ||
			sa->aead_algo == RTE_CRYPTO_AEAD_CHACHA20_POLY1305) {

			if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_CCM)
				iv_length = 11;
			else
				iv_length = 12;

			sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_AEAD;
			sa_ctx->xf[idx].a.aead.algo = sa->aead_algo;
			sa_ctx->xf[idx].a.aead.key.data = sa->cipher_key;
			sa_ctx->xf[idx].a.aead.key.length =
				sa->cipher_key_len;
			sa_ctx->xf[idx].a.aead.op = (inbound == 1) ?
				RTE_CRYPTO_AEAD_OP_DECRYPT :
				RTE_CRYPTO_AEAD_OP_ENCRYPT;
			sa_ctx->xf[idx].a.next = NULL;
			sa_ctx->xf[idx].a.aead.iv.offset = IV_OFFSET;
			sa_ctx->xf[idx].a.aead.iv.length = iv_length;
			sa_ctx->xf[idx].a.aead.aad_length =
				sa->aad_len + aad_length;
			sa_ctx->xf[idx].a.aead.digest_length =
				sa->digest_len;

			sa->xforms = &sa_ctx->xf[idx].a;
		} else {
			switch (sa->cipher_algo) {
			case RTE_CRYPTO_CIPHER_NULL:
			case RTE_CRYPTO_CIPHER_3DES_CBC:
			case RTE_CRYPTO_CIPHER_AES_CBC:
			case RTE_CRYPTO_CIPHER_AES_CTR:
				iv_length = sa->iv_len;
				break;
			default:
				RTE_LOG(ERR, IPSEC_ESP,
					"unsupported cipher algorithm %u\n",
					sa->cipher_algo);
				return -EINVAL;
			}

			/* AES_GMAC uses salt like AEAD algorithms */
			if (sa->auth_algo == RTE_CRYPTO_AUTH_AES_GMAC)
				iv_length = 12;

			if (inbound) {
				sa_ctx->xf[idx].b.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
				sa_ctx->xf[idx].b.cipher.algo = sa->cipher_algo;
				sa_ctx->xf[idx].b.cipher.key.data = sa->cipher_key;
				sa_ctx->xf[idx].b.cipher.key.length =
					sa->cipher_key_len;
				sa_ctx->xf[idx].b.cipher.op =
					RTE_CRYPTO_CIPHER_OP_DECRYPT;
				sa_ctx->xf[idx].b.next = NULL;
				sa_ctx->xf[idx].b.cipher.iv.offset = IV_OFFSET;
				sa_ctx->xf[idx].b.cipher.iv.length = iv_length;

				sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_AUTH;
				sa_ctx->xf[idx].a.auth.algo = sa->auth_algo;
				sa_ctx->xf[idx].a.auth.key.data = sa->auth_key;
				sa_ctx->xf[idx].a.auth.key.length =
					sa->auth_key_len;
				sa_ctx->xf[idx].a.auth.digest_length =
					sa->digest_len;
				sa_ctx->xf[idx].a.auth.op =
					RTE_CRYPTO_AUTH_OP_VERIFY;
				sa_ctx->xf[idx].a.auth.iv.offset = IV_OFFSET;
				sa_ctx->xf[idx].a.auth.iv.length = iv_length;
			} else { /* outbound */
				sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
				sa_ctx->xf[idx].a.cipher.algo = sa->cipher_algo;
				sa_ctx->xf[idx].a.cipher.key.data = sa->cipher_key;
				sa_ctx->xf[idx].a.cipher.key.length =
					sa->cipher_key_len;
				sa_ctx->xf[idx].a.cipher.op =
					RTE_CRYPTO_CIPHER_OP_ENCRYPT;
				sa_ctx->xf[idx].a.next = NULL;
				sa_ctx->xf[idx].a.cipher.iv.offset = IV_OFFSET;
				sa_ctx->xf[idx].a.cipher.iv.length = iv_length;

				sa_ctx->xf[idx].b.type = RTE_CRYPTO_SYM_XFORM_AUTH;
				sa_ctx->xf[idx].b.auth.algo = sa->auth_algo;
				sa_ctx->xf[idx].b.auth.key.data = sa->auth_key;
				sa_ctx->xf[idx].b.auth.key.length =
					sa->auth_key_len;
				sa_ctx->xf[idx].b.auth.digest_length =
					sa->digest_len;
				sa_ctx->xf[idx].b.auth.op =
					RTE_CRYPTO_AUTH_OP_GENERATE;
				sa_ctx->xf[idx].b.auth.iv.offset = IV_OFFSET;
				sa_ctx->xf[idx].b.auth.iv.length = iv_length;
			}

			if (sa->auth_algo == RTE_CRYPTO_AUTH_AES_GMAC) {
				sa->xforms = inbound ?
					&sa_ctx->xf[idx].a : &sa_ctx->xf[idx].b;
				sa->xforms->next = NULL;
			} else {
				sa_ctx->xf[idx].a.next = &sa_ctx->xf[idx].b;
				sa_ctx->xf[idx].b.next = NULL;
				sa->xforms = &sa_ctx->xf[idx].a;
			}
		}

		if (ips->type ==
			RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL ||
			ips->type ==
			RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
			rc = create_inline_session(skt_ctx, sa, ips);
			if (rc != 0) {
				RTE_LOG(ERR, IPSEC_ESP,
					"create_inline_session() failed\n");
				return -EINVAL;
			}
		}
		if (sa->fdir_flag && inbound) {
			rc = create_ipsec_esp_flow(sa);
			if (rc != 0)
				RTE_LOG(ERR, IPSEC_ESP,
					"create_ipsec_esp_flow() failed\n");
		}
		print_one_sa_rule(sa, inbound);
	}

	return 0;
}
static inline int
sa_out_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
		uint32_t nb_entries, struct socket_ctx *skt_ctx)
{
	return sa_add_rules(sa_ctx, entries, nb_entries, 0, skt_ctx);
}

static inline int
sa_in_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
		uint32_t nb_entries, struct socket_ctx *skt_ctx)
{
	return sa_add_rules(sa_ctx, entries, nb_entries, 1, skt_ctx);
}
/*
 * helper function, fills parameters that are identical for all SAs
 */
static void
fill_ipsec_app_sa_prm(struct rte_ipsec_sa_prm *prm,
	const struct app_sa_prm *app_prm)
{
	memset(prm, 0, sizeof(*prm));

	prm->flags = app_prm->flags;
	prm->ipsec_xform.options.esn = app_prm->enable_esn;
	prm->ipsec_xform.replay_win_sz = app_prm->window_size;
}
static int
fill_ipsec_sa_prm(struct rte_ipsec_sa_prm *prm, const struct ipsec_sa *ss,
	const struct rte_ipv4_hdr *v4, struct rte_ipv6_hdr *v6)
{
	int32_t rc;

	/*
	 * Try to get SPI next proto by searching that SPI in SPD.
	 * probably not the optimal way, but there seems nothing
	 * better right now.
	 */
	rc = get_spi_proto(ss->spi, ss->direction, NULL, NULL);
	if (rc < 0)
		return rc;

	fill_ipsec_app_sa_prm(prm, &app_sa_prm);
	prm->userdata = (uintptr_t)ss;

	/* setup ipsec xform */
	prm->ipsec_xform.spi = ss->spi;
	prm->ipsec_xform.salt = ss->salt;
	prm->ipsec_xform.direction = ss->direction;
	prm->ipsec_xform.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP;
	prm->ipsec_xform.mode = (IS_TRANSPORT(ss->flags)) ?
		RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT :
		RTE_SECURITY_IPSEC_SA_MODE_TUNNEL;
	prm->ipsec_xform.options.udp_encap = ss->udp_encap;
	prm->ipsec_xform.options.ecn = 1;
	prm->ipsec_xform.options.copy_dscp = 1;

	if (IS_IP4_TUNNEL(ss->flags)) {
		prm->ipsec_xform.tunnel.type = RTE_SECURITY_IPSEC_TUNNEL_IPV4;
		prm->tun.hdr_len = sizeof(*v4);
		prm->tun.next_proto = rc;
		prm->tun.hdr = v4;
	} else if (IS_IP6_TUNNEL(ss->flags)) {
		prm->ipsec_xform.tunnel.type = RTE_SECURITY_IPSEC_TUNNEL_IPV6;
		prm->tun.hdr_len = sizeof(*v6);
		prm->tun.next_proto = rc;
		prm->tun.hdr = v6;
	} else {
		/* transport mode */
		prm->trs.proto = rc;
	}

	/* setup crypto section */
	prm->crypto_xform = ss->xforms;
	return 0;
}
static int
fill_ipsec_session(struct rte_ipsec_session *ss, struct rte_ipsec_sa *sa)
{
	int32_t rc = 0;

	ss->sa = sa;

	if (ss->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
		ss->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) {
		if (ss->security.ses != NULL) {
			rc = rte_ipsec_session_prepare(ss);
			if (rc != 0)
				memset(ss, 0, sizeof(*ss));
		}
	}

	return rc;
}
/*
 * Initialise related rte_ipsec_sa object.
 */
static int
ipsec_sa_init(struct ipsec_sa *lsa, struct rte_ipsec_sa *sa, uint32_t sa_size)
{
	int rc;
	struct rte_ipsec_sa_prm prm;
	struct rte_ipsec_session *ips;
	struct rte_ipv4_hdr v4 = {
		.version_ihl = IPVERSION << 4 |
			sizeof(v4) / RTE_IPV4_IHL_MULTIPLIER,
		.time_to_live = IPDEFTTL,
		.next_proto_id = IPPROTO_ESP,
		.src_addr = lsa->src.ip.ip4,
		.dst_addr = lsa->dst.ip.ip4,
	};
	struct rte_ipv6_hdr v6 = {
		.vtc_flow = htonl(IP6_VERSION << 28),
		.proto = IPPROTO_ESP,
	};

	if (IS_IP6_TUNNEL(lsa->flags)) {
		memcpy(v6.src_addr, lsa->src.ip.ip6.ip6_b, sizeof(v6.src_addr));
		memcpy(v6.dst_addr, lsa->dst.ip.ip6.ip6_b, sizeof(v6.dst_addr));
	}

	rc = fill_ipsec_sa_prm(&prm, lsa, &v4, &v6);
	if (rc == 0)
		rc = rte_ipsec_sa_init(sa, &prm, sa_size);
	if (rc < 0)
		return rc;

	if (lsa->flags & SA_TELEMETRY_ENABLE)
		rte_ipsec_telemetry_sa_add(sa);

	/* init primary processing session */
	ips = ipsec_get_primary_session(lsa);
	rc = fill_ipsec_session(ips, sa);
	if (rc != 0)
		return rc;

	/* init inline fallback processing session */
	if (lsa->fallback_sessions == 1)
		rc = fill_ipsec_session(ipsec_get_fallback_session(lsa), sa);

	return rc;
}
/*
 * Allocate space and init rte_ipsec_sa structures,
 * one per session.
 */
static int
ipsec_satbl_init(struct sa_ctx *ctx, uint32_t nb_ent, int32_t socket)
{
	int32_t rc, sz;
	uint32_t i, idx;
	size_t tsz;
	struct rte_ipsec_sa *sa;
	struct ipsec_sa *lsa;
	struct rte_ipsec_sa_prm prm;

	/* determine SA size */
	idx = 0;
	fill_ipsec_sa_prm(&prm, ctx->sa + idx, NULL, NULL);
	sz = rte_ipsec_sa_size(&prm);
	if (sz < 0) {
		RTE_LOG(ERR, IPSEC, "%s(%p, %u, %d): "
			"failed to determine SA size, error code: %d\n",
			__func__, ctx, nb_ent, socket, sz);
		return sz;
	}

	tsz = sz * nb_ent;

	ctx->satbl = rte_zmalloc_socket(NULL, tsz, RTE_CACHE_LINE_SIZE, socket);
	if (ctx->satbl == NULL) {
		RTE_LOG(ERR, IPSEC,
			"%s(%p, %u, %d): failed to allocate %zu bytes\n",
			__func__, ctx, nb_ent, socket, tsz);
		return -ENOMEM;
	}

	rc = 0;
	for (i = 0; i != nb_ent && rc == 0; i++) {

		idx = i;

		sa = (struct rte_ipsec_sa *)((uintptr_t)ctx->satbl + sz * i);
		lsa = ctx->sa + idx;

		rc = ipsec_sa_init(lsa, sa, sz);
	}

	return rc;
}
static int
sa_cmp(const void *p, const void *q)
{
	uint32_t spi1 = ((const struct ipsec_sa *)p)->spi;
	uint32_t spi2 = ((const struct ipsec_sa *)q)->spi;

	/* explicit comparison instead of subtraction: casting the unsigned
	 * difference to int misorders SPIs that differ by more than
	 * INT32_MAX
	 */
	return (spi1 > spi2) - (spi1 < spi2);
}
/*
 * Walk through all SA rules to find an SA with given SPI
 */
int
sa_spi_present(struct sa_ctx *sa_ctx, uint32_t spi, int inbound)
{
	uint32_t num;
	struct ipsec_sa *sa;
	struct ipsec_sa tmpl;
	const struct ipsec_sa *sar;

	sar = sa_ctx->sa;
	if (inbound != 0)
		num = nb_sa_in;
	else
		num = nb_sa_out;

	tmpl.spi = spi;

	sa = bsearch(&tmpl, sar, num, sizeof(struct ipsec_sa), sa_cmp);
	if (sa != NULL)
		return RTE_PTR_DIFF(sa, sar) / sizeof(struct ipsec_sa);

	return -ENOENT;
}
void
sa_init(struct socket_ctx *ctx, int32_t socket_id)
{
	int32_t rc;
	const char *name;

	if (ctx == NULL)
		rte_exit(EXIT_FAILURE, "NULL context.\n");

	if (ctx->sa_in != NULL)
		rte_exit(EXIT_FAILURE, "Inbound SA DB for socket %u already "
				"initialized\n", socket_id);

	if (ctx->sa_out != NULL)
		rte_exit(EXIT_FAILURE, "Outbound SA DB for socket %u already "
				"initialized\n", socket_id);

	if (nb_sa_in > 0) {
		name = "sa_in";
		ctx->sa_in = sa_create(name, socket_id, nb_sa_in);
		if (ctx->sa_in == NULL)
			rte_exit(EXIT_FAILURE, "Error [%d] creating SA "
				"context %s in socket %d\n", rte_errno,
				name, socket_id);

		rc = ipsec_sad_create(name, &ctx->sa_in->sad, socket_id,
				&sa_in_cnt);
		if (rc != 0)
			rte_exit(EXIT_FAILURE, "failed to init SAD\n");

		sa_in_add_rules(ctx->sa_in, sa_in, nb_sa_in, ctx);

		if (app_sa_prm.enable != 0) {
			rc = ipsec_satbl_init(ctx->sa_in, nb_sa_in,
				socket_id);
			if (rc != 0)
				rte_exit(EXIT_FAILURE,
					"failed to init inbound SAs\n");
		}
	} else
		RTE_LOG(WARNING, IPSEC, "No SA Inbound rule specified\n");

	if (nb_sa_out > 0) {
		name = "sa_out";
		ctx->sa_out = sa_create(name, socket_id, nb_sa_out);
		if (ctx->sa_out == NULL)
			rte_exit(EXIT_FAILURE, "Error [%d] creating SA "
				"context %s in socket %d\n", rte_errno,
				name, socket_id);

		sa_out_add_rules(ctx->sa_out, sa_out, nb_sa_out, ctx);

		if (app_sa_prm.enable != 0) {
			rc = ipsec_satbl_init(ctx->sa_out, nb_sa_out,
				socket_id);
			if (rc != 0)
				rte_exit(EXIT_FAILURE,
					"failed to init outbound SAs\n");
		}
	} else
		RTE_LOG(WARNING, IPSEC, "No SA Outbound rule "
			"specified\n");
}
int
inbound_sa_check(struct sa_ctx *sa_ctx, struct rte_mbuf *m, uint32_t sa_idx)
{
	struct ipsec_mbuf_metadata *priv;
	struct ipsec_sa *sa;

	priv = get_priv(m);
	sa = priv->sa;
	if (sa != NULL)
		return (sa_ctx->sa[sa_idx].spi == sa->spi);

	RTE_LOG(ERR, IPSEC, "SA not saved in private data\n");
	return 0;
}
void
inbound_sa_lookup(struct sa_ctx *sa_ctx, struct rte_mbuf *pkts[],
		void *sa_arr[], uint16_t nb_pkts)
{
	uint32_t i;
	void *result_sa;
	struct ipsec_sa *sa;

	sad_lookup(&sa_ctx->sad, pkts, sa_arr, nb_pkts);

	/*
	 * Mark the need for inline offload fallback on the LSB of the SA
	 * pointer. Thanks to the packet grouping mechanism that
	 * ipsec_process uses, packets marked for fallback processing will
	 * form a separate group.
	 *
	 * Because it is not safe to use the SA pointer directly, it is cast
	 * to a generic pointer to prevent unintentional use. Use
	 * ipsec_mask_saptr to get a valid struct pointer.
	 */
	for (i = 0; i < nb_pkts; i++) {
		if (sa_arr[i] == NULL)
			continue;

		result_sa = sa = sa_arr[i];
		if (MBUF_NO_SEC_OFFLOAD(pkts[i]) &&
			sa->fallback_sessions > 0) {
			uintptr_t intsa = (uintptr_t)sa;
			intsa |= IPSEC_SA_OFFLOAD_FALLBACK_FLAG;
			result_sa = (void *)intsa;
		}
		sa_arr[i] = result_sa;
	}
}
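
/*
 * Illustrative consumer-side sketch (not part of the original file):
 * code that receives the tagged pointer strips the flag before use,
 *
 *	if ((uintptr_t)sa_arr[i] & IPSEC_SA_OFFLOAD_FALLBACK_FLAG)
 *		sa = ipsec_mask_saptr(sa_arr[i]);	// clear LSB marker
 *
 * which is safe because struct ipsec_sa is at least 2-byte aligned, so
 * the least significant bit of a valid pointer is otherwise always zero.
 */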
void
outbound_sa_lookup(struct sa_ctx *sa_ctx, uint32_t sa_idx[],
		void *sa[], uint16_t nb_pkts)
{
	uint32_t i;

	for (i = 0; i < nb_pkts; i++)
		sa[i] = &sa_ctx->sa[sa_idx[i]];
}
/*
 * Select HW offloads to be used.
 */
int
sa_check_offloads(uint16_t port_id, uint64_t *rx_offloads,
		uint64_t *tx_offloads)
{
	struct ipsec_sa *rule;
	uint32_t idx_sa;
	enum rte_security_session_action_type rule_type;
	struct rte_eth_dev_info dev_info;
	int ret;

	*rx_offloads = 0;
	*tx_offloads = 0;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		rte_exit(EXIT_FAILURE,
			"Error during getting device (port %u) info: %s\n",
			port_id, strerror(-ret));

	/* Check for inbound rules that use offloads and use this port */
	for (idx_sa = 0; idx_sa < nb_sa_in; idx_sa++) {
		rule = &sa_in[idx_sa];
		rule_type = ipsec_get_action_type(rule);
		if ((rule_type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
				rule_type ==
				RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
				&& rule->portid == port_id)
			*rx_offloads |= RTE_ETH_RX_OFFLOAD_SECURITY;
	}

	/* Check for outbound rules that use offloads and use this port */
	for (idx_sa = 0; idx_sa < nb_sa_out; idx_sa++) {
		rule = &sa_out[idx_sa];
		rule_type = ipsec_get_action_type(rule);
		switch (rule_type) {
		case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
			/* Checksum offload is not needed for inline protocol
			 * as all processing for outbound IPsec packets is
			 * implicitly taken care of, and for non-IPsec packets
			 * there is no need of IPv4 checksum offload.
			 */
			if (rule->portid == port_id) {
				*tx_offloads |= RTE_ETH_TX_OFFLOAD_SECURITY;
				if (rule->mss)
					*tx_offloads |= (RTE_ETH_TX_OFFLOAD_TCP_TSO |
							 RTE_ETH_TX_OFFLOAD_IPV4_CKSUM);
			}
			break;
		case RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO:
			if (rule->portid == port_id) {
				*tx_offloads |= RTE_ETH_TX_OFFLOAD_SECURITY;
				if (rule->mss)
					*tx_offloads |=
						RTE_ETH_TX_OFFLOAD_TCP_TSO;
				*tx_offloads |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM;
			}
			break;
		default:
			/* Enable IPv4 checksum offload even if one of the
			 * lookaside SAs is present.
			 */
			if (dev_info.tx_offload_capa &
					RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
				*tx_offloads |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM;
			break;
		}
	}
	return 0;
}
void
sa_sort_arr(void)
{
	qsort(sa_in, nb_sa_in, sizeof(struct ipsec_sa), sa_cmp);
	qsort(sa_out, nb_sa_out, sizeof(struct ipsec_sa), sa_cmp);
}
uint32_t
get_nb_crypto_sessions(void)
{
	return nb_crypto_sessions;
}